From 7707ad5548f649f9b468bada0171c27bf12ce7b7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 14 Oct 2016 17:12:42 +0300 Subject: [PATCH 0001/1124] fix pathman_process_utility_hook() inadequate behavior in conjunction with some other extensions --- src/hooks.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 4e33f13f..42f9cc79 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -613,27 +613,28 @@ pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag) { - /* Call hooks set by other extensions */ - if (process_utility_hook_next) - process_utility_hook_next(parsetree, queryString, - context, params, - dest, completionTag); - /* Override standard COPY statement if needed */ if (IsPathmanReady() && is_pathman_related_copy(parsetree)) { uint64 processed; + /* Handle our COPY case (and show a special cmd name) */ PathmanDoCopy((CopyStmt *) parsetree, queryString, &processed); if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "PATHMAN COPY " UINT64_FORMAT, processed); - return; /* don't call standard_ProcessUtility() */ + return; /* don't call standard_ProcessUtility() or hooks */ } - /* Call internal implementation */ - standard_ProcessUtility(parsetree, queryString, - context, params, - dest, completionTag); + /* Call hooks set by other extensions if needed */ + if (process_utility_hook_next) + process_utility_hook_next(parsetree, queryString, + context, params, + dest, completionTag); + /* Else call internal implementation */ + else + standard_ProcessUtility(parsetree, queryString, + context, params, + dest, completionTag); } From e4e95cf95e0bcf620f755a6970cc9ea63d02a83b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 14 Oct 2016 19:12:51 +0300 Subject: [PATCH 0002/1124] mentioned conflicts between extensions in docs --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a54e2fa7..039e6572 100644 --- 
a/README.md +++ b/README.md @@ -66,6 +66,8 @@ Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as foll ``` shared_preload_libraries = 'pg_pathman' ``` +> **Important:** `pg_pathman` may have conflicts with some other extensions which uses the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` hook to handle COPY queries for partitioned tables. And it could sometimes interfere with `pg_stat_statements` extension which uses the same hook. In this case try to list libraries in certain order: `shared_preload_libraries = 'pg_pathman, pg_stat_statements'` + It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: ```plpgsql CREATE EXTENSION pg_pathman; From 62351cccdf8d18806e09834dee4cbf8e5073ba19 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 17 Oct 2016 20:18:11 +0300 Subject: [PATCH 0003/1124] basic support for Coveralls --- travis/pg-travis-test.sh | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 44552ae3..f7c77887 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -13,6 +13,8 @@ pip_packages="testgres" status=0 # pg_config path +pg_ctl_path=/usr/lib/postgresql/$PGVERSION/bin/pg_ctl +initdb_path=/usr/lib/postgresql/$PGVERSION/bin/initdb config_path=/usr/lib/postgresql/$PGVERSION/bin/pg_config @@ -29,7 +31,9 @@ sudo chmod a+x /etc/init.d/postgresql sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages # create cluster 'test' -sudo pg_createcluster --start $PGVERSION test -p 55435 -- -A trust +CLUSTER_PATH=$(pwd)/test_cluster +$initdb_path -D $CLUSTER_PATH -U $USER -A trust + # perform code analysis if necessary if [ $CHECK_CODE = "true" ]; then @@ -61,16 +65,24 @@ if [ $CHECK_CODE = "true" ]; then make clean USE_PGXS=1 PG_CONFIG=$config_path fi -# build pg_pathman -make 
USE_PGXS=1 PG_CONFIG=$config_path +# build pg_pathman (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -fprofile-arcs -ftest-coverage" sudo make install USE_PGXS=1 PG_CONFIG=$config_path +# set permission to write postgres locks +sudo chown $USER /var/run/postgresql/ + +# check build +status=$? +if [ $status -ne 0 ]; then exit $status; fi + # add pg_pathman to shared_preload_libraries and restart cluster 'test' -sudo bash -c "echo \"shared_preload_libraries = 'pg_pathman'\" >> /etc/postgresql/$PGVERSION/test/postgresql.conf" -sudo pg_ctlcluster $PGVERSION test restart +echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf +echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf +$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log # run regression tests -PGPORT=55435 make installcheck USE_PGXS=1 PGUSER=postgres PG_CONFIG=$config_path || status=$? +PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? # show diff if it exists if test -f regression.diffs; then cat regression.diffs; fi @@ -84,13 +96,17 @@ source /tmp/envs/pg_pathman/bin/activate # install pip packages pip install $pip_packages -# set permission to write postgres locks -sudo chmod a+w /var/run/postgresql/ - # run python tests cd tests PG_CONFIG=$config_path python -m unittest partitioning_test || status=$? +cd .. 
set -u +# finally report code coverage +sudo apt-get install -qq -y lcov +gem install coveralls-lcov +lcov --no-extern --capture --directory src --output-file coverage.info +coveralls-lcov coverage.info + exit $status From a91935a781c62bdd05711a85837e6db36f43e296 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 14:45:39 +0300 Subject: [PATCH 0004/1124] PGPRO-specific improvements for COPY on Windows --- src/copy_stmt_hooking.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/copy_stmt_hooking.c b/src/copy_stmt_hooking.c index 0ff52783..0d22b71a 100644 --- a/src/copy_stmt_hooking.c +++ b/src/copy_stmt_hooking.c @@ -35,14 +35,21 @@ #include "libpq/libpq.h" +/* + * Determine whether we should enable COPY or not (PostgresPro has a fix). + */ +#if defined(WIN32) && !defined(PGPRO_PATHMAN_AWARE_COPY) +#define DISABLE_PATHMAN_COPY +#endif + /* * While building PostgreSQL on Windows the msvc compiler produces .def file * which contains all the symbols that were declared as external except the ones * that were declared but not defined. We redefine variables below to prevent * 'unresolved symbol' errors on Windows. But we have to disable COPY feature - * on Windows + * on Windows. 
*/ -#ifdef WIN32 +#ifdef DISABLE_PATHMAN_COPY bool XactReadOnly = false; ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #endif @@ -107,10 +114,12 @@ is_pathman_related_copy(Node *parsetree) elog(ERROR, "freeze is not supported for partitioned tables"); } - elog(DEBUG1, "Overriding default behavior for COPY [%u]", partitioned_table); - - #ifdef WIN32 + /* Emit ERROR if we can't see the necessary symbols */ + #ifdef DISABLE_PATHMAN_COPY elog(ERROR, "COPY is not supported for partitioned tables on Windows"); + #else + elog(DEBUG1, "Overriding default behavior for COPY [%u]", + partitioned_table); #endif return true; From fce7945c40c48bdf37feb7321066f8b996c528da Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 17:31:05 +0300 Subject: [PATCH 0005/1124] introduce pathman_copy_stmt_hooking regression test --- Makefile | 3 +- expected/pathman_copy_stmt_hooking.out | 140 +++++++++++++++++++++++++ sql/pathman_copy_stmt_hooking.sql | 64 +++++++++++ 3 files changed, 206 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_copy_stmt_hooking.out create mode 100644 sql/pathman_copy_stmt_hooking.sql diff --git a/Makefile b/Makefile index d78816c7..faf63057 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,8 @@ REGRESS = pathman_basic \ pathman_domains \ pathman_foreign_keys \ pathman_permissions \ - pathman_rowmarks + pathman_rowmarks \ + pathman_copy_stmt_hooking EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output diff --git a/expected/pathman_copy_stmt_hooking.out b/expected/pathman_copy_stmt_hooking.out new file mode 100644 index 00000000..4b001044 --- /dev/null +++ b/expected/pathman_copy_stmt_hooking.out @@ -0,0 +1,140 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA copy_stmt_hooking; +CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); +INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; +/* test 
for RANGE partitioning */ +SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); +NOTICE: sequence "test_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; +1 comment +2 comment +3 comment +4 comment +5 comment +6 comment +7 comment +8 comment +9 comment +10 comment +11 comment +12 comment +13 comment +14 comment +15 comment +16 comment +17 comment +18 comment +19 comment +20 comment +\copy copy_stmt_hooking.test to stdout (format csv) +1,comment +2,comment +3,comment +4,comment +5,comment +6,comment +7,comment +8,comment +9,comment +10,comment +11,comment +12,comment +13,comment +14,comment +15,comment +16,comment +17,comment +18,comment +19,comment +20,comment +\copy copy_stmt_hooking.test(comment) to stdout +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +comment +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +SELECT count(*) FROM ONLY copy_stmt_hooking.test; + count +------- + 0 +(1 row) + +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + val | comment | tableoid +-----+---------+-------------------------- + 1 | test_1 | copy_stmt_hooking.test_1 + 6 | test_2 | copy_stmt_hooking.test_2 + 7 | test_2 | copy_stmt_hooking.test_2 + 11 | test_3 | copy_stmt_hooking.test_3 + 16 | test_4 | copy_stmt_hooking.test_4 +(5 rows) + +/* COPY TO (partition does not exist) */ +COPY copy_stmt_hooking.test FROM stdin; +ERROR: no suitable partition for key '21' +COPY copy_stmt_hooking.test(comment) FROM stdin; +ERROR: partitioned column's value should not be NULL +/* delete all data */ 
+SELECT drop_partitions('copy_stmt_hooking.test', true); +NOTICE: function copy_stmt_hooking.test_upd_trig_func() does not exist, skipping + drop_partitions +----------------- + 4 +(1 row) + +/* test for HASH partitioning */ +SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +SELECT count(*) FROM ONLY copy_stmt_hooking.test; + count +------- + 0 +(1 row) + +SELECT * FROM copy_stmt_hooking.test ORDER BY val; + val | comment +-----+--------- + 1 | hash_1 + 6 | hash_2 +(2 rows) + +DROP SCHEMA copy_stmt_hooking CASCADE; +NOTICE: drop cascades to 7 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_copy_stmt_hooking.sql b/sql/pathman_copy_stmt_hooking.sql new file mode 100644 index 00000000..ef88f587 --- /dev/null +++ b/sql/pathman_copy_stmt_hooking.sql @@ -0,0 +1,64 @@ +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; +CREATE SCHEMA copy_stmt_hooking; + + +CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); +INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; + + +/* test for RANGE partitioning */ +SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; + +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; +\copy copy_stmt_hooking.test to stdout (format csv) +\copy copy_stmt_hooking.test(comment) to stdout + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +1 test_1 +6 test_2 +7 test_2 +11 test_3 +16 test_4 +\. 
+SELECT count(*) FROM ONLY copy_stmt_hooking.test; +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + +/* COPY TO (partition does not exist) */ +COPY copy_stmt_hooking.test FROM stdin; +21 test_no_part +\. +COPY copy_stmt_hooking.test(comment) FROM stdin; +test_no_part +\. + + +/* delete all data */ +SELECT drop_partitions('copy_stmt_hooking.test', true); + + +/* test for HASH partitioning */ +SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +1 hash_1 +6 hash_2 +\. +SELECT count(*) FROM ONLY copy_stmt_hooking.test; +SELECT * FROM copy_stmt_hooking.test ORDER BY val; + + +DROP SCHEMA copy_stmt_hooking CASCADE; +DROP EXTENSION pg_pathman; From 1d8c48ba531dbf1bc3fef4a5991221ba71d325fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 18:45:13 +0300 Subject: [PATCH 0006/1124] improve regression tests for COPY, fix relcache reference leaks in COPY, allow partition creation in COPY --- expected/pathman_copy_stmt_hooking.out | 22 ++++++++++++++++++++-- sql/pathman_copy_stmt_hooking.sql | 15 ++++++++++++++- src/copy_stmt_hooking.c | 5 ++++- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/expected/pathman_copy_stmt_hooking.out b/expected/pathman_copy_stmt_hooking.out index 4b001044..bedc8035 100644 --- a/expected/pathman_copy_stmt_hooking.out +++ b/expected/pathman_copy_stmt_hooking.out @@ -3,6 +3,7 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA copy_stmt_hooking; CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; +CREATE INDEX ON copy_stmt_hooking.test(val); /* test for RANGE partitioning */ SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); NOTICE: sequence "test_seq" does not exist, skipping @@ -12,6 +13,7 @@ NOTICE: sequence "test_seq" does not exist, skipping (1 row) /* 
perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; @@ -99,9 +101,25 @@ SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; 16 | test_4 | copy_stmt_hooking.test_4 (5 rows) -/* COPY TO (partition does not exist) */ +/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; ERROR: no suitable partition for key '21' +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + val | comment +-----+--------- +(0 rows) + +/* COPY TO (partition does not exist, allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = ON; +COPY copy_stmt_hooking.test FROM stdin; +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + val | comment +-----+-------------- + 21 | test_no_part +(1 row) + +/* COPY TO (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; ERROR: partitioned column's value should not be NULL /* delete all data */ @@ -109,7 +127,7 @@ SELECT drop_partitions('copy_stmt_hooking.test', true); NOTICE: function copy_stmt_hooking.test_upd_trig_func() does not exist, skipping drop_partitions ----------------- - 4 + 5 (1 row) /* test for HASH partitioning */ diff --git a/sql/pathman_copy_stmt_hooking.sql b/sql/pathman_copy_stmt_hooking.sql index ef88f587..00d6a5d2 100644 --- a/sql/pathman_copy_stmt_hooking.sql +++ b/sql/pathman_copy_stmt_hooking.sql @@ -6,12 +6,14 @@ CREATE SCHEMA copy_stmt_hooking; CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; +CREATE INDEX ON copy_stmt_hooking.test(val); /* test for RANGE partitioning */ SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); /* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL 
copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; @@ -34,10 +36,21 @@ COPY copy_stmt_hooking.test FROM stdin; SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; -/* COPY TO (partition does not exist) */ +/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part \. +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + +/* COPY TO (partition does not exist, allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = ON; +COPY copy_stmt_hooking.test FROM stdin; +21 test_no_part +\. +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + +/* COPY TO (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; test_no_part \. diff --git a/src/copy_stmt_hooking.c b/src/copy_stmt_hooking.c index 0ff52783..fec975fc 100644 --- a/src/copy_stmt_hooking.c +++ b/src/copy_stmt_hooking.c @@ -466,7 +466,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Search for a matching partition */ rri_holder_child = select_partition_for_insert(prel, &parts_storage, values[prel->attnum - 1], - estate, false); + estate, true); child_result_rel = rri_holder_child->result_rel_info; estate->es_result_relation_info = child_result_rel; @@ -556,6 +556,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Close partitions and destroy hash table */ fini_result_parts_storage(&parts_storage, true); + /* Close parent's indices */ + ExecCloseIndices(parent_result_rel); + FreeExecutorState(estate); return processed; From a824568d820d3f7e7f76cc7c799b83ecc6f55f2b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 18:58:23 +0300 Subject: [PATCH 0007/1124] make pg_ctl wait for startup --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh 
index f7c77887..08aac214 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -79,7 +79,7 @@ if [ $status -ne 0 ]; then exit $status; fi # add pg_pathman to shared_preload_libraries and restart cluster 'test' echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log +$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w # run regression tests PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? From f19a764861f462b1e490e8c9d9fca62dee76f65b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 19:26:57 +0300 Subject: [PATCH 0008/1124] remove useless and obsolete functions, fix format string in pathman_workers.c --- src/pathman_workers.c | 2 +- src/utils.c | 83 ------------------------------------------- src/utils.h | 6 ---- 3 files changed, 1 insertion(+), 90 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 1473b3c2..0cea2103 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -540,7 +540,7 @@ bgw_main_concurrent_part(Datum main_arg) part_slot->total_rows += rows; /* Report debug message */ #ifdef USE_ASSERT_CHECKING - elog(DEBUG1, "%s: relocated %d rows, total: %lu [%u]", + elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " [%u]", concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); #endif SpinLockRelease(&part_slot->mutex); diff --git a/src/utils.c b/src/utils.c index 34ff5249..831d5a24 100644 --- a/src/utils.c +++ b/src/utils.c @@ -176,65 +176,6 @@ bms_print(Bitmapset *bms) return str.data; } -/* - * Copied from util/plancat.c - * - * Build a targetlist representing the columns of the specified index. 
- */ -List * -build_index_tlist(PlannerInfo *root, IndexOptInfo *index, - Relation heapRelation) -{ - List *tlist = NIL; - Index varno = index->rel->relid; - ListCell *indexpr_item; - int i; - - indexpr_item = list_head(index->indexprs); - for (i = 0; i < index->ncolumns; i++) - { - int indexkey = index->indexkeys[i]; - Expr *indexvar; - - if (indexkey != 0) - { - /* simple column */ - Form_pg_attribute att_tup; - - if (indexkey < 0) - att_tup = SystemAttributeDefinition(indexkey, - heapRelation->rd_rel->relhasoids); - else - att_tup = heapRelation->rd_att->attrs[indexkey - 1]; - - indexvar = (Expr *) makeVar(varno, - indexkey, - att_tup->atttypid, - att_tup->atttypmod, - att_tup->attcollation, - 0); - } - else - { - /* expression column */ - if (indexpr_item == NULL) - elog(ERROR, "wrong number of index expressions"); - indexvar = (Expr *) lfirst(indexpr_item); - indexpr_item = lnext(indexpr_item); - } - - tlist = lappend(tlist, - makeTargetEntry(indexvar, - i + 1, - NULL, - false)); - } - if (indexpr_item != NULL) - elog(ERROR, "wrong number of index expressions"); - - return tlist; -} - /* * Get BTORDER_PROC for two types described by Oids */ @@ -583,16 +524,6 @@ is_date_type_internal(Oid typid) typid == DATEOID; } -/* - * Check if this is a string type. - */ -bool -is_string_type_internal(Oid typid) -{ - return typid == TEXTOID || - typid == CSTRINGOID; -} - /* * Try to find binary operator. @@ -652,20 +583,6 @@ get_rel_name_or_relid(Oid relid) return relname; } -/* - * Try to get opname or at least opid as cstring. 
- */ -char * -get_op_name_or_opid(Oid opid) -{ - char *opname = get_opname(opid); - - if (!opname) - return DatumGetCString(DirectFunctionCall1(oidout, - ObjectIdGetDatum(opid))); - return opname; -} - #if PG_VERSION_NUM < 90600 /* diff --git a/src/utils.h b/src/utils.h index 6b7f287b..4222f549 100644 --- a/src/utils.h +++ b/src/utils.h @@ -32,9 +32,6 @@ typedef struct void plan_tree_walker(Plan *plan, void (*visitor) (Plan *plan, void *context), void *context); -List * build_index_tlist(PlannerInfo *root, - IndexOptInfo *index, - Relation heapRelation); void change_varnos(Node *node, Oid old_varno, Oid new_varno); /* @@ -48,7 +45,6 @@ void postprocess_lock_rows(List *rtable, Plan *plan); */ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); -bool is_string_type_internal(Oid typid); bool validate_on_part_init_cb(Oid procid, bool emit_error); bool check_security_policy_internal(Oid relid, Oid role); @@ -67,8 +63,6 @@ Oid get_rel_owner(Oid relid); * Handy execution-stage functions. 
*/ char * get_rel_name_or_relid(Oid relid); -char * get_op_name_or_opid(Oid opid); - Oid get_binary_operator_oid(char *opname, Oid arg1, Oid arg2); void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, From a6ca7c2e8767671341321fcff52dee6fbb9d9a81 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 20:16:23 +0300 Subject: [PATCH 0009/1124] fix check_overlap(): return false if there's no partitions --- src/pl_range_funcs.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index bd71ce09..4644a92d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -104,9 +104,17 @@ check_overlap(PG_FUNCTION_ARGS) RangeEntry *ranges; const PartRelationInfo *prel; + + /* Try fetching the PartRelationInfo structure */ prel = get_pathman_relation_info(parent_oid); + + /* If there's no prel, return FALSE (overlap is not possible) */ + if (!prel) PG_RETURN_BOOL(false); + + /* Emit an error if it is not partitioned by RANGE */ shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + /* Get base type of partitioned column */ part_type = getBaseType(prel->atttype); /* Fetch comparison functions */ From ba7b0366f12e84200994d9391243943607913792 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Oct 2016 20:25:14 +0300 Subject: [PATCH 0010/1124] improve pathman_domains regression test --- expected/pathman_domains.out | 40 +++++++++++++++++++++++++++++++++++- sql/pathman_domains.sql | 7 +++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 283a6d5b..169f66ea 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -87,6 +87,44 @@ ORDER BY range_min::INT, range_max::INT; domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 (14 rows) +SELECT drop_partitions('domains.dom_table'); +NOTICE: function domains.dom_table_upd_trig_func() does not exist, skipping +NOTICE: 49 rows copied from 
domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 +(1 row) + +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +SELECT * FROM pathman_partition_list +ORDER BY partition::TEXT; + parent | partition | parttype | partattr | range_min | range_max +-------------------+---------------------+----------+----------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | +(5 rows) + DROP SCHEMA domains CASCADE; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index bc5d227e..250c5615 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -33,5 +33,12 @@ SELECT * FROM pathman_partition_list ORDER BY range_min::INT, range_max::INT; +SELECT drop_partitions('domains.dom_table'); +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + +SELECT * FROM pathman_partition_list +ORDER BY partition::TEXT; + + DROP SCHEMA domains CASCADE; DROP EXTENSION pg_pathman 
CASCADE; From f603e6c5402c217e75ebe5c1a7647695063151b2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Oct 2016 15:17:37 +0300 Subject: [PATCH 0011/1124] fix args validation in invoke_on_partition_created_callback(), introduce pathman_calamity regression test --- Makefile | 3 +- expected/pathman_calamity.out | 298 ++++++++++++++++++++++++++++++++++ sql/pathman_calamity.sql | 114 +++++++++++++ src/pl_funcs.c | 4 +- 4 files changed, 416 insertions(+), 3 deletions(-) create mode 100644 expected/pathman_calamity.out create mode 100644 sql/pathman_calamity.sql diff --git a/Makefile b/Makefile index faf63057..58e5e939 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,8 @@ REGRESS = pathman_basic \ pathman_foreign_keys \ pathman_permissions \ pathman_rowmarks \ - pathman_copy_stmt_hooking + pathman_copy_stmt_hooking \ + pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out new file mode 100644 index 00000000..237c59f8 --- /dev/null +++ b/expected/pathman_calamity.out @@ -0,0 +1,298 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +-------------------- + calamity.part_test +(1 row) + +/* SELECT validate_relname(NULL); -- FIXME: %s */ +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_attribute_type() */ +SELECT get_attribute_type('calamity.part_test', 'val'); + get_attribute_type +-------------------- + integer +(1 row) + +SELECT get_attribute_type('calamity.part_test', NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT get_attribute_type(NULL, 'val') IS NULL; + ?column? +---------- + t +(1 row) + +SELECT get_attribute_type(NULL, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name_attnum() */ +SELECT build_check_constraint_name('calamity.part_test', 1::int2); + build_check_constraint_name +----------------------------- + pathman_part_test_1_check +(1 row) + +SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_check_constraint_name(NULL, 1::int2) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name_attname() */ +SELECT build_check_constraint_name('calamity.part_test', 'val'); + build_check_constraint_name +----------------------------- + pathman_part_test_1_check +(1 row) + +SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_check_constraint_name(NULL, 'val') IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function build_update_trigger_name() */ +SELECT build_update_trigger_name('calamity.part_test'); + build_update_trigger_name +--------------------------- + part_test_upd_trig +(1 row) + +SELECT build_update_trigger_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_update_trigger_func_name() */ +SELECT build_update_trigger_func_name('calamity.part_test'); + build_update_trigger_func_name +---------------------------------- + calamity.part_test_upd_trig_func +(1 row) + +SELECT build_update_trigger_func_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check invoke_on_partition_created_callback() for RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); +ERROR: both bounds must be provided for RANGE partition +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); +ERROR: both bounds must be provided for RANGE partition +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, 1); +ERROR: both bounds must be provided for RANGE partition +/* check invoke_on_partition_created_callback() for HASH */ +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: parent_relid should not be null +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: partition should not be null +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config('calamity.part_test', NULL); +ERROR: attname should not be null 
+SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_1_check" for partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_1_check" for partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition 
+ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val < 10); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: Wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: Wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: Wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 8 other 
objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql new file mode 100644 index 00000000..f7f01699 --- /dev/null +++ b/sql/pathman_calamity.sql @@ -0,0 +1,114 @@ +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; + + +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); +set client_min_messages = NOTICE; + + +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); + + +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); +/* SELECT validate_relname(NULL); -- FIXME: %s */ + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +SELECT get_parent_of_partition(NULL) IS NULL; + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); +SELECT get_base_type('calamity.test_domain'::regtype); +SELECT get_base_type(NULL) IS NULL; + +/* check function get_attribute_type() */ +SELECT get_attribute_type('calamity.part_test', 'val'); +SELECT get_attribute_type('calamity.part_test', NULL) IS NULL; +SELECT get_attribute_type(NULL, 'val') IS NULL; +SELECT get_attribute_type(NULL, NULL) IS NULL; + +/* check function build_check_constraint_name_attnum() */ +SELECT build_check_constraint_name('calamity.part_test', 1::int2); +SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; +SELECT build_check_constraint_name(NULL, 1::int2) IS NULL; +SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; + +/* check function build_check_constraint_name_attname() */ +SELECT build_check_constraint_name('calamity.part_test', 'val'); +SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; +SELECT build_check_constraint_name(NULL, 'val') IS NULL; +SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; + +/* check function build_update_trigger_name() */ 
+SELECT build_update_trigger_name('calamity.part_test'); +SELECT build_update_trigger_name(NULL) IS NULL; + +/* check function build_update_trigger_func_name() */ +SELECT build_update_trigger_func_name('calamity.part_test'); +SELECT build_update_trigger_func_name(NULL) IS NULL; + +/* check invoke_on_partition_created_callback() for RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, 1); + +/* check invoke_on_partition_created_callback() for HASH */ +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); + +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config('calamity.part_test', NULL); +SELECT add_to_pathman_config('calamity.part_test', 'val'); +SELECT disable_pathman_for('calamity.part_test'); +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +SELECT disable_pathman_for('calamity.part_test'); + + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ + +SELECT add_to_pathman_config('calamity.part_test', 'val'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN 
(COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val < 10); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_1_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; + +/* check GUC variable */ +SHOW pg_pathman.enable; + + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 289a4ebd..22f33475 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -768,7 +768,7 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) val; /* If there's no callback function specified, we're done */ - if (cb_oid == InvalidOid) + if (PG_ARGISNULL(ARG_CALLBACK) || cb_oid == InvalidOid) PG_RETURN_VOID(); if (PG_ARGISNULL(ARG_PARENT)) @@ -785,7 +785,7 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) case 5: { - if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_START)) + if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_END)) elog(ERROR, "both bounds must be provided for 
RANGE partition"); part_type = PT_RANGE; From 275d5aae611f0e866464de3c0eb8648cc8eca12d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 04:47:45 +0300 Subject: [PATCH 0012/1124] refactoring, improved append_child_relation(): make it act a lot more like postgres does, remove obsolete functions (change_varnos() etc), add safety regression test --- expected/pathman_basic.out | 72 +++++++-- sql/pathman_basic.sql | 29 ++-- src/hooks.c | 10 +- src/pathman.h | 7 +- src/pg_compat.c | 36 ++--- src/pg_compat.h | 6 +- src/pg_pathman.c | 315 +++++++++++++++++++++---------------- src/relation_info.c | 4 +- src/utils.c | 120 -------------- src/utils.h | 1 - 10 files changed, 280 insertions(+), 320 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 41ab6ab5..352c2bc5 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -268,16 +268,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) --- Temporarily commented out --- EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value BETWEEN 1 AND 2; --- QUERY PLAN --- ------------------------------------------------- --- Append --- -> Seq Scan on hash_rel_1 --- Filter: ((value >= 1) AND (value <= 2)) --- -> Seq Scan on hash_rel_2 --- Filter: ((value >= 1) AND (value <= 2)) --- (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ----------------------------------- @@ -1217,7 +1207,7 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman; -/* Test that everithing works fine without schemas */ +/* Test that everything works fine without schemas */ CREATE EXTENSION pg_pathman; /* Hash */ CREATE TABLE hash_rel ( @@ -1405,6 +1395,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; Filter: (dt = 'Tue Dec 15 00:00:00 
2015'::timestamp without time zone) (3 rows) +/* Test foreign keys */ CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; @@ -1428,6 +1419,65 @@ EXPLAIN (COSTS OFF) SELECT * FROM messages; -> Seq Scan on messages_2 (3 rows) +DROP TABLE messages, replies CASCADE; +NOTICE: drop cascades to 2 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE special_case_1_ind_o_s(val serial, comment text); +INSERT INTO special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('special_case_1_ind_o_s', 'val', 1, 50); +NOTICE: sequence "special_case_1_ind_o_s_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 + 
Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + DROP SCHEMA test CASCADE; NOTICE: drop cascades to 13 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 0fd56748..f77efad7 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -82,16 +82,7 @@ SET enable_seqscan = ON; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; --- Temporarily commented out --- EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value BETWEEN 1 AND 2; --- QUERY PLAN --- ------------------------------------------------- --- Append --- -> Seq Scan on hash_rel_1 --- Filter: ((value >= 1) AND (value <= 2)) --- -> Seq Scan on hash_rel_2 --- Filter: ((value >= 1) AND (value <= 2)) --- (5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; @@ -330,7 +321,8 @@ DROP TABLE test."RangeRel" CASCADE; DROP EXTENSION pg_pathman; -/* Test that everithing works fine without schemas */ + +/* Test that everything 
works fine without schemas */ CREATE EXTENSION pg_pathman; /* Hash */ @@ -378,6 +370,7 @@ SELECT drop_partitions('range_rel', TRUE); SELECT create_partitions_from_range('range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; +/* Test foreign keys */ CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; @@ -386,6 +379,20 @@ SELECT create_range_partitions('messages', 'id', 1, 100, 2); ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; SELECT create_range_partitions('messages', 'id', 1, 100, 2); EXPLAIN (COSTS OFF) SELECT * FROM messages; +DROP TABLE messages, replies CASCADE; + +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE special_case_1_ind_o_s(val serial, comment text); +INSERT INTO special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('special_case_1_ind_o_s', 'val', 1, 50); +INSERT INTO special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('special_case_1_ind_o_s', true); +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('special_case_1_ind_o_s', false); +EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; DROP SCHEMA test CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 42f9cc79..8ee74884 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -308,7 +308,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Add parent if needed */ if (prel->enable_parent) - 
append_child_relation(root, rel, rti, rte, 0, rte->relid, NULL); + append_child_relation(root, rti, 0, rte->relid, NULL); /* * Iterate all indexes in rangeset and append corresponding child @@ -316,18 +316,18 @@ pathman_rel_pathlist_hook(PlannerInfo *root, */ foreach(lc, ranges) { - IndexRange irange = lfirst_irange(lc); + IndexRange irange = lfirst_irange(lc); for (i = irange.ir_lower; i <= irange.ir_upper; i++) - append_child_relation(root, rel, rti, rte, i, children[i], wrappers); + append_child_relation(root, rti, i, children[i], wrappers); } /* Clear old path list */ list_free(rel->pathlist); rel->pathlist = NIL; - set_append_rel_pathlist(root, rel, rti, rte, pathkeyAsc, pathkeyDesc); - set_append_rel_size_compat(root, rel, rti, rte); + set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); + set_append_rel_size_compat(root, rel, rti); /* No need to go further (both nodes are disabled), return */ if (!(pg_pathman_enable_runtimeappend || diff --git a/src/pathman.h b/src/pathman.h index 84d71dd9..65e35885 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -123,8 +123,8 @@ extern List *inheritance_disabled_relids; extern PathmanState *pmstate; -int append_child_relation(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, int index, Oid childOID, List *wrappers); +int append_child_relation(PlannerInfo *root, Index parent_rti, + int ir_index, Oid child_oid, List *wrappers); search_rangerel_result search_range_partition_eq(const Datum value, FmgrInfo *cmp_func, @@ -142,8 +142,7 @@ void disable_inheritance_subselect(Query *parse); void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, PathKey *pathkeyAsc, - PathKey *pathkeyDesc); + PathKey *pathkeyAsc, PathKey *pathkeyDesc); typedef struct { diff --git a/src/pg_compat.c b/src/pg_compat.c index 7474d689..8917a78c 100644 --- a/src/pg_compat.c +++ 
b/src/pg_compat.c @@ -11,6 +11,7 @@ #include "pg_compat.h" #include "optimizer/pathnode.h" +#include "optimizer/prep.h" #include "port.h" #include "utils.h" @@ -18,8 +19,7 @@ void -set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte) +set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) { double parent_rows = 0; double parent_size = 0; @@ -63,31 +63,21 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, rel->tuples = parent_rows; } -extern -void copy_targetlist_compat(RelOptInfo *dest, RelOptInfo *rel) +void +adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest, + RelOptInfo *rel, AppendRelInfo *appinfo) { - ListCell *lc; - -#if PG_VERSION_NUM >= 90600 - dest->reltarget->exprs = NIL; - foreach(lc, rel->reltarget->exprs) -#else - dest->reltargetlist = NIL; - foreach(lc, rel->reltargetlist) -#endif - { - Node *new_target; - Node *node; - - node = (Node *) lfirst(lc); - new_target = copyObject(node); - change_varnos(new_target, rel->relid, dest->relid); #if PG_VERSION_NUM >= 90600 - dest->reltarget->exprs = lappend(dest->reltarget->exprs, new_target); + dest->reltarget->exprs = (List *) + adjust_appendrel_attrs(root, + (Node *) rel->reltarget->exprs, + appinfo); #else - dest->reltargetlist = lappend(dest->reltargetlist, new_target); + dest->reltargetlist = (List *) + adjust_appendrel_attrs(root, + (Node *) rel->reltargetlist, + appinfo); #endif - } } #if PG_VERSION_NUM >= 90600 diff --git a/src/pg_compat.h b/src/pg_compat.h index 7bef6778..839a0094 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -19,9 +19,9 @@ #include "optimizer/paths.h" -extern void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte); -extern void copy_targetlist_compat(RelOptInfo *dest, RelOptInfo *rel); +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +void adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest, + RelOptInfo 
*rel, AppendRelInfo *appinfo); #if PG_VERSION_NUM >= 90600 diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..5c1cd148 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -84,9 +84,12 @@ static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); static double estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy); -static RestrictInfo *rebuild_restrictinfo(Node *clause, RestrictInfo *old_rinfo); static bool pull_var_param(const WalkerContext *ctx, const OpExpr *expr, Node **var_ptr, Node **param_ptr); +static List *make_inh_translation_list_simplified(Relation oldrelation, + Relation newrelation, + Index newvarno); + /* copied from allpaths.h */ static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); @@ -350,172 +353,220 @@ handle_modification_query(Query *parse) return; } +/* + * Build the list of translations from parent Vars to child Vars + * for an inheritance child. + * + * NOTE: Inspired by function make_inh_translation_list(). 
+ */ +static List * +make_inh_translation_list_simplified(Relation oldrelation, + Relation newrelation, + Index newvarno) +{ + List *vars = NIL; + TupleDesc old_tupdesc = RelationGetDescr(oldrelation); + TupleDesc new_tupdesc = RelationGetDescr(newrelation); + int oldnatts = RelationGetNumberOfAttributes(oldrelation); + int newnatts = RelationGetNumberOfAttributes(newrelation); + int old_attno; + + /* Amounts of attributes must match */ + if (oldnatts != newnatts) + goto inh_translation_list_error; + + /* We expect that parent and partition have an identical tupdesc */ + for (old_attno = 0; old_attno < oldnatts; old_attno++) + { + Form_pg_attribute old_att, + new_att; + Oid atttypid; + int32 atttypmod; + Oid attcollation; + + old_att = old_tupdesc->attrs[old_attno]; + new_att = new_tupdesc->attrs[old_attno]; + + /* Attribute definitions must match */ + if (old_att->attisdropped != new_att->attisdropped || + old_att->atttypid != new_att->atttypid || + old_att->atttypmod != new_att->atttypmod || + old_att->attcollation != new_att->attcollation || + strcmp(NameStr(old_att->attname), NameStr(new_att->attname)) != 0) + { + goto inh_translation_list_error; + } + + if (old_att->attisdropped) + { + /* Just put NULL into this list entry */ + vars = lappend(vars, NULL); + continue; + } + + atttypid = old_att->atttypid; + atttypmod = old_att->atttypmod; + attcollation = old_att->attcollation; + + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (old_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); + } + + /* Everything's ok */ + return vars; + +/* We end up here if any attribute differs */ +inh_translation_list_error: + elog(ERROR, "partition \"%s\" must have exact " + "same structure as parent \"%s\"", + RelationGetRelationName(newrelation), + RelationGetRelationName(oldrelation)); + + return NIL; /* keep compiler happy */ +} + /* * Creates child relation and adds it to root. 
- * Returns child index in simple_rel_array + * Returns child index in simple_rel_array. + * + * NOTE: This code is partially based on the expand_inherited_rtentry() function. */ int -append_child_relation(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, int index, Oid childOid, List *wrappers) +append_child_relation(PlannerInfo *root, Index parent_rti, + int ir_index, Oid child_oid, List *wrappers) { - RangeTblEntry *childrte; - RelOptInfo *childrel; - Index childRTindex; + RangeTblEntry *parent_rte, + *child_rte; + RelOptInfo *parent_rel, + *child_rel; + Relation parent_relation, + child_relation; AppendRelInfo *appinfo; + Index childRTindex; + PlanRowMark *parent_rowmark, + *child_rowmark; ListCell *lc, *lc2; - Relation newrelation; - PlanRowMark *parent_rowmark; - PlanRowMark *child_rowmark; - AttrNumber i; + parent_rel = root->simple_rel_array[parent_rti]; + parent_rte = root->simple_rte_array[parent_rti]; + + /* Parent has already been locked by rewriter */ + parent_relation = heap_open(parent_rte->relid, NoLock); /* FIXME: acquire a suitable lock on partition */ - newrelation = heap_open(childOid, NoLock); + child_relation = heap_open(child_oid, NoLock); - /* - * Create RangeTblEntry for child relation. - * This code partially based on expand_inherited_rtentry() function. 
- */ - childrte = copyObject(rte); - childrte->relid = childOid; - childrte->relkind = newrelation->rd_rel->relkind; - childrte->inh = false; - childrte->requiredPerms = 0; - root->parse->rtable = lappend(root->parse->rtable, childrte); + /* Create RangeTblEntry for child relation */ + child_rte = copyObject(parent_rte); + child_rte->relid = child_oid; + child_rte->relkind = child_relation->rd_rel->relkind; + child_rte->inh = false; + child_rte->requiredPerms = 0; + + /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ + root->parse->rtable = lappend(root->parse->rtable, child_rte); childRTindex = list_length(root->parse->rtable); - root->simple_rte_array[childRTindex] = childrte; + root->simple_rte_array[childRTindex] = child_rte; + + /* Create RelOptInfo for this child (and make some estimates as well) */ + child_rel = build_simple_rel(root, childRTindex, RELOPT_OTHER_MEMBER_REL); - /* Create RelOptInfo */ - childrel = build_simple_rel(root, childRTindex, RELOPT_OTHER_MEMBER_REL); + /* Increase total_table_pages using the 'child_rel' */ + root->total_table_pages += (double) child_rel->pages; - /* Copy targetlist */ - copy_targetlist_compat(childrel, rel); - /* Copy attr_needed & attr_widths */ - childrel->attr_needed = (Relids *) - palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids)); - childrel->attr_widths = (int32 *) - palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32)); + /* Build an AppendRelInfo for this child */ + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = parent_rti; + appinfo->child_relid = childRTindex; + appinfo->parent_reloid = parent_rte->relid; + appinfo->translated_vars = make_inh_translation_list_simplified(parent_relation, + child_relation, + childRTindex); - for (i = 0; i < rel->max_attr - rel->min_attr + 1; i++) - childrel->attr_needed[i] = bms_copy(rel->attr_needed[i]); + /* Now append 'appinfo' to 'root->append_rel_list' */ + root->append_rel_list = lappend(root->append_rel_list, appinfo); - 
memcpy(childrel->attr_widths, rel->attr_widths, - (rel->max_attr - rel->min_attr + 1) * sizeof(int32)); + /* Adjust target list for this child */ + adjust_targetlist_compat(root, child_rel, parent_rel, appinfo); /* - * Copy restrictions. If it's not the parent table then copy only those - * restrictions that reference to this partition + * Copy restrictions. If it's not the parent table, copy only + * those restrictions that are related to this partition. */ - childrel->baserestrictinfo = NIL; - if (rte->relid != childOid) + child_rel->baserestrictinfo = NIL; + if (parent_rte->relid != child_oid) { - forboth(lc, wrappers, lc2, rel->baserestrictinfo) - { - bool alwaysTrue; - WrapperNode *wrap = (WrapperNode *) lfirst(lc); - Node *new_clause = wrapper_make_expression(wrap, index, &alwaysTrue); - RestrictInfo *old_rinfo = (RestrictInfo *) lfirst(lc2); - - if (alwaysTrue) - { - continue; - } - Assert(new_clause); - - if (and_clause((Node *) new_clause)) - { - ListCell *alc; + List *childquals = NIL; - foreach(alc, ((BoolExpr *) new_clause)->args) - { - Node *arg = (Node *) lfirst(alc); - RestrictInfo *new_rinfo = rebuild_restrictinfo(arg, old_rinfo); + forboth(lc, wrappers, lc2, parent_rel->baserestrictinfo) + { + WrapperNode *wrap = (WrapperNode *) lfirst(lc); + Node *new_clause; + bool always_true; - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - new_rinfo); - } - } - else - { - RestrictInfo *new_rinfo = rebuild_restrictinfo(new_clause, old_rinfo); + /* Generate a set of clauses for this child using WrapperNode */ + new_clause = wrapper_make_expression(wrap, ir_index, &always_true); - /* Replace old relids with new ones */ - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); + /* Don't add this clause if it's always true */ + if (always_true) + continue; - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - (void *) new_rinfo); - } + /* Clause should not be 
NULL */ + Assert(new_clause); + childquals = lappend(childquals, new_clause); } + + childquals = (List *) adjust_appendrel_attrs(root, + (Node *) childquals, + appinfo); + childquals = make_restrictinfos_from_actual_clauses(root, childquals); + child_rel->baserestrictinfo = childquals; } - /* If it's the parent table then copy all restrictions */ + /* If it's the parent table, copy all restrictions */ else { - foreach(lc, rel->baserestrictinfo) - { - RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - RestrictInfo *new_rinfo = (RestrictInfo *) copyObject(rinfo); - - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - (void *) new_rinfo); - } + List *childquals = NIL; + + childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); + childquals = (List *) adjust_appendrel_attrs(root, + (Node *) childquals, + appinfo); + childquals = make_restrictinfos_from_actual_clauses(root, childquals); + child_rel->baserestrictinfo = childquals; } - /* Build an AppendRelInfo for this parent and child */ - appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = rti; - appinfo->child_relid = childRTindex; - appinfo->parent_reloid = rte->relid; - root->append_rel_list = lappend(root->append_rel_list, appinfo); - root->total_table_pages += (double) childrel->pages; - - /* Add equivalence members */ - foreach(lc, root->eq_classes) - { - EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc); - - /* Copy equivalence member from parent and make some modifications */ - foreach(lc2, cur_ec->ec_members) - { - EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2); - EquivalenceMember *em; - - if (!bms_is_member(rti, cur_em->em_relids)) - continue; - - em = makeNode(EquivalenceMember); - em->em_expr = copyObject(cur_em->em_expr); - change_varnos((Node *) em->em_expr, rti, childRTindex); - em->em_relids = bms_add_member(NULL, childRTindex); - em->em_nullable_relids = 
cur_em->em_nullable_relids; - em->em_is_const = false; - em->em_is_child = true; - em->em_datatype = cur_em->em_datatype; - cur_ec->ec_members = lappend(cur_ec->ec_members, em); - } - } - childrel->has_eclass_joins = rel->has_eclass_joins; + /* + * We have to make child entries in the EquivalenceClass data + * structures as well. + */ + if (parent_rel->has_eclass_joins || has_useful_pathkeys(root, parent_rel)) + add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); + child_rel->has_eclass_joins = parent_rel->has_eclass_joins; /* Recalc parent relation tuples count */ - rel->tuples += childrel->tuples; + parent_rel->tuples += child_rel->tuples; /* Close child relations, but keep locks */ - heap_close(newrelation, NoLock); + heap_close(parent_relation, NoLock); + heap_close(child_relation, NoLock); /* Create rowmarks required for child rels */ - parent_rowmark = get_plan_rowmark(root->rowMarks, rti); + parent_rowmark = get_plan_rowmark(root->rowMarks, parent_rti); if (parent_rowmark) { child_rowmark = makeNode(PlanRowMark); child_rowmark->rti = childRTindex; - child_rowmark->prti = rti; + child_rowmark->prti = parent_rti; child_rowmark->rowmarkId = parent_rowmark->rowmarkId; /* Reselect rowmark type, because relkind might not match parent */ - child_rowmark->markType = select_rowmark_type(childrte, + child_rowmark->markType = select_rowmark_type(child_rte, parent_rowmark->strength); child_rowmark->allMarkTypes = (1 << child_rowmark->markType); child_rowmark->strength = parent_rowmark->strength; @@ -533,19 +584,6 @@ append_child_relation(PlannerInfo *root, RelOptInfo *rel, Index rti, return childRTindex; } -/* Create new restriction based on clause */ -static RestrictInfo * -rebuild_restrictinfo(Node *clause, RestrictInfo *old_rinfo) -{ - return make_restrictinfo((Expr *) clause, - old_rinfo->is_pushed_down, - old_rinfo->outerjoin_delayed, - old_rinfo->pseudoconstant, - old_rinfo->required_relids, - old_rinfo->outer_relids, - old_rinfo->nullable_relids); 
-} - /* Convert wrapper into expression for given index */ static Node * wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) @@ -575,8 +613,8 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) if (expr->boolop == OR_EXPR || expr->boolop == AND_EXPR) { - ListCell *lc; - List *args = NIL; + ListCell *lc; + List *args = NIL; foreach (lc, wrap->args) { @@ -1796,8 +1834,7 @@ set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) * Build access paths for an "append relation" */ void -set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte, +set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, PathKey *pathkeyAsc, PathKey *pathkeyDesc) { Index parentRTindex = rti; diff --git a/src/relation_info.c b/src/relation_info.c index 70287265..33f9494a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -245,9 +245,7 @@ get_pathman_relation_info(Oid relid) /* Refresh partitioned table cache entry (might turn NULL) */ /* TODO: possible refactoring, pass found 'prel' instead of searching */ - prel = refresh_pathman_relation_info(relid, - part_type, - attname); + prel = refresh_pathman_relation_info(relid, part_type, attname); } /* Else clear remaining cache entry */ else remove_pathman_relation_info(relid); diff --git a/src/utils.c b/src/utils.c index 831d5a24..6b1eaf89 100644 --- a/src/utils.c +++ b/src/utils.c @@ -35,9 +35,6 @@ static bool clause_contains_params_walker(Node *node, void *context); -static void change_varnos_in_restrinct_info(RestrictInfo *rinfo, - change_varno_context *context); -static bool change_varno_walker(Node *node, change_varno_context *context); static List *get_tableoids_list(List *tlist); static void lock_rows_visitor(Plan *plan, void *context); static bool rowmark_add_tableoids_walker(Node *node, void *context); @@ -214,123 +211,6 @@ list_reverse(List *l) return result; } -/* - * Changes varno attribute in all variables nested 
in the node - */ -void -change_varnos(Node *node, Oid old_varno, Oid new_varno) -{ - change_varno_context context; - context.old_varno = old_varno; - context.new_varno = new_varno; - - change_varno_walker(node, &context); -} - -static bool -change_varno_walker(Node *node, change_varno_context *context) -{ - ListCell *lc; - Var *var; - EquivalenceClass *ec; - EquivalenceMember *em; - - if (node == NULL) - return false; - - switch(node->type) - { - case T_Var: - var = (Var *) node; - if (var->varno == context->old_varno) - { - var->varno = context->new_varno; - var->varnoold = context->new_varno; - } - return false; - - case T_RestrictInfo: - change_varnos_in_restrinct_info((RestrictInfo *) node, context); - return false; - - case T_PathKey: - change_varno_walker((Node *) ((PathKey *) node)->pk_eclass, context); - return false; - - case T_EquivalenceClass: - ec = (EquivalenceClass *) node; - - foreach(lc, ec->ec_members) - change_varno_walker((Node *) lfirst(lc), context); - foreach(lc, ec->ec_derives) - change_varno_walker((Node *) lfirst(lc), context); - return false; - - case T_EquivalenceMember: - em = (EquivalenceMember *) node; - change_varno_walker((Node *) em->em_expr, context); - if (bms_is_member(context->old_varno, em->em_relids)) - { - em->em_relids = bms_del_member(em->em_relids, context->old_varno); - em->em_relids = bms_add_member(em->em_relids, context->new_varno); - } - return false; - - case T_TargetEntry: - change_varno_walker((Node *) ((TargetEntry *) node)->expr, context); - return false; - - case T_List: - foreach(lc, (List *) node) - change_varno_walker((Node *) lfirst(lc), context); - return false; - - default: - break; - } - - /* Should not find an unplanned subquery */ - Assert(!IsA(node, Query)); - - return expression_tree_walker(node, change_varno_walker, (void *) context); -} - -static void -change_varnos_in_restrinct_info(RestrictInfo *rinfo, change_varno_context *context) -{ - ListCell *lc; - - change_varno_walker((Node *) 
rinfo->clause, context); - if (rinfo->left_em) - change_varno_walker((Node *) rinfo->left_em->em_expr, context); - - if (rinfo->right_em) - change_varno_walker((Node *) rinfo->right_em->em_expr, context); - - if (rinfo->orclause) - foreach(lc, ((BoolExpr *) rinfo->orclause)->args) - { - Node *node = (Node *) lfirst(lc); - change_varno_walker(node, context); - } - - if (bms_is_member(context->old_varno, rinfo->clause_relids)) - { - rinfo->clause_relids = bms_del_member(rinfo->clause_relids, context->old_varno); - rinfo->clause_relids = bms_add_member(rinfo->clause_relids, context->new_varno); - } - if (bms_is_member(context->old_varno, rinfo->left_relids)) - { - rinfo->left_relids = bms_del_member(rinfo->left_relids, context->old_varno); - rinfo->left_relids = bms_add_member(rinfo->left_relids, context->new_varno); - } - if (bms_is_member(context->old_varno, rinfo->right_relids)) - { - rinfo->right_relids = bms_del_member(rinfo->right_relids, context->old_varno); - rinfo->right_relids = bms_add_member(rinfo->right_relids, context->new_varno); - } -} - /* * Basic plan tree walker * diff --git a/src/utils.h b/src/utils.h index 4222f549..7d1aac1d 100644 --- a/src/utils.h +++ b/src/utils.h @@ -32,7 +32,6 @@ typedef struct void plan_tree_walker(Plan *plan, void (*visitor) (Plan *plan, void *context), void *context); -void change_varnos(Node *node, Oid old_varno, Oid new_varno); /* * Rowmark processing. 
From 2d224b651ebe16b071ee49c69ae242b5a0c19650 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 16:10:03 +0300 Subject: [PATCH 0013/1124] refactoring, extract 'Relation parent_rel' from append_child_relation(), use list_free_deep() instead of list_free() --- src/hooks.c | 44 +++++++++++++++++++++++++------------------- src/pathman.h | 5 +++-- src/pg_pathman.c | 12 +++++------- 3 files changed, 33 insertions(+), 28 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 8ee74884..1ce1b23b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -209,22 +209,22 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Proceed iff relation 'rel' is partitioned */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - ListCell *lc; - Oid *children; - List *ranges, - *wrappers, - *rel_part_clauses = NIL; + Relation parent_rel; /* parent's relation (heap */ + Oid *children; /* selected children oids */ + List *ranges, /* a list of IndexRanges */ + *wrappers, /* a list of WrapperNodes */ + *rel_part_clauses = NIL; /* clauses with part. column */ PathKey *pathkeyAsc = NULL, *pathkeyDesc = NULL; - double paramsel = 1.0; + double paramsel = 1.0; /* default part selectivity */ WalkerContext context; + ListCell *lc; int i; if (prel->parttype == PT_RANGE) { /* - * Get pathkeys for ascending and descending sort by partition - * column + * Get pathkeys for ascending and descending sort by partitioned column. 
*/ List *pathkeys; Var *var; @@ -273,13 +273,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = irange_list_intersect(ranges, wrap->rangeset); } - /* - * Expand simple_rte_array and simple_rel_array - */ + /* Get number of selected partitions */ len = irange_list_length(ranges); if (prel->enable_parent) - len++; + len++; /* add parent too */ + /* Expand simple_rte_array and simple_rel_array */ if (len > 0) { /* Expand simple_rel_array and simple_rte_array */ @@ -306,26 +305,32 @@ pathman_rel_pathlist_hook(PlannerInfo *root, root->simple_rte_array = new_rte_array; } - /* Add parent if needed */ + /* Parent has already been locked by rewriter */ + parent_rel = heap_open(rte->relid, NoLock); + + /* Add parent if asked to */ if (prel->enable_parent) - append_child_relation(root, rti, 0, rte->relid, NULL); + append_child_relation(root, parent_rel, rti, 0, rte->relid, NULL); /* - * Iterate all indexes in rangeset and append corresponding child - * relations. + * Iterate all indexes in rangeset and append corresponding child relations. 
*/ foreach(lc, ranges) { IndexRange irange = lfirst_irange(lc); for (i = irange.ir_lower; i <= irange.ir_upper; i++) - append_child_relation(root, rti, i, children[i], wrappers); + append_child_relation(root, parent_rel, rti, i, children[i], wrappers); } - /* Clear old path list */ - list_free(rel->pathlist); + /* Now close parent relation */ + heap_close(parent_rel, NoLock); + /* Clear path list and make it point to NIL */ + list_free_deep(rel->pathlist); rel->pathlist = NIL; + + /* Generate new paths using the rels we've just added */ set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti); @@ -342,6 +347,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!clause_contains_params((Node *) rel_part_clauses)) return; + /* Generate Runtime[Merge]Append paths if needed */ foreach (lc, rel->pathlist) { AppendPath *cur_path = (AppendPath *) lfirst(lc); diff --git a/src/pathman.h b/src/pathman.h index 65e35885..41c9c554 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -123,8 +123,9 @@ extern List *inheritance_disabled_relids; extern PathmanState *pmstate; -int append_child_relation(PlannerInfo *root, Index parent_rti, - int ir_index, Oid child_oid, List *wrappers); +int append_child_relation(PlannerInfo *root, Relation parent_relation, + Index parent_rti, int ir_index, Oid child_oid, + List *wrappers); search_rangerel_result search_range_partition_eq(const Datum value, FmgrInfo *cmp_func, diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 5c1cd148..f0479142 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -436,15 +436,15 @@ make_inh_translation_list_simplified(Relation oldrelation, * NOTE: This code is partially based on the expand_inherited_rtentry() function. 
*/ int -append_child_relation(PlannerInfo *root, Index parent_rti, - int ir_index, Oid child_oid, List *wrappers) +append_child_relation(PlannerInfo *root, Relation parent_relation, + Index parent_rti, int ir_index, Oid child_oid, + List *wrappers) { RangeTblEntry *parent_rte, *child_rte; RelOptInfo *parent_rel, *child_rel; - Relation parent_relation, - child_relation; + Relation child_relation; AppendRelInfo *appinfo; Index childRTindex; PlanRowMark *parent_rowmark, @@ -455,8 +455,6 @@ append_child_relation(PlannerInfo *root, Index parent_rti, parent_rel = root->simple_rel_array[parent_rti]; parent_rte = root->simple_rte_array[parent_rti]; - /* Parent has already been locked by rewriter */ - parent_relation = heap_open(parent_rte->relid, NoLock); /* FIXME: acquire a suitable lock on partition */ child_relation = heap_open(child_oid, NoLock); @@ -552,7 +550,6 @@ append_child_relation(PlannerInfo *root, Index parent_rti, parent_rel->tuples += child_rel->tuples; /* Close child relations, but keep locks */ - heap_close(parent_relation, NoLock); heap_close(child_relation, NoLock); @@ -600,6 +597,7 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) /* Return NULL for always true and always false. 
*/ if (!found) return NULL; + if (!lossy) { *alwaysTrue = true; From 7986446bb2552cab8cb503053f14b2099cfd02ff Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 16:49:35 +0300 Subject: [PATCH 0014/1124] add SpawnPartitionsWorker to backend's locking group --- src/pathman_workers.c | 19 +++++++++++++++++++ src/pathman_workers.h | 10 ++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 0cea2103..c398a9b8 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -29,6 +29,7 @@ #include "storage/dsm.h" #include "storage/ipc.h" #include "storage/latch.h" +#include "storage/proc.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/memutils.h" @@ -256,6 +257,12 @@ create_partitions_bg_worker_segment(Oid relid, Datum value, Oid value_type) args->dbid = MyDatabaseId; args->partitioned_table = relid; +#if PG_VERSION_NUM >= 90600 + /* Initialize args for BecomeLockGroupMember() */ + args->parallel_master_pgproc = MyProc; + args->parallel_master_pid = MyProcPid; +#endif + /* Write value-related stuff */ args->value_type = value_type; args->value_size = datum_size; @@ -286,6 +293,11 @@ create_partitions_bg_worker(Oid relid, Datum value, Oid value_type) segment_handle = dsm_segment_handle(segment); bgw_args = (SpawnPartitionArgs *) dsm_segment_address(segment); +#if PG_VERSION_NUM >= 90600 + /* Become locking group leader */ + BecomeLockGroupLeader(); +#endif + /* Start worker and wait for it to finish */ start_bg_worker(spawn_partitions_bgw, bgw_main_spawn_partitions, @@ -337,6 +349,13 @@ bgw_main_spawn_partitions(Datum main_arg) spawn_partitions_bgw, MyProcPid); args = dsm_segment_address(segment); +#if PG_VERSION_NUM >= 90600 + /* Join locking group. 
If we can't join the group, quit */ + if (!BecomeLockGroupMember(args->parallel_master_pgproc, + args->parallel_master_pid)) + return; +#endif + /* Establish connection and start transaction */ BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); diff --git a/src/pathman_workers.h b/src/pathman_workers.h index dfa14d53..a6b06dd6 100644 --- a/src/pathman_workers.h +++ b/src/pathman_workers.h @@ -20,6 +20,10 @@ #include "postgres.h" #include "storage/spin.h" +#if PG_VERSION_NUM >= 90600 +#include "storage/lock.h" +#endif + /* * Store args, result and execution status of CreatePartitionsWorker. @@ -32,6 +36,12 @@ typedef struct Oid dbid; /* database which stores 'partitioned_table' */ Oid partitioned_table; +#if PG_VERSION_NUM >= 90600 + /* Args for BecomeLockGroupMember() function */ + PGPROC *parallel_master_pgproc; + pid_t parallel_master_pid; +#endif + /* Needed to decode Datum from 'values' */ Oid value_type; Size value_size; From d4cf177b03b059d53a4d4b52f99d9f0e23f48224 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 19:36:28 +0300 Subject: [PATCH 0015/1124] improve xact_bgw_conflicting_lock_exists() for locking groups --- src/xact_handling.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/xact_handling.c b/src/xact_handling.c index 44d9195b..a53fb4c3 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -82,6 +82,10 @@ xact_unlock_rel_exclusive(Oid relid) bool xact_bgw_conflicting_lock_exists(Oid relid) { +#if PG_VERSION_NUM >= 90600 + /* We use locking groups for 9.6+ */ + return false; +#else LOCKMODE lockmode; /* Try each lock >= ShareUpdateExclusiveLock */ @@ -94,6 +98,7 @@ xact_bgw_conflicting_lock_exists(Oid relid) } return false; +#endif } From 14e05f22b2e8637f88468719d62b187a23e1ede2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 19:44:36 +0300 Subject: [PATCH 0016/1124] [WIP] add 'allow_incomplete' option to function refresh_pathman_relation_info() --- src/init.c | 4 +++- 
src/pl_funcs.c | 4 +++- src/relation_info.c | 24 ++++++++++++------------ src/relation_info.h | 3 ++- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/init.c b/src/init.c index 5ae06a92..c06c7d39 100644 --- a/src/init.c +++ b/src/init.c @@ -752,7 +752,9 @@ read_pathman_config(void) } /* Create or update PartRelationInfo for this partitioned table */ - refresh_pathman_relation_info(relid, parttype, text_to_cstring(attname)); + refresh_pathman_relation_info(relid, parttype, + text_to_cstring(attname), + true); /* allow lazy prel loading */ } /* Clean resources */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 22f33475..4a454a35 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -623,7 +623,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, parttype, text_to_cstring(attname)); + refresh_pathman_relation_info(relid, parttype, + text_to_cstring(attname), + false); /* initialize immediately */ } PG_CATCH(); { diff --git a/src/relation_info.c b/src/relation_info.c index 70287265..5fe8ce0f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -67,7 +67,8 @@ static Oid get_parent_of_partition_internal(Oid partition, const PartRelationInfo * refresh_pathman_relation_info(Oid relid, PartType partitioning_type, - const char *part_column_name) + const char *part_column_name, + bool allow_incomplete) { const LOCKMODE lockmode = AccessShareLock; const TypeCacheEntry *typcache; @@ -103,14 +104,14 @@ refresh_pathman_relation_info(Oid relid, } /* First we assume that this entry is invalid */ - prel->valid = false; + prel->valid = false; /* Make both arrays point to NULL */ - prel->children = NULL; - prel->ranges = NULL; + prel->children = NULL; + prel->ranges = NULL; /* Set partitioning type */ - prel->parttype = partitioning_type; + prel->parttype = partitioning_type; /* Initialize PartRelationInfo using syscache & typcache */ 
prel->attnum = get_attnum(relid, part_column_name); @@ -245,9 +246,8 @@ get_pathman_relation_info(Oid relid) /* Refresh partitioned table cache entry (might turn NULL) */ /* TODO: possible refactoring, pass found 'prel' instead of searching */ - prel = refresh_pathman_relation_info(relid, - part_type, - attname); + prel = refresh_pathman_relation_info(relid, part_type, + attname, false); } /* Else clear remaining cache entry */ else remove_pathman_relation_info(relid); @@ -611,10 +611,10 @@ try_perform_parent_refresh(Oid parent) parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); attname = DatumGetTextP(values[Anum_pathman_config_attname - 1]); - /* If anything went wrong, return false (actually, it might throw ERROR) */ - if (!PrelIsValid(refresh_pathman_relation_info(parent, parttype, - text_to_cstring(attname)))) - return false; + /* If anything went wrong, return false (actually, it might emit ERROR) */ + refresh_pathman_relation_info(parent, parttype, + text_to_cstring(attname), + true); /* allow lazy */ } /* Not a partitioned relation */ else return false; diff --git a/src/relation_info.h b/src/relation_info.h index 5b50005a..bec9bca9 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -120,7 +120,8 @@ PrelLastChild(const PartRelationInfo *prel) const PartRelationInfo *refresh_pathman_relation_info(Oid relid, PartType partitioning_type, - const char *part_column_name); + const char *part_column_name, + bool allow_incomplete); void invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); From f775ebb84e0490212763fa8dfe1c0fb4cd8c3015 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Oct 2016 20:37:48 +0300 Subject: [PATCH 0017/1124] refactoring, relax locks in refresh_pathman_relation_info() and find_inheritance_children_array() --- src/init.c | 44 ++++++++++++++++++++++++++++++++++------- src/init.h | 16 
++++++++++++--- src/relation_info.c | 48 +++++++++++++++++++++++++++++++++------------ 3 files changed, 85 insertions(+), 23 deletions(-) diff --git a/src/init.c b/src/init.c index c06c7d39..461441f9 100644 --- a/src/init.c +++ b/src/init.c @@ -466,8 +466,12 @@ fill_prel_with_partitions(const Oid *partitions, * * borrowed from pg_inherits.c */ -Oid * -find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size) +find_children_status +find_inheritance_children_array(Oid parentrelId, + LOCKMODE lockmode, + bool nowait, + uint32 *children_size, /* ret value #1 */ + Oid **children) /* ret value #2 */ { Relation relation; SysScanDesc scan; @@ -485,8 +489,12 @@ find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size */ if (!has_subclass(parentrelId)) { - *size = 0; - return NULL; + /* Init return values */ + *children_size = 0; + children = NULL; + + /* Ok, could not find any children */ + return FCS_NO_CHILDREN; } /* @@ -540,7 +548,25 @@ find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size if (lockmode != NoLock) { /* Get the lock to synchronize against concurrent drop */ - LockRelationOid(inhrelid, lockmode); + if (nowait) + { + if (!ConditionalLockRelationOid(inhrelid, lockmode)) + { + uint32 j; + + /* Unlock all previously locked children */ + for (j = 0; j < i; j++) + UnlockRelationOid(oidarr[j], lockmode); + + /* Init return values */ + *children_size = numoids; + *children = oidarr; + + /* We couldn't lock this child, retreat! 
*/ + return FCS_COULD_NOT_LOCK; + } + } + else LockRelationOid(inhrelid, lockmode); /* * Now that we have the lock, double-check to see if the relation @@ -557,8 +583,12 @@ find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size } } - *size = numoids; - return oidarr; + /* Init return values */ + *children_size = numoids; + *children = oidarr; + + /* Ok, we have children */ + return FCS_FOUND; } /* diff --git a/src/init.h b/src/init.h index effb2675..2e889373 100644 --- a/src/init.h +++ b/src/init.h @@ -109,9 +109,19 @@ void fill_prel_with_partitions(const Oid *partitions, const uint32 parts_count, PartRelationInfo *prel); -Oid *find_inheritance_children_array(Oid parentrelId, - LOCKMODE lockmode, - uint32 *size); +/* Result of find_inheritance_children_array() */ +typedef enum +{ + FCS_NO_CHILDREN = 0, /* could not find any children (GOOD) */ + FCS_COULD_NOT_LOCK, /* could not lock one of the children */ + FCS_FOUND /* found some children (GOOD) */ +} find_children_status; + +find_children_status find_inheritance_children_array(Oid parentrelId, + LOCKMODE lockmode, + bool nowait, + uint32 *children_size, + Oid **children); char *build_check_constraint_name_internal(Oid relid, AttrNumber attno); diff --git a/src/relation_info.c b/src/relation_info.c index 5fe8ce0f..e87237ca 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -75,16 +75,16 @@ refresh_pathman_relation_info(Oid relid, Oid *prel_children; uint32 prel_children_count = 0, i; - bool found; + bool found_entry; PartRelationInfo *prel; Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; prel = (PartRelationInfo *) hash_search(partitioned_rels, (const void *) &relid, - HASH_ENTER, &found); + HASH_ENTER, &found_entry); elog(DEBUG2, - found ? + found_entry ? 
"Refreshing record for relation %u in pg_pathman's cache [%u]" : "Creating new record for relation %u in pg_pathman's cache [%u]", relid, MyProcPid); @@ -96,7 +96,7 @@ refresh_pathman_relation_info(Oid relid, prel->cmp_proc = InvalidOid; /* Clear outdated resources */ - if (found && PrelIsValid(prel)) + if (found_entry && PrelIsValid(prel)) { /* Free these arrays iff they're not NULL */ FreeChildrenArray(prel); @@ -136,16 +136,38 @@ refresh_pathman_relation_info(Oid relid, prel->cmp_proc = typcache->cmp_proc; prel->hash_proc = typcache->hash_proc; - LockRelationOid(relid, lockmode); - prel_children = find_inheritance_children_array(relid, lockmode, - &prel_children_count); - UnlockRelationOid(relid, lockmode); + /* Try locking parent, exit fast if 'allow_incomplete' */ + if (allow_incomplete) + { + if (!ConditionalLockRelationOid(relid, lockmode)) + return NULL; /* leave an invalid entry */ + } + else LockRelationOid(relid, lockmode); - /* If there's no children at all, remove this entry */ - if (prel_children_count == 0) + /* Try searching for children (don't wait if we can't lock) */ + switch (find_inheritance_children_array(relid, lockmode, true, + &prel_children_count, + &prel_children)) { - remove_pathman_relation_info(relid); - return NULL; + /* If there's no children at all, remove this entry */ + case FCS_NO_CHILDREN: + UnlockRelationOid(relid, lockmode); + remove_pathman_relation_info(relid); + return NULL; /* exit */ + + /* If can't lock children, leave an invalid entry */ + case FCS_COULD_NOT_LOCK: + UnlockRelationOid(relid, lockmode); + return NULL; /* exit */ + + /* Found some children, just unlock parent */ + case FCS_FOUND: + UnlockRelationOid(relid, lockmode); + break; /* continue */ + + /* Error: unknown result code */ + default: + elog(ERROR, "error in " CppAsString(find_inheritance_children_array)); } /* @@ -156,7 +178,7 @@ refresh_pathman_relation_info(Oid relid, */ fill_prel_with_partitions(prel_children, prel_children_count, prel); - /* Add 
"partition+parent" tuple to cache */ + /* Add "partition+parent" pair to cache */ for (i = 0; i < prel_children_count; i++) cache_parent_of_partition(prel_children[i], relid); From aa162a14324f0e4d0fc2d6143ef50454c49420c6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 22 Oct 2016 00:00:20 +0300 Subject: [PATCH 0018/1124] fixes for refresh_pathman_relation_info(), use allow_incomplete in find_inheritance_children_array() call --- src/relation_info.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index e87237ca..50575475 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -23,9 +23,10 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" -#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/snapmgr.h" +#include "utils/syscache.h" +#include "utils/lsyscache.h" #include "utils/typcache.h" @@ -144,8 +145,18 @@ refresh_pathman_relation_info(Oid relid, } else LockRelationOid(relid, lockmode); + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + { + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + remove_pathman_relation_info(relid); + return NULL; /* exit */ + } + /* Try searching for children (don't wait if we can't lock) */ - switch (find_inheritance_children_array(relid, lockmode, true, + switch (find_inheritance_children_array(relid, lockmode, + allow_incomplete, &prel_children_count, &prel_children)) { From 41d87d8d1179be04ee8ca9c7e832c0ab012261fa Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 22 Oct 2016 18:23:50 +0300 Subject: [PATCH 0019/1124] lock parent pefore first access to its structure, don't forget to unlock children --- src/relation_info.c | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 50575475..c56e1631 
100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -94,7 +94,7 @@ refresh_pathman_relation_info(Oid relid, * NOTE: Trick clang analyzer (first access without NULL pointer check). * Access to field 'valid' results in a dereference of a null pointer. */ - prel->cmp_proc = InvalidOid; + prel->cmp_proc = InvalidOid; /* Clear outdated resources */ if (found_entry && PrelIsValid(prel)) @@ -104,6 +104,23 @@ refresh_pathman_relation_info(Oid relid, FreeRangesArray(prel); } + /* Try locking parent, exit fast if 'allow_incomplete' */ + if (allow_incomplete) + { + if (!ConditionalLockRelationOid(relid, lockmode)) + return NULL; /* leave an invalid entry */ + } + else LockRelationOid(relid, lockmode); + + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + { + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + remove_pathman_relation_info(relid); + return NULL; /* exit */ + } + /* First we assume that this entry is invalid */ prel->valid = false; @@ -137,23 +154,6 @@ refresh_pathman_relation_info(Oid relid, prel->cmp_proc = typcache->cmp_proc; prel->hash_proc = typcache->hash_proc; - /* Try locking parent, exit fast if 'allow_incomplete' */ - if (allow_incomplete) - { - if (!ConditionalLockRelationOid(relid, lockmode)) - return NULL; /* leave an invalid entry */ - } - else LockRelationOid(relid, lockmode); - - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - /* Nope, it doesn't, remove this entry and exit */ - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ - } - /* Try searching for children (don't wait if we can't lock) */ switch (find_inheritance_children_array(relid, lockmode, allow_incomplete, @@ -189,10 +189,16 @@ refresh_pathman_relation_info(Oid relid, */ fill_prel_with_partitions(prel_children, prel_children_count, prel); - /* Add "partition+parent" pair to cache */ + /* 
Peform some actions for each child */ for (i = 0; i < prel_children_count; i++) + { + /* Add "partition+parent" pair to cache */ cache_parent_of_partition(prel_children[i], relid); + /* Now it's time to unlock this child */ + UnlockRelationOid(prel_children[i], lockmode); + } + pfree(prel_children); /* Read additional parameters ('enable_parent' and 'auto' at the moment) */ From d83475b57a5a251f64b08dd715305b20f7d84888 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Oct 2016 13:11:57 +0300 Subject: [PATCH 0020/1124] Add test case for index scans for child nodes under enable_parent is set --- expected/pathman_basic.out | 65 ++++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 13 ++++++++ 2 files changed, 78 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 352c2bc5..5834ede3 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1478,6 +1478,71 @@ EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comm Index Cond: ((val < 75) AND (comment = 'a'::text)) (5 rows) +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test_index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test_index_on_childs(c2); +INSERT INTO test_index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test_index_on_childs', 'c1', 1, 1000, 0, false); +NOTICE: sequence "test_index_on_childs_seq" does not exist, skipping + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test_index_on_childs', 1, 1000, 'test_index_on_childs_1_1K'); + add_range_partition +--------------------------- + test_index_on_childs_1_1K +(1 row) + +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_1K_2K'); + append_range_partition +---------------------------- + test_index_on_childs_1K_2K +(1 row) + +SELECT 
append_range_partition('test_index_on_childs', 'test_index_on_childs_2K_3K'); + append_range_partition +---------------------------- + test_index_on_childs_2K_3K +(1 row) + +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_3K_4K'); + append_range_partition +---------------------------- + test_index_on_childs_3K_4K +(1 row) + +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_4K_5K'); + append_range_partition +---------------------------- + test_index_on_childs_4K_5K +(1 row) + +SELECT set_enable_parent('test_index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test_index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test_index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +---------------------------------------------------------------------------------------- + Append + -> Index Scan using test_index_on_childs_c2_idx on test_index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using test_index_on_childs_1_1k_c2_idx on test_index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using test_index_on_childs_1k_2k_c2_idx on test_index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using test_index_on_childs_2k_3k_c2_idx on test_index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + DROP SCHEMA test CASCADE; NOTICE: drop cascades to 13 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f77efad7..eba61771 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -394,6 +394,19 @@ EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comm SELECT set_enable_parent('special_case_1_ind_o_s', false); EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +/* Test index scans on child relation under enable_parent is set */ +CREATE 
TABLE test_index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test_index_on_childs(c2); +INSERT INTO test_index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test_index_on_childs', 'c1', 1, 1000, 0, false); +SELECT add_range_partition('test_index_on_childs', 1, 1000, 'test_index_on_childs_1_1K'); +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_1K_2K'); +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_2K_3K'); +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_3K_4K'); +SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_4K_5K'); +SELECT set_enable_parent('test_index_on_childs', true); +VACUUM ANALYZE test_index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test_index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; From 4bd4295b14034c896dd5098f7aef327ddb4de38d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Oct 2016 14:10:28 +0300 Subject: [PATCH 0021/1124] mark 'prel' invalid before locking parent --- src/relation_info.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index c56e1631..e9b0c52a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -104,6 +104,9 @@ refresh_pathman_relation_info(Oid relid, FreeRangesArray(prel); } + /* First we assume that this entry is invalid */ + prel->valid = false; + /* Try locking parent, exit fast if 'allow_incomplete' */ if (allow_incomplete) { @@ -121,9 +124,6 @@ refresh_pathman_relation_info(Oid relid, return NULL; /* exit */ } - /* First we assume that this entry is invalid */ - prel->valid = false; - /* Make both arrays point to NULL */ prel->children = NULL; prel->ranges = NULL; From 5b0c96ddb473b08105fcd778f8ed8d59050f70ec Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Oct 
2016 19:20:14 +0300 Subject: [PATCH 0022/1124] Incorporate tables for testing in pathman_basic.sql into schema test to remove their once after testing stage --- expected/pathman_basic.out | 192 ++++++++++++++++++------------------- sql/pathman_basic.sql | 124 ++++++++++++------------ 2 files changed, 157 insertions(+), 159 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 5834ede3..1a21d744 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1210,17 +1210,17 @@ DROP EXTENSION pg_pathman; /* Test that everything works fine without schemas */ CREATE EXTENSION pg_pathman; /* Hash */ -CREATE TABLE hash_rel ( +CREATE TABLE test.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER NOT NULL); -INSERT INTO hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; -SELECT create_hash_partitions('hash_rel', 'value', 3); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); create_hash_partitions ------------------------ 3 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; QUERY PLAN ------------------------------------------------------ Append @@ -1233,43 +1233,42 @@ EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; (7 rows) /* Range */ -CREATE TABLE range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -NOTICE: sequence "range_rel_seq" does not exist, skipping +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT 
create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); create_range_partitions ------------------------- 12 (1 row) -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); merge_range_partitions ------------------------ (1 row) -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); split_range_partition ------------------------- {01-01-2010,03-01-2010} (1 row) -SELECT append_range_partition('range_rel'); +SELECT append_range_partition('test.range_rel'); append_range_partition ------------------------ - public.range_rel_14 + test.range_rel_14 (1 row) -SELECT prepend_range_partition('range_rel'); +SELECT prepend_range_partition('test.range_rel'); prepend_range_partition ------------------------- - public.range_rel_15 + test.range_rel_15 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; QUERY PLAN -------------------------------- Append @@ -1278,7 +1277,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; -> Seq Scan on range_rel_13 (4 rows) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt > '2010-12-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; QUERY PLAN -------------------------------------------------------------------------------- Append @@ -1288,10 +1287,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt > '2010-12-15'; (4 rows) /* Temporary table for JOINs */ -CREATE TABLE tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); /* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; +EXPLAIN (COSTS 
OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Update on range_rel_6 @@ -1299,14 +1298,14 @@ EXPLAIN (COSTS OFF) UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) -UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value -----+--------------------------+------- 166 | Tue Jun 15 00:00:00 2010 | 111 (1 row) -EXPLAIN (COSTS OFF) DELETE FROM range_rel WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Delete on range_rel_6 @@ -1314,13 +1313,13 @@ EXPLAIN (COSTS OFF) DELETE FROM range_rel WHERE dt = '2010-06-15'; Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) -DELETE FROM range_rel WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Update on range_rel_1 r @@ -1332,8 +1331,8 @@ EXPLAIN (COSTS OFF) UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) (7 rows) -UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND 
r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Delete on range_rel_1 r @@ -1345,49 +1344,49 @@ EXPLAIN (COSTS OFF) DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02 Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) (7 rows) -DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ -SELECT drop_partitions('range_rel'); -NOTICE: function public.range_rel_upd_trig_func() does not exist, skipping -NOTICE: 44 rows copied from range_rel_1 -NOTICE: 31 rows copied from range_rel_3 -NOTICE: 30 rows copied from range_rel_4 -NOTICE: 31 rows copied from range_rel_5 -NOTICE: 29 rows copied from range_rel_6 -NOTICE: 31 rows copied from range_rel_7 -NOTICE: 31 rows copied from range_rel_8 -NOTICE: 30 rows copied from range_rel_9 -NOTICE: 31 rows copied from range_rel_10 -NOTICE: 30 rows copied from range_rel_11 -NOTICE: 31 rows copied from range_rel_12 -NOTICE: 14 rows copied from range_rel_13 -NOTICE: 0 rows copied from range_rel_14 -NOTICE: 0 rows copied from range_rel_15 +SELECT drop_partitions('test.range_rel'); +NOTICE: function test.range_rel_upd_trig_func() does not exist, skipping +NOTICE: 44 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 29 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 
rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 drop_partitions ----------------- 14 (1 row) -SELECT create_partitions_from_range('range_rel', 'id', 1, 1000, 100); +SELECT create_partitions_from_range('test.range_rel', 'id', 1, 1000, 100); create_partitions_from_range ------------------------------ 10 (1 row) -SELECT drop_partitions('range_rel', TRUE); -NOTICE: function public.range_rel_upd_trig_func() does not exist, skipping +SELECT drop_partitions('test.range_rel', TRUE); +NOTICE: function test.range_rel_upd_trig_func() does not exist, skipping drop_partitions ----------------- 10 (1 row) -SELECT create_partitions_from_range('range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); +SELECT create_partitions_from_range('test.range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); create_partitions_from_range ------------------------------ 12 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-12-15'; QUERY PLAN -------------------------------------------------------------------------------- Append @@ -1396,22 +1395,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; (3 rows) /* Test foreign keys */ -CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); -INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -WARNING: foreign key "replies_message_id_fkey" references relation 
"messages" -ERROR: relation "messages" is referenced from other relations -ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); +CREATE TABLE test.messages(id SERIAL PRIMARY KEY, msg TEXT); +CREATE TABLE test.replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES test.messages(id), msg TEXT); +INSERT INTO test.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; +INSERT INTO test.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; +SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); +WARNING: foreign key "replies_message_id_fkey" references relation "test.messages" +ERROR: relation "test.messages" is referenced from other relations +ALTER TABLE test.replies DROP CONSTRAINT replies_message_id_fkey; +SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); NOTICE: sequence "messages_seq" does not exist, skipping create_range_partitions ------------------------- 2 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM messages; +EXPLAIN (COSTS OFF) SELECT * FROM test.messages; QUERY PLAN ------------------------------ Append @@ -1419,22 +1418,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM messages; -> Seq Scan on messages_2 (3 rows) -DROP TABLE messages, replies CASCADE; +DROP TABLE test.messages, test.replies CASCADE; NOTICE: drop cascades to 2 other objects /* Special test case (quals generation) -- fixing commit f603e6c5 */ -CREATE TABLE special_case_1_ind_o_s(val serial, comment text); -INSERT INTO special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; -SELECT create_range_partitions('special_case_1_ind_o_s', 'val', 1, 50); +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); NOTICE: sequence "special_case_1_ind_o_s_seq" does not exist, skipping create_range_partitions 
------------------------- 4 (1 row) -INSERT INTO special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); -CREATE INDEX ON special_case_1_ind_o_s_2 (val, comment); -VACUUM ANALYZE special_case_1_ind_o_s_2; -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; QUERY PLAN -------------------------------------------------------------------------------------------------- Append @@ -1444,13 +1443,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comm Index Cond: ((val < 75) AND (comment = 'a'::text)) (5 rows) -SELECT set_enable_parent('special_case_1_ind_o_s', true); +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); set_enable_parent ------------------- (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; QUERY PLAN -------------------------------------------------------------------------------------------------- Append @@ -1462,13 +1461,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comm Index Cond: ((val < 75) AND (comment = 'a'::text)) (7 rows) -SELECT set_enable_parent('special_case_1_ind_o_s', false); +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); set_enable_parent ------------------- (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; QUERY PLAN -------------------------------------------------------------------------------------------------- Append @@ 
-1479,72 +1478,71 @@ EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comm (5 rows) /* Test index scans on child relation under enable_parent is set */ -CREATE TABLE test_index_on_childs(c1 integer not null, c2 integer); -CREATE INDEX ON test_index_on_childs(c2); -INSERT INTO test_index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; -SELECT create_range_partitions('test_index_on_childs', 'c1', 1, 1000, 0, false); -NOTICE: sequence "test_index_on_childs_seq" does not exist, skipping +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); +NOTICE: sequence "index_on_childs_seq" does not exist, skipping create_range_partitions ------------------------- 0 (1 row) -SELECT add_range_partition('test_index_on_childs', 1, 1000, 'test_index_on_childs_1_1K'); +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1K'); add_range_partition --------------------------- - test_index_on_childs_1_1K + test.index_on_childs_1_1K (1 row) -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_1K_2K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1K_2K'); append_range_partition ---------------------------- - test_index_on_childs_1K_2K + test.index_on_childs_1K_2K (1 row) -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_2K_3K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2K_3K'); append_range_partition ---------------------------- - test_index_on_childs_2K_3K + test.index_on_childs_2K_3K (1 row) -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_3K_4K'); +SELECT append_range_partition('test.index_on_childs', 
'test.index_on_childs_3K_4K'); append_range_partition ---------------------------- - test_index_on_childs_3K_4K + test.index_on_childs_3K_4K (1 row) -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_4K_5K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4K_5K'); append_range_partition ---------------------------- - test_index_on_childs_4K_5K + test.index_on_childs_4K_5K (1 row) -SELECT set_enable_parent('test_index_on_childs', true); +SELECT set_enable_parent('test.index_on_childs', true); set_enable_parent ------------------- (1 row) -VACUUM ANALYZE test_index_on_childs; -EXPLAIN (COSTS OFF) SELECT * FROM test_index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; - QUERY PLAN ----------------------------------------------------------------------------------------- +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ Append - -> Index Scan using test_index_on_childs_c2_idx on test_index_on_childs + -> Index Scan using index_on_childs_c2_idx on index_on_childs Index Cond: (c2 = 500) Filter: ((c1 > 100) AND (c1 < 2500)) - -> Index Scan using test_index_on_childs_1_1k_c2_idx on test_index_on_childs_1_1k + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k Index Cond: (c2 = 500) Filter: (c1 > 100) - -> Index Scan using test_index_on_childs_1k_2k_c2_idx on test_index_on_childs_1k_2k + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k Index Cond: (c2 = 500) - -> Index Scan using test_index_on_childs_2k_3k_c2_idx on test_index_on_childs_2k_3k + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k Index Cond: (c2 = 500) Filter: (c1 < 2500) (12 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 45 other objects DROP 
EXTENSION pg_pathman CASCADE; -NOTICE: drop cascades to 3 other objects DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index eba61771..19b59263 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -326,87 +326,87 @@ DROP EXTENSION pg_pathman; CREATE EXTENSION pg_pathman; /* Hash */ -CREATE TABLE hash_rel ( +CREATE TABLE test.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER NOT NULL); -INSERT INTO hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; -SELECT create_hash_partitions('hash_rel', 'value', 3); -EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; /* Range */ -CREATE TABLE range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); -SELECT append_range_partition('range_rel'); -SELECT prepend_range_partition('range_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt > '2010-12-15'; +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); +SELECT 
append_range_partition('test.range_rel'); +SELECT prepend_range_partition('test.range_rel'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; /* Temporary table for JOINs */ -CREATE TABLE tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); /* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) DELETE FROM range_rel WHERE dt = '2010-06-15'; -DELETE FROM range_rel WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +DELETE 
FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ -SELECT drop_partitions('range_rel'); -SELECT create_partitions_from_range('range_rel', 'id', 1, 1000, 100); -SELECT drop_partitions('range_rel', TRUE); -SELECT create_partitions_from_range('range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; +SELECT drop_partitions('test.range_rel'); +SELECT create_partitions_from_range('test.range_rel', 'id', 1, 1000, 100); +SELECT drop_partitions('test.range_rel', TRUE); +SELECT create_partitions_from_range('test.range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-12-15'; /* Test foreign keys */ -CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); -INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -EXPLAIN (COSTS OFF) SELECT * FROM messages; -DROP TABLE messages, replies CASCADE; +CREATE TABLE test.messages(id SERIAL PRIMARY KEY, msg TEXT); +CREATE TABLE test.replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES test.messages(id), msg TEXT); +INSERT INTO test.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; +INSERT INTO test.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; +SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); +ALTER TABLE test.replies DROP CONSTRAINT replies_message_id_fkey; +SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); +EXPLAIN 
(COSTS OFF) SELECT * FROM test.messages; +DROP TABLE test.messages, test.replies CASCADE; /* Special test case (quals generation) -- fixing commit f603e6c5 */ -CREATE TABLE special_case_1_ind_o_s(val serial, comment text); -INSERT INTO special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; -SELECT create_range_partitions('special_case_1_ind_o_s', 'val', 1, 50); -INSERT INTO special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); -CREATE INDEX ON special_case_1_ind_o_s_2 (val, comment); -VACUUM ANALYZE special_case_1_ind_o_s_2; -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; -SELECT set_enable_parent('special_case_1_ind_o_s', true); -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; -SELECT set_enable_parent('special_case_1_ind_o_s', false); -EXPLAIN (COSTS OFF) SELECT * FROM special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; /* Test index scans on child relation under enable_parent is set */ -CREATE TABLE test_index_on_childs(c1 integer not null, c2 integer); -CREATE INDEX ON test_index_on_childs(c2); -INSERT INTO test_index_on_childs 
SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; -SELECT create_range_partitions('test_index_on_childs', 'c1', 1, 1000, 0, false); -SELECT add_range_partition('test_index_on_childs', 1, 1000, 'test_index_on_childs_1_1K'); -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_1K_2K'); -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_2K_3K'); -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_3K_4K'); -SELECT append_range_partition('test_index_on_childs', 'test_index_on_childs_4K_5K'); -SELECT set_enable_parent('test_index_on_childs', true); -VACUUM ANALYZE test_index_on_childs; -EXPLAIN (COSTS OFF) SELECT * FROM test_index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1K_2K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2K_3K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3K_4K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4K_5K'); +SELECT set_enable_parent('test.index_on_childs', true); +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; From d7685a2a76be172e7c08561847e152c79359ad64 Mon Sep 17 00:00:00 2001 From: EliSnow Date: Mon, 24 Oct 2016 12:28:49 -0600 Subject: [PATCH 0023/1124] Add files via upload --- LICENSE | 7 +++++++ 1 file changed, 7 insertions(+) 
create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..df8f0642 --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2015-2016, Postgres Professional + +Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
\ No newline at end of file From 55507b8c47ba52be6ade1d091f63dffacc29751a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Oct 2016 23:23:26 +0300 Subject: [PATCH 0024/1124] Update LICENSE --- LICENSE | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index df8f0642..5c6dab26 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,11 @@ Copyright (c) 2015-2016, Postgres Professional +Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + +Portions Copyright (c) 1994, The Regents of the University of California + Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. \ No newline at end of file +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
From 7421589df4cb991ae9fbc955deb77c8e08e71108 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Oct 2016 16:57:53 +0300 Subject: [PATCH 0025/1124] fix COPY (columns) TO statement (issue #50) --- expected/pathman_copy_stmt_hooking.out | 149 +++++++++++++++---------- sql/pathman_copy_stmt_hooking.sql | 37 ++++-- src/copy_stmt_hooking.c | 59 +++++++--- 3 files changed, 165 insertions(+), 80 deletions(-) diff --git a/expected/pathman_copy_stmt_hooking.out b/expected/pathman_copy_stmt_hooking.out index bedc8035..d0fcaaf7 100644 --- a/expected/pathman_copy_stmt_hooking.out +++ b/expected/pathman_copy_stmt_hooking.out @@ -1,7 +1,11 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; CREATE SCHEMA copy_stmt_hooking; -CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; CREATE INDEX ON copy_stmt_hooking.test(val); /* test for RANGE partitioning */ @@ -20,47 +24,47 @@ VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; /* COPY TO */ COPY copy_stmt_hooking.test TO stdout; -1 comment -2 comment -3 comment -4 comment -5 comment -6 comment -7 comment -8 comment -9 comment -10 comment -11 comment -12 comment -13 comment -14 comment -15 comment -16 comment -17 comment -18 comment -19 comment -20 comment +1 comment \N \N +2 comment \N \N +3 comment \N \N +4 comment \N \N +5 comment \N \N +6 comment \N \N +7 comment \N \N +8 comment \N \N +9 comment \N \N +10 comment \N \N +11 comment \N \N +12 comment \N \N +13 comment \N \N +14 comment \N \N +15 comment \N \N +16 comment \N \N +17 comment \N \N +18 comment \N \N +19 comment \N \N +20 comment \N \N \copy copy_stmt_hooking.test to stdout (format csv) -1,comment -2,comment -3,comment -4,comment -5,comment -6,comment -7,comment -8,comment -9,comment -10,comment -11,comment -12,comment -13,comment -14,comment 
-15,comment -16,comment -17,comment -18,comment -19,comment -20,comment +1,comment,, +2,comment,, +3,comment,, +4,comment,, +5,comment,, +6,comment,, +7,comment,, +8,comment,, +9,comment,, +10,comment,, +11,comment,, +12,comment,, +13,comment,, +14,comment,, +15,comment,, +16,comment,, +17,comment,, +18,comment,, +19,comment,, +20,comment,, \copy copy_stmt_hooking.test(comment) to stdout comment comment @@ -92,31 +96,62 @@ SELECT count(*) FROM ONLY copy_stmt_hooking.test; (1 row) SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; - val | comment | tableoid ------+---------+-------------------------- - 1 | test_1 | copy_stmt_hooking.test_1 - 6 | test_2 | copy_stmt_hooking.test_2 - 7 | test_2 | copy_stmt_hooking.test_2 - 11 | test_3 | copy_stmt_hooking.test_3 - 16 | test_4 | copy_stmt_hooking.test_4 + val | comment | c3 | c4 | tableoid +-----+---------+----+----+-------------------------- + 1 | test_1 | 0 | 0 | copy_stmt_hooking.test_1 + 6 | test_2 | 0 | 0 | copy_stmt_hooking.test_2 + 7 | test_2 | 0 | 0 | copy_stmt_hooking.test_2 + 11 | test_3 | 0 | 0 | copy_stmt_hooking.test_3 + 16 | test_4 | 0 | 0 | copy_stmt_hooking.test_4 (5 rows) +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; +/* COPY FROM (specified columns) */ +COPY copy_stmt_hooking.test (val) TO stdout; +1 +6 +7 +11 +16 +COPY copy_stmt_hooking.test (val, comment) TO stdout; +1 test_1 +6 test_2 +7 test_2 +11 test_3 +16 test_4 +COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; +0 1 test_1 +0 6 test_2 +0 7 test_2 +0 11 test_3 +0 16 test_4 +COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 /* COPY TO (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test 
FROM stdin; ERROR: no suitable partition for key '21' SELECT * FROM copy_stmt_hooking.test WHERE val > 20; - val | comment ------+--------- + val | comment | c3 | c4 +-----+---------+----+---- (0 rows) /* COPY TO (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; SELECT * FROM copy_stmt_hooking.test WHERE val > 20; - val | comment ------+-------------- - 21 | test_no_part + val | comment | c3 | c4 +-----+--------------+----+---- + 21 | test_no_part | 0 | 0 (1 row) /* COPY TO (partitioned column is not specified) */ @@ -147,10 +182,10 @@ SELECT count(*) FROM ONLY copy_stmt_hooking.test; (1 row) SELECT * FROM copy_stmt_hooking.test ORDER BY val; - val | comment ------+--------- - 1 | hash_1 - 6 | hash_2 + val | comment | c3 | c4 +-----+---------+----+---- + 1 | hash_1 | 0 | 0 + 6 | hash_2 | 0 | 0 (2 rows) DROP SCHEMA copy_stmt_hooking CASCADE; diff --git a/sql/pathman_copy_stmt_hooking.sql b/sql/pathman_copy_stmt_hooking.sql index 00d6a5d2..b7e9868a 100644 --- a/sql/pathman_copy_stmt_hooking.sql +++ b/sql/pathman_copy_stmt_hooking.sql @@ -4,7 +4,11 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA copy_stmt_hooking; -CREATE TABLE copy_stmt_hooking.test(val int not null, comment text); +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; CREATE INDEX ON copy_stmt_hooking.test(val); @@ -27,26 +31,39 @@ COPY copy_stmt_hooking.test TO stdout; /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; -1 test_1 -6 test_2 -7 test_2 -11 test_3 -16 test_4 +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 \. 
SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; + +/* COPY FROM (specified columns) */ +COPY copy_stmt_hooking.test (val) TO stdout; +COPY copy_stmt_hooking.test (val, comment) TO stdout; +COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; +COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; + /* COPY TO (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; -21 test_no_part +21 test_no_part 0 0 \. SELECT * FROM copy_stmt_hooking.test WHERE val > 20; /* COPY TO (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; -21 test_no_part +21 test_no_part 0 0 \. SELECT * FROM copy_stmt_hooking.test WHERE val > 20; @@ -66,8 +83,8 @@ SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; -1 hash_1 -6 hash_2 +1 hash_1 0 0 +6 hash_2 0 0 \. 
SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT * FROM copy_stmt_hooking.test ORDER BY val; diff --git a/src/copy_stmt_hooking.c b/src/copy_stmt_hooking.c index fec975fc..c8cb1515 100644 --- a/src/copy_stmt_hooking.c +++ b/src/copy_stmt_hooking.c @@ -269,9 +269,8 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) is_from == false) /* rewrite COPY table TO statements */ { SelectStmt *select; - ColumnRef *cr; - ResTarget *target; RangeVar *from; + List *target_list = NIL; if (is_from) ereport(ERROR, @@ -280,20 +279,54 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) errhint("Use INSERT statements instead."))); /* Build target list */ - cr = makeNode(ColumnRef); - if (!stmt->attlist) + { + ColumnRef *cr; + ResTarget *target; + + cr = makeNode(ColumnRef); cr->fields = list_make1(makeNode(A_Star)); - else - cr->fields = stmt->attlist; + cr->location = -1; - cr->location = 1; + /* Build the ResTarget and add the ColumnRef to it. */ + target = makeNode(ResTarget); + target->name = NULL; + target->indirection = NIL; + target->val = (Node *) cr; + target->location = -1; - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = 1; + target_list = list_make1(target); + } + else + { + ListCell *lc; + + foreach(lc, stmt->attlist) + { + ColumnRef *cr; + ResTarget *target; + + /* + * Build the ColumnRef for each column. The ColumnRef + * 'fields' property is a String 'Value' node (see + * nodes/value.h) that corresponds to the column name + * respectively. + */ + cr = makeNode(ColumnRef); + cr->fields = list_make1(lfirst(lc)); + cr->location = -1; + + /* Build the ResTarget and add the ColumnRef to it. 
*/ + target = makeNode(ResTarget); + target->name = NULL; + target->indirection = NIL; + target->val = (Node *) cr; + target->location = -1; + + /* Add each column to the SELECT statements target list */ + target_list = lappend(target_list, target); + } + } /* * Build RangeVar for from clause, fully qualified based on the @@ -304,7 +337,7 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) /* Build query */ select = makeNode(SelectStmt); - select->targetList = list_make1(target); + select->targetList = target_list; select->fromClause = list_make1(from); query = (Node *) select; From 687e6ca5078a2da03a02fec95ee5df2cfa1d54f2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Oct 2016 17:38:29 +0300 Subject: [PATCH 0026/1124] [WIP] introduce 'partition_creation' subsystem --- Makefile | 2 +- src/partition_creation.c | 259 +++++++++++++++++++++++++++++++++++++++ src/partition_creation.h | 8 ++ 3 files changed, 268 insertions(+), 1 deletion(-) create mode 100644 src/partition_creation.c create mode 100644 src/partition_creation.h diff --git a/Makefile b/Makefile index 58e5e939..c03614ce 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/copy_stmt_hooking.o \ - src/pg_compat.o $(WIN32RES) + src/partition_creation.o src/pg_compat.o $(WIN32RES) EXTENSION = pg_pathman EXTVERSION = 1.1 diff --git a/src/partition_creation.c b/src/partition_creation.c new file mode 100644 index 00000000..9d0ebf76 --- /dev/null +++ b/src/partition_creation.c @@ -0,0 +1,259 @@ +#include "pathman.h" +#include "init.h" +#include "partition_creation.h" +#include "relation_info.h" + +#include "access/reloptions.h" +#include "access/xact.h" +#include "catalog/heap.h" 
+#include "catalog/toasting.h" +#include "commands/defrem.h" +#include "commands/event_trigger.h" +#include "commands/tablecmds.h" +#include "nodes/makefuncs.h" +#include "parser/parse_expr.h" +#include "parser/parse_node.h" +#include "parser/parse_relation.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + + +/* TODO: comment */ +Oid +create_single_range_partition(Oid parent_relid, + Datum start_value, + Datum end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace) +{ + CreateStmt create_stmt; + ObjectAddress partition_addr; + Oid child_relid; + Relation child_relation; + Datum toast_options; + TableLikeClause like_clause; + Constraint *check_constr; + RangeVar *parent_rv; + Oid parent_nsp; + char *parent_name, + *parent_nsp_name, + partitioned_column; + Datum config_values[Natts_pathman_config]; + bool config_nulls[Natts_pathman_config]; + static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + + /* Lock parent and check if it exists */ + LockRelationOid(parent_relid, ShareUpdateExclusiveLock); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + elog(ERROR, "relation %u does not exist", parent_relid); + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(parent_relid, + config_values, config_nulls, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)); + + /* Cache parent's namespace and name */ + parent_name = get_rel_name(parent_relid); + parent_nsp = get_rel_namespace(parent_relid); + parent_nsp_name = get_namespace_name(parent_nsp); + + /* Make up parent's RangeVar */ + parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); + + /* Generate a name if asked to */ + if (!partition_rv) + { + char *part_name; + + /* Make up a name for the partition */ + part_name = ChooseRelationName(parent_name, NULL, "part", parent_nsp); + + /* Make RangeVar for the partition */ + partition_rv = makeRangeVar(parent_nsp_name, part_name, -1); 
+	} + +	/* Initialize TableLikeClause structure */ +	NodeSetTag(&like_clause, T_TableLikeClause); +	like_clause.relation = copyObject(parent_rv); +	like_clause.options = CREATE_TABLE_LIKE_ALL; + +	/* Initialize CreateStmt structure */ +	NodeSetTag(&create_stmt, T_CreateStmt); +	create_stmt.relation = copyObject(partition_rv); +	create_stmt.tableElts = list_make1(&like_clause); +	create_stmt.inhRelations = list_make1(copyObject(parent_rv)); +	create_stmt.ofTypename = NULL; +	create_stmt.constraints = list_make1(&check_constr); +	create_stmt.options = NIL; +	create_stmt.oncommit = ONCOMMIT_NOOP; +	create_stmt.tablespacename = tablespace; +	create_stmt.if_not_exists = false; + +	/* Create new partition owned by parent's possessor */ +	partition_addr = DefineRelation(&create_stmt, RELKIND_RELATION, +									get_rel_owner(parent_relid), NULL); + +	/* Save data about a simple DDL command that was just executed */ +	EventTriggerCollectSimpleCommand(partition_addr, +									 InvalidObjectAddress, +									 (Node *) &create_stmt); + +	/* Save partition's Oid */ +	child_relid = partition_addr.objectId; + +	/* +	 * Let NewRelationCreateToastTable decide if this +	 * one needs a secondary relation too.
+ */ + CommandCounterIncrement(); + + /* Parse and validate reloptions for the toast table */ + toast_options = transformRelOptions((Datum) 0, create_stmt.options, + "toast", validnsps, true, false); + + /* Parse options for a new toast table */ + (void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true); + + /* Now create the toast table if needed */ + NewRelationCreateToastTable(child_relid, toast_options); + + /* Update config one more time */ + CommandCounterIncrement(); + + /* Fetch partitioned column's name */ + partitioned_column = config_values[Anum_pathman_config_attname - 1]; + + /* Build check constraint for RANGE partition */ + check_constr = build_range_check_constraint(partitioned_column, + start_value, + end_value, + value_type); + + /* Open the relation and add new check constraint */ + child_relation = heap_openrv(partition_rv, AccessExclusiveLock); + AddRelationNewConstraints(child_relation, NIL, + list_make1(check_constr), + false, true, true); + heap_close(child_relation, NoLock); + + /* Invoke init_callback on partition */ + invoke_init_callback(parent_relid, child_relid, InvalidOid, + start_value, end_value, value_type); + + return child_relid; +} + +Node * +raw_range_check_tree(char *attname, + Datum start_value, + Datum end_value, + Oid value_type) +{ + BoolExpr *and_oper = makeNode(BoolExpr); + A_Expr *left_arg = makeNode(A_Expr), + *right_arg = makeNode(A_Expr); + A_Const *left_const = makeNode(A_Const), + *right_const = makeNode(A_Const); + ColumnRef *col_ref = makeNode(ColumnRef); + + /* Partitioned column */ + col_ref->fields = list_make1(makeString(attname)); + col_ref->location = -1; + + /* Left boundary */ + left_const->val = *makeString(datum_to_cstring(start_value, value_type)); + left_const->location = -1; + + /* Right boundary */ + right_const->val = *makeString(datum_to_cstring(end_value, value_type)); + right_const->location = -1; + + /* Left comparison (VAR >= start_value) */ + left_arg->name = 
list_make1(makeString(">=")); +	left_arg->kind = AEXPR_OP; +	left_arg->lexpr = (Node *) col_ref; +	left_arg->rexpr = (Node *) left_const; +	left_arg->location = -1; + +	/* Right comparison (VAR < end_value) */ +	right_arg->name = list_make1(makeString("<")); +	right_arg->kind = AEXPR_OP; +	right_arg->lexpr = (Node *) col_ref; +	right_arg->rexpr = (Node *) right_const; +	right_arg->location = -1; + +	and_oper->boolop = AND_EXPR; +	and_oper->args = list_make2(left_arg, right_arg); +	and_oper->location = -1; + +	return (Node *) and_oper; +} + +Node * +good_range_check_tree(RangeVar *partition, +					  char *attname, +					  Datum start_value, +					  Datum end_value, +					  Oid value_type) +{ +	ParseState *pstate = make_parsestate(NULL); +	RangeTblEntry *partition_rte; +	Node *expression, +		 *raw_expression; +	ParseNamespaceItem pni; + +	/* Required for transformExpr() */ +	partition_rte = addRangeTableEntry(pstate, partition, NULL, false, false); + +	memset((void *) &pni, 0, sizeof(ParseNamespaceItem)); +	pni.p_rte = partition_rte; +	pni.p_rel_visible = true; +	pni.p_cols_visible = true; + +	pstate->p_namespace = list_make1(&pni); +	pstate->p_rtable = list_make1(partition_rte); + +	/* Transform raw check constraint expression into Constraint */ +	raw_expression = raw_range_check_tree(attname, start_value, end_value, value_type); +	expression = transformExpr(pstate, raw_expression, EXPR_KIND_CHECK_CONSTRAINT); + +	return (Node *) expression; +} + +Constraint * +build_range_check_constraint(char *attname, +							 Datum start_value, +							 Datum end_value, +							 Oid value_type) +{ +	Constraint *range_constr; + +	range_constr = makeNode(Constraint); +	range_constr->conname = NULL; +	range_constr->deferrable = false; +	range_constr->initdeferred = false; +	range_constr->location = -1; +	range_constr->contype = CONSTR_CHECK; +	range_constr->is_no_inherit = true; + +	range_constr->raw_expr = raw_range_check_tree(attname, +												  start_value, +												  end_value, +												  value_type); + +	return range_constr; +} + +/* TODO: comment
*/ +void +invoke_init_callback(Oid parent_relid, + Oid child_relid, + Oid init_callback, + Datum start_value, + Datum end_value, + Oid value_type) +{ + +} diff --git a/src/partition_creation.h b/src/partition_creation.h new file mode 100644 index 00000000..ac8792fa --- /dev/null +++ b/src/partition_creation.h @@ -0,0 +1,8 @@ + +#include "postgres.h" +#include "nodes/parsenodes.h" + +Constraint *build_range_check_constraint(char *attname, + Datum start_value, + Datum end_value, + Oid value_type); From fe30c62ea2ae46566d6e7056b6ee6d5c083094a0 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 27 Oct 2016 17:14:15 +0300 Subject: [PATCH 0027/1124] Add parallel nodes support --- src/hooks.c | 13 ++- src/pg_pathman.c | 291 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 303 insertions(+), 1 deletion(-) diff --git a/src/hooks.c b/src/hooks.c index 42f9cc79..a3d44f1b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -324,11 +324,22 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Clear old path list */ list_free(rel->pathlist); - rel->pathlist = NIL; + +#if PG_VERSION_NUM >= 90600 + /* Clear old partial path list */ + list_free(rel->partial_pathlist); + rel->partial_pathlist = NIL; +#endif + set_append_rel_pathlist(root, rel, rti, rte, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti, rte); +#if PG_VERSION_NUM >= 90600 + /* consider gathering partial paths for the parent appendrel */ + generate_gather_paths(root, rel); +#endif + /* No need to go further (both nodes are disabled), return */ if (!(pg_pathman_enable_runtimeappend || pg_pathman_enable_runtime_merge_append)) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..4ecd2005 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -26,6 +26,7 @@ #include "access/transam.h" #include "access/xact.h" #include "catalog/pg_cast.h" +#include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/spi.h" #include "foreign/fdwapi.h" @@ -98,6 +99,11 @@ static 
void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, PathKey *pathkeyAsc, PathKey *pathkeyDesc); static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); +#if PG_VERSION_NUM >= 90600 +static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); +static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, + RangeTblEntry *rte); +#endif /* @@ -1756,6 +1762,12 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) #endif add_path(rel, path); +#if PG_VERSION_NUM >= 90600 + /* If appropriate, consider parallel sequential scan */ + if (rel->consider_parallel && required_outer == NULL) + create_plain_partial_paths(root, rel); +#endif + /* Consider index scans */ create_index_paths(root, rel); @@ -1804,6 +1816,10 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, List *live_childrels = NIL; List *subpaths = NIL; bool subpaths_valid = true; +#if PG_VERSION_NUM >= 90600 + List *partial_subpaths = NIL; + bool partial_subpaths_valid = true; +#endif List *all_child_pathkeys = NIL; List *all_child_outers = NIL; ListCell *l; @@ -1831,6 +1847,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, childRTE = root->simple_rte_array[childRTindex]; childrel = root->simple_rel_array[childRTindex]; +#if PG_VERSION_NUM >= 90600 + /* + * If parallelism is allowable for this query in general and for parent + * appendrel, see whether it's allowable for this childrel in + * particular. + * + * For consistency, do this before calling set_rel_size() for the child. + */ + if (root->glob->parallelModeOK && rel->consider_parallel) + set_rel_consider_parallel(root, childrel, childRTE); +#endif + /* * Compute the child's access paths. 
*/ @@ -1857,6 +1885,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, */ live_childrels = lappend(live_childrels, childrel); +#if PG_VERSION_NUM >= 90600 + /* + * If any live child is not parallel-safe, treat the whole appendrel + * as not parallel-safe. In future we might be able to generate plans + * in which some children are farmed out to workers while others are + * not; but we don't have that today, so it's a waste to consider + * partial paths anywhere in the appendrel unless it's all safe. + */ + if (!childrel->consider_parallel) + rel->consider_parallel = false; +#endif + /* * If child has an unparameterized cheapest-total path, add that to * the unparameterized Append path we are constructing for the parent. @@ -1868,6 +1908,15 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, else subpaths_valid = false; +#if PG_VERSION_NUM >= 90600 + /* Same idea, but for a partial plan. */ + if (childrel->partial_pathlist != NIL) + partial_subpaths = accumulate_append_subpath(partial_subpaths, + linitial(childrel->partial_pathlist)); + else + partial_subpaths_valid = false; +#endif + /* * Collect lists of all the available path orderings and * parameterizations for all the children. We use these as a @@ -1942,6 +1991,37 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, add_path(rel, (Path *) create_append_path_compat(rel, subpaths, NULL, 0)); +#if PG_VERSION_NUM >= 90600 + /* + * Consider an append of partial unordered, unparameterized partial paths. + */ + if (partial_subpaths_valid) + { + AppendPath *appendpath; + ListCell *lc; + int parallel_workers = 0; + + /* + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. It + * might be useful to use a higher number if the Append node were + * smart enough to spread out the workers, but it currently isn't. 
+ */ + foreach(lc, partial_subpaths) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + Assert(parallel_workers > 0); + + /* Generate a partial append path. */ + appendpath = create_append_path(rel, partial_subpaths, NULL, + parallel_workers); + add_partial_path(rel, (Path *) appendpath); + } +#endif + /* * Also build unparameterized MergeAppend paths based on the collected * list of child pathkeys. @@ -1995,6 +2075,217 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, } } +#if PG_VERSION_NUM >= 90600 +/* + * create_plain_partial_paths + * Build partial access paths for parallel scan of a plain relation + */ +static void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + /* + * If the user has set the parallel_workers reloption, use that; otherwise + * select a default number of workers. + */ + if (rel->rel_parallel_workers != -1) + parallel_workers = rel->rel_parallel_workers; + else + { + int parallel_threshold; + + /* + * If this relation is too small to be worth a parallel scan, just + * return without doing anything ... unless it's an inheritance child. + * In that case, we want to generate a parallel path here anyway. It + * might not be worthwhile just for this relation, but when combined + * with all of its inheritance siblings it may well pay off. + */ + if (rel->pages < (BlockNumber) min_parallel_relation_size && + rel->reloptkind == RELOPT_BASEREL) + return; + + /* + * Select the number of workers based on the log of the size of the + * relation. This probably needs to be a good deal more + * sophisticated, but we need something here for now. Note that the + * upper limit of the min_parallel_relation_size GUC is chosen to + * prevent overflow here. 
+		 */ +		parallel_workers = 1; +		parallel_threshold = Max(min_parallel_relation_size, 1); +		while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) +		{ +			parallel_workers++; +			parallel_threshold *= 3; +			if (parallel_threshold > INT_MAX / 3) +				break;			/* avoid overflow */ +		} +	} + +	/* +	 * In no case use more than max_parallel_workers_per_gather workers. +	 */ +	parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + +	/* If any limit was set to zero, the user doesn't want a parallel scan. */ +	if (parallel_workers <= 0) +		return; + +	/* Add an unordered partial path based on a parallel sequential scan. */ +	add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} + +/* + * If this relation could possibly be scanned from within a worker, then set + * its consider_parallel flag. + */ +static void +set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, +						  RangeTblEntry *rte) +{ +	/* +	 * The flag has previously been initialized to false, so we can just +	 * return if it becomes clear that we can't safely set it. +	 */ +	Assert(!rel->consider_parallel); + +	/* Don't call this if parallelism is disallowed for the entire query. */ +	Assert(root->glob->parallelModeOK); + +	/* This should only be called for baserels and appendrel children. */ +	Assert(rel->reloptkind == RELOPT_BASEREL || +		   rel->reloptkind == RELOPT_OTHER_MEMBER_REL); + +	/* Assorted checks based on rtekind. */ +	switch (rte->rtekind) +	{ +		case RTE_RELATION: + +			/* +			 * Currently, parallel workers can't access the leader's temporary +			 * tables. We could possibly relax this if we wrote all of its +			 * local buffers at the start of the query and made no changes +			 * thereafter (maybe we could allow hint bit changes), and if we +			 * taught the workers to read them. Writing a large number of +			 * temporary buffers could be expensive, though, and we don't have +			 * the rest of the necessary infrastructure right now anyway.
So + * for now, bail out if we see a temporary table. + */ + if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) + return; + + /* + * Table sampling can be pushed down to workers if the sample + * function and its arguments are safe. + */ + if (rte->tablesample != NULL) + { + Oid proparallel = func_parallel(rte->tablesample->tsmhandler); + + if (proparallel != PROPARALLEL_SAFE) + return; + if (has_parallel_hazard((Node *) rte->tablesample->args, + false)) + return; + } + + /* + * Ask FDWs whether they can support performing a ForeignScan + * within a worker. Most often, the answer will be no. For + * example, if the nature of the FDW is such that it opens a TCP + * connection with a remote server, each parallel worker would end + * up with a separate connection, and these connections might not + * be appropriately coordinated between workers and the leader. + */ + if (rte->relkind == RELKIND_FOREIGN_TABLE) + { + Assert(rel->fdwroutine); + if (!rel->fdwroutine->IsForeignScanParallelSafe) + return; + if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) + return; + } + + /* + * There are additional considerations for appendrels, which we'll + * deal with in set_append_rel_size and set_append_rel_pathlist. + * For now, just set consider_parallel based on the rel's own + * quals and targetlist. + */ + break; + + case RTE_SUBQUERY: + + /* + * There's no intrinsic problem with scanning a subquery-in-FROM + * (as distinct from a SubPlan or InitPlan) in a parallel worker. + * If the subquery doesn't happen to have any parallel-safe paths, + * then flagging it as consider_parallel won't change anything, + * but that's true for plain tables, too. We must set + * consider_parallel based on the rel's own quals and targetlist, + * so that if a subquery path is parallel-safe but the quals and + * projection we're sticking onto it are not, we correctly mark + * the SubqueryScanPath as not parallel-safe. 
(Note that + * set_subquery_pathlist() might push some of these quals down + * into the subquery itself, but that doesn't change anything.) + */ + break; + + case RTE_JOIN: + /* Shouldn't happen; we're only considering baserels here. */ + Assert(false); + return; + + case RTE_FUNCTION: + /* Check for parallel-restricted functions. */ + if (has_parallel_hazard((Node *) rte->functions, false)) + return; + break; + + case RTE_VALUES: + /* Check for parallel-restricted functions. */ + if (has_parallel_hazard((Node *) rte->values_lists, false)) + return; + break; + + case RTE_CTE: + + /* + * CTE tuplestores aren't shared among parallel workers, so we + * force all CTE scans to happen in the leader. Also, populating + * the CTE would require executing a subplan that's not available + * in the worker, might be parallel-restricted, and must get + * executed only once. + */ + return; + } + + /* + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider + * instead postponing application of the restricted quals until we're + * above all the parallelism in the plan tree, but it's not clear that + * that would be a win in very many cases, and it might be tricky to make + * outer join clauses work correctly. It would likely break equivalence + * classes, too. + */ + if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) + return; + + /* + * Likewise, if the relation's outputs are not parallel-safe, give up. + * (Usually, they're just Vars, but sometimes they're not.) + */ + if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) + return; + + /* We have a winner. 
*/ + rel->consider_parallel = true; +} +#endif + static List * accumulate_append_subpath(List *subpaths, Path *path) { From ed2a7789768df443474953956d1a6c97e2f42b01 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 16:15:22 +0300 Subject: [PATCH 0028/1124] [WIP] rework rangeset.c, fix several bugs with OR operator handling and IndexRange union --- src/hooks.c | 4 +- src/nodes_common.c | 6 +- src/pg_pathman.c | 20 +-- src/rangeset.c | 328 ++++++++++++++++++++++++++++++++------------- src/rangeset.h | 90 ++++++++++--- src/utils.c | 78 ++++++++++- 6 files changed, 389 insertions(+), 137 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 42f9cc79..d0a215e1 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -270,7 +270,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, paramsel *= wrap->paramsel; wrappers = lappend(wrappers, wrap); - ranges = irange_list_intersect(ranges, wrap->rangeset); + ranges = irange_list_intersection(ranges, wrap->rangeset); } /* @@ -318,7 +318,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, { IndexRange irange = lfirst_irange(lc); - for (i = irange.ir_lower; i <= irange.ir_upper; i++) + for (i = irange_lower(irange); i <= irange_upper(irange); i++) append_child_relation(root, rel, rti, rte, i, children[i], wrappers); } diff --git a/src/nodes_common.c b/src/nodes_common.c index f75bd2f1..8058c7de 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -303,8 +303,8 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, foreach (range_cell, ranges) { uint32 i; - uint32 a = lfirst_irange(range_cell).ir_lower, - b = lfirst_irange(range_cell).ir_upper; + uint32 a = irange_lower(lfirst_irange(range_cell)), + b = irange_upper(lfirst_irange(range_cell)); for (i = a; i <= b; i++) { @@ -565,7 +565,7 @@ rescan_append_common(CustomScanState *node) /* ... 
then we cut off irrelevant ones using the provided clauses */ wn = walk_expr_tree((Expr *) lfirst(lc), &wcxt); - ranges = irange_list_intersect(ranges, wn->rangeset); + ranges = irange_list_intersection(ranges, wn->rangeset); } /* Get Oids of the required partitions */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..ab77bc67 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -333,16 +333,16 @@ handle_modification_query(Query *parse) InitWalkerContext(&context, prel, NULL, false); wrap = walk_expr_tree(expr, &context); - ranges = irange_list_intersect(ranges, wrap->rangeset); + ranges = irange_list_intersection(ranges, wrap->rangeset); /* If only one partition is affected then substitute parent table with partition */ if (irange_list_length(ranges) == 1) { IndexRange irange = linitial_irange(ranges); - if (irange.ir_lower == irange.ir_upper) + if (irange_lower(irange) == irange_upper(irange)) { Oid *children = PrelGetChildrenArray(prel); - rte->relid = children[irange.ir_lower]; + rte->relid = children[irange_lower(irange)]; rte->inh = false; } } @@ -1317,13 +1317,13 @@ search_range_partition_eq(const Datum value, IndexRange irange = linitial_irange(result.rangeset); Assert(list_length(result.rangeset) == 1); - Assert(irange.ir_lower == irange.ir_upper); - Assert(irange.ir_valid); + Assert(irange_lower(irange) == irange_upper(irange)); + Assert(is_irange_valid(irange)); /* Write result to the 'out_rentry' if necessary */ if (out_re) memcpy((void *) out_re, - (const void *) &ranges[irange.ir_lower], + (const void *) &ranges[irange_lower(irange)], sizeof(RangeEntry)); return SEARCH_RANGEREL_FOUND; @@ -1525,8 +1525,8 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) break; case AND_EXPR: - result->rangeset = irange_list_intersect(result->rangeset, - arg->rangeset); + result->rangeset = irange_list_intersection(result->rangeset, + arg->rangeset); result->paramsel *= arg->paramsel; break; @@ -1544,8 +1544,8 @@ handle_boolexpr(const 
BoolExpr *expr, WalkerContext *context) foreach (lc, result->args) { - WrapperNode *arg = (WrapperNode *) lfirst(lc); - int len = irange_list_length(arg->rangeset); + WrapperNode *arg = (WrapperNode *) lfirst(lc); + int len = irange_list_length(arg->rangeset); result->paramsel *= (1.0 - arg->paramsel * (double)len / (double)totallen); } diff --git a/src/rangeset.c b/src/rangeset.c index beff56de..c044a39e 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -10,39 +10,203 @@ #include "rangeset.h" -/* Check if two ranges are intersecting */ + +/* Check if two ranges intersect */ +bool +iranges_intersect(IndexRange a, IndexRange b) +{ + return (irange_lower(a) <= irange_upper(b)) && + (irange_lower(b) <= irange_upper(a)); +} + +/* Check if two ranges adjoin */ bool -irange_intersects(IndexRange a, IndexRange b) +iranges_adjoin(IndexRange a, IndexRange b) { - return (a.ir_lower <= b.ir_upper) && - (b.ir_lower <= a.ir_upper); + return (irange_upper(a) == irb_pred(irange_lower(b))) || + (irange_upper(b) == irb_pred(irange_lower(a))); } -/* Check if two ranges are conjuncted */ +/* Check if two ranges cover the same area */ bool -irange_conjuncted(IndexRange a, IndexRange b) +irange_eq_bounds(IndexRange a, IndexRange b) { - return (a.ir_lower - 1 <= b.ir_upper) && - (b.ir_lower - 1 <= a.ir_upper); + return (irange_lower(a) == irange_lower(b)) && + (irange_upper(a) == irange_upper(b)); +} + +/* Comapre lossiness factor of two ranges */ +ir_cmp_lossiness +irange_cmp_lossiness(IndexRange a, IndexRange b) +{ + if (is_irange_lossy(a) == is_irange_lossy(b)) + return IR_EQ_LOSSINESS; + + if (is_irange_lossy(a)) + return IR_A_LOSSY; + + if (is_irange_lossy(b)) + return IR_B_LOSSY; + + return IR_EQ_LOSSINESS; } -/* Make union of two ranges. They should have the same lossiness. 
*/ + +/* Make union of two conjuncted ranges */ IndexRange -irange_union(IndexRange a, IndexRange b) +irange_union_simple(IndexRange a, IndexRange b) { - Assert(a.ir_lossy == b.ir_lossy); - return make_irange(Min(a.ir_lower, b.ir_lower), - Max(a.ir_upper, b.ir_upper), - a.ir_lossy); + Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); + + return make_irange(Min(irange_lower(a), irange_lower(b)), + Max(irange_upper(a), irange_upper(b)), + is_irange_lossy(a) && is_irange_lossy(b)); } -/* Get intersection of two ranges */ +/* Get intersection of two conjuncted ranges */ IndexRange -irange_intersect(IndexRange a, IndexRange b) +irange_intersection_simple(IndexRange a, IndexRange b) +{ + Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); + + return make_irange(Max(irange_lower(a), irange_lower(b)), + Min(irange_upper(a), irange_upper(b)), + is_irange_lossy(a) || is_irange_lossy(b)); +} + + +/* Split covering IndexRange into several IndexRanges if needed */ +static IndexRange +irange_handle_cover_internal(IndexRange ir_covering, + IndexRange ir_inner, + List **new_iranges) +{ + /* range 'ir_inner' is lossy */ + if (is_irange_lossy(ir_covering) == false) + /* Good, this means 'ir_covering' is not */ + return ir_covering; + + /* range 'ir_covering' is lossy */ + else + { + /* which means that 'ir_inner' is lossless! 
*/ + IndexRange left_range, + right_range; + + /* We have to split the covering lossy IndexRange */ + Assert(is_irange_lossy(ir_covering) == true); + + /* Left IndexRange is lossy */ + left_range = make_irange(irange_lower(ir_covering), + irange_lower(ir_inner), + true); + + /* Right IndexRange is also lossy */ + right_range = make_irange(irange_upper(ir_inner), + irange_upper(ir_covering), + true); + + /* Append leftmost and medial IndexRanges to list */ + *new_iranges = lappend_irange(*new_iranges, left_range); + *new_iranges = lappend_irange(*new_iranges, ir_inner); + + /* Return rightmost IndexRange */ + return right_range; + } +} + +/* Calculate union of two IndexRanges, return rightmost IndexRange */ +static IndexRange +irange_union_internal(IndexRange first, IndexRange second, List **new_iranges) { - return make_irange(Max(a.ir_lower, b.ir_lower), - Min(a.ir_upper, b.ir_upper), - a.ir_lossy || b.ir_lossy); + /* Swap 'first' and 'second' if order is incorrect */ + if (irange_lower(first) > irange_lower(second)) + { + IndexRange temp; + + temp = first; + first = second; + second = temp; + } + + /* IndexRanges intersect */ + if (iranges_intersect(first, second)) + { + /* Calculate the intersection of 'first' and 'second' */ + IndexRange ir_union = irange_union_simple(first, second); + + /* if lossiness is the same, unite them and skip */ + if (is_irange_lossy(first) == is_irange_lossy(second)) + return ir_union; + + /* range 'first' covers 'second' */ + if (irange_eq_bounds(ir_union, first)) + { + /* Save rightmost IndexRange to 'ret' */ + return irange_handle_cover_internal(first, second, new_iranges); + } + /* range 'second' covers 'first' */ + else if (irange_eq_bounds(ir_union, second)) + { + /* Save rightmost IndexRange to 'ret' */ + return irange_handle_cover_internal(second, first, new_iranges); + } + /* No obvious leader, lossiness differs */ + else + { + /* range 'second' is lossy */ + if (is_irange_lossy(first) == false) + { + IndexRange ret; + + /* 
Set new current IndexRange */ + ret = make_irange(irb_succ(irange_upper(first)), + irange_upper(second), + is_irange_lossy(second)); + + /* Append lower part to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, first); + + /* Return a part of 'second' */ + return ret; + } + /* range 'first' is lossy */ + else + { + IndexRange new_irange; + + new_irange = make_irange(irange_lower(first), + irb_pred(irange_lower(second)), + is_irange_lossy(first)); + + /* Append lower part to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, new_irange); + + /* Return 'second' */ + return second; + } + } + } + /* IndexRanges do not intersect */ + else + { + /* Try to unite these IndexRanges if it's possible */ + if (irange_cmp_lossiness(first, second) == IR_EQ_LOSSINESS && + iranges_adjoin(first, second)) + { + /* Return united IndexRange */ + return irange_union_simple(first, second); + } + /* IndexRanges are not adjoint */ + else + { + /* add 'first' to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, first); + + /* Return 'second' */ + return second; + } + } } /* @@ -51,94 +215,60 @@ irange_intersect(IndexRange a, IndexRange b) List * irange_list_union(List *a, List *b) { - ListCell *ca, - *cb; - List *result = NIL; - IndexRange cur = InvalidIndexRange; - bool have_cur = false; + ListCell *ca, /* iterator of A */ + *cb; /* iterator of B */ + List *result = NIL; /* list of IndexRanges */ + IndexRange cur = InvalidIndexRange; /* current irange */ + /* Initialize iterators */ ca = list_head(a); cb = list_head(b); + /* Loop until we have no cells */ while (ca || cb) { IndexRange next = InvalidIndexRange; - /* Fetch next range with lesser lower bound */ + /* Fetch next irange with lesser lower bound */ if (ca && cb) { - if (lfirst_irange(ca).ir_lower <= lfirst_irange(cb).ir_lower) + if (irange_lower(lfirst_irange(ca)) <= irange_lower(lfirst_irange(cb))) { next = lfirst_irange(ca); - ca = lnext(ca); + ca = lnext(ca); /* move to next cell */ } else 
{ next = lfirst_irange(cb); - cb = lnext(cb); + cb = lnext(cb); /* move to next cell */ } } + /* Fetch next irange from A */ else if (ca) { next = lfirst_irange(ca); - ca = lnext(ca); + ca = lnext(ca); /* move to next cell */ } + /* Fetch next irange from B */ else if (cb) { next = lfirst_irange(cb); - cb = lnext(cb); + cb = lnext(cb); /* move to next cell */ } - if (!have_cur) + /* Put this irange to 'cur' if don't have it yet */ + if (!is_irange_valid(cur)) { - /* Put this range as current value if don't have it yet */ cur = next; - have_cur = true; - } - else - { - if (irange_conjuncted(next, cur)) - { - /* - * Ranges are conjuncted, try to unify them. - */ - if (next.ir_lossy == cur.ir_lossy) - { - cur = irange_union(next, cur); - } - else - { - if (!cur.ir_lossy) - { - result = lappend_irange(result, cur); - cur = make_irange(cur.ir_upper + 1, - next.ir_upper, - next.ir_lossy); - } - else - { - result = lappend_irange(result, - make_irange(cur.ir_lower, - next.ir_lower - 1, - cur.ir_lossy)); - cur = next; - } - } - } - else - { - /* - * Next range is not conjuncted with current. Put current to the - * result list and put next as current. - */ - result = lappend_irange(result, cur); - cur = next; - } + continue; /* skip this iteration */ } + + /* Unite 'cur' and 'next' in an appropriate way */ + cur = irange_union_internal(cur, next, &result); } /* Put current value into result list if any */ - if (have_cur) + if (is_irange_valid(cur)) result = lappend_irange(result, cur); return result; @@ -148,38 +278,39 @@ irange_list_union(List *a, List *b) * Find intersection of two range lists. 
*/ List * -irange_list_intersect(List *a, List *b) +irange_list_intersection(List *a, List *b) { - ListCell *ca, - *cb; - List *result = NIL; - IndexRange ra, rb; + ListCell *ca, /* iterator of A */ + *cb; /* iterator of B */ + List *result = NIL; /* list of IndexRanges */ + /* Initialize iterators */ ca = list_head(a); cb = list_head(b); + /* Loop until we have no cells */ while (ca && cb) { - ra = lfirst_irange(ca); - rb = lfirst_irange(cb); + IndexRange ra = lfirst_irange(ca), + rb = lfirst_irange(cb); /* Only care about intersecting ranges */ - if (irange_intersects(ra, rb)) + if (iranges_intersect(ra, rb)) { IndexRange intersect, last; /* - * Get intersection and try to "glue" it to previous range, - * put it separately otherwise. + * Get intersection and try to "glue" it to + * previous range, put it separately otherwise. */ - intersect = irange_intersect(ra, rb); + intersect = irange_intersection_simple(ra, rb); if (result != NIL) { last = llast_irange(result); - if (irange_conjuncted(last, intersect) && - last.ir_lossy == intersect.ir_lossy) + if (iranges_adjoin(last, intersect) && + is_irange_lossy(last) == is_irange_lossy(intersect)) { - llast(result) = alloc_irange(irange_union(last, intersect)); + llast(result) = alloc_irange(irange_union_simple(last, intersect)); } else { @@ -197,9 +328,9 @@ irange_list_intersect(List *a, List *b) * which lists to fetch, since lower bound of next range is greater (or * equal) to upper bound of current. 
*/ - if (ra.ir_upper <= rb.ir_upper) + if (irange_upper(ra) <= irange_upper(rb)) ca = lnext(ca); - if (ra.ir_upper >= rb.ir_upper) + if (irange_upper(ra) >= irange_upper(rb)) cb = lnext(cb); } return result; @@ -210,14 +341,19 @@ int irange_list_length(List *rangeset) { ListCell *lc; - int result = 0; + uint32 result = 0; foreach (lc, rangeset) { - IndexRange irange = lfirst_irange(lc); - result += irange.ir_upper - irange.ir_lower + 1; + IndexRange irange = lfirst_irange(lc); + uint32 diff = irange_upper(irange) - irange_lower(irange); + + Assert(irange_upper(irange) >= irange_lower(irange)); + + result += diff + 1; } - return result; + + return (int) result; } /* Find particular index in range list */ @@ -229,10 +365,10 @@ irange_list_find(List *rangeset, int index, bool *lossy) foreach (lc, rangeset) { IndexRange irange = lfirst_irange(lc); - if (index >= irange.ir_lower && index <= irange.ir_upper) + if (index >= irange_lower(irange) && index <= irange_upper(irange)) { if (lossy) - *lossy = irange.ir_lossy; + *lossy = is_irange_lossy(irange); return true; } } diff --git a/src/rangeset.h b/src/rangeset.h index ffe7f31f..e8ad39a8 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -12,7 +12,7 @@ #define PATHMAN_RANGESET_H -#include "pathman.h" +#include "postgres.h" #include "nodes/pg_list.h" @@ -20,26 +20,39 @@ * IndexRange contains a set of selected partitions. */ typedef struct { - bool ir_valid : 1; - bool ir_lossy : 1; /* should we use IndexScan? */ - uint32 ir_lower : 31; /* lower bound */ - uint32 ir_upper : 31; /* upper bound */ + /* lossy == should we use IndexScan? */ + /* valid == is this IndexRange valid? 
*/ + + /* Don't swap this fields */ + uint32 lower; /* valid + lower_bound */ + uint32 upper; /* lossy + upper_bound */ } IndexRange; -#define RANGE_MASK 0xEFFFFFFF -#define InvalidIndexRange { false, false, 0, 0 } +#define IRANGE_SPECIAL_BIT ( (uint32) ( ((uint32) 1) << 31) ) +#define IRANGE_BONDARY_MASK ( (uint32) (~IRANGE_SPECIAL_BIT) ) + +#define InvalidIndexRange { 0, 0 } + +#define is_irange_valid(irange) ( (irange.lower & IRANGE_SPECIAL_BIT) > 0 ) +#define is_irange_lossy(irange) ( (irange.upper & IRANGE_SPECIAL_BIT) > 0 ) +#define irange_lower(irange) ( (uint32) (irange.lower & IRANGE_BONDARY_MASK) ) +#define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BONDARY_MASK) ) inline static IndexRange make_irange(uint32 lower, uint32 upper, bool lossy) { - IndexRange result; + IndexRange result = { lower & IRANGE_BONDARY_MASK, + upper & IRANGE_BONDARY_MASK }; + + /* Set VALID */ + result.lower |= IRANGE_SPECIAL_BIT; + + /* Set LOSSY if needed */ + if (lossy) result.upper |= IRANGE_SPECIAL_BIT; - result.ir_valid = true; - result.ir_lossy = lossy; - result.ir_lower = (lower & RANGE_MASK); - result.ir_upper = (upper & RANGE_MASK); + Assert(lower <= upper); return result; } @@ -49,11 +62,33 @@ alloc_irange(IndexRange irange) { IndexRange *result = (IndexRange *) palloc(sizeof(IndexRange)); - memcpy((void *) result, (void *) &irange, sizeof(IndexRange)); + /* Copy all fields of IndexRange */ + *result = irange; return result; } +/* Return predecessor or 0 if boundary is 0 */ +inline static uint32 +irb_pred(uint32 boundary) +{ + if (boundary > 0) + return (boundary - 1) & IRANGE_BONDARY_MASK; + + return 0; +} + +/* Return predecessor or IRANGE_BONDARY_MASK */ +inline static uint32 +irb_succ(uint32 boundary) +{ + if (boundary >= IRANGE_BONDARY_MASK) + return boundary; + + return boundary + 1; +} + + #define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) #define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) #define lcons_irange(irange, 
list) ( lcons(alloc_irange(irange), (list)) ) @@ -62,13 +97,30 @@ alloc_irange(IndexRange irange) #define linitial_irange(list) ( lfirst_irange(list_head(list)) ) -/* rangeset.c */ -bool irange_intersects(IndexRange a, IndexRange b); -bool irange_conjuncted(IndexRange a, IndexRange b); -IndexRange irange_union(IndexRange a, IndexRange b); -IndexRange irange_intersect(IndexRange a, IndexRange b); +/* Result of function irange_cmp_lossiness() */ +typedef enum +{ + IR_EQ_LOSSINESS = 0, /* IndexRanges share same lossiness */ + IR_A_LOSSY, /* IndexRange 'a' is lossy ('b' is not) */ + IR_B_LOSSY /* IndexRange 'b' is lossy ('a' is not) */ +} ir_cmp_lossiness; + + +/* Various traits */ +bool iranges_intersect(IndexRange a, IndexRange b); +bool iranges_adjoin(IndexRange a, IndexRange b); +bool irange_eq_bounds(IndexRange a, IndexRange b); +ir_cmp_lossiness irange_cmp_lossiness(IndexRange a, IndexRange b); + +/* Basic operations on IndexRanges */ +IndexRange irange_union_simple(IndexRange a, IndexRange b); +IndexRange irange_intersection_simple(IndexRange a, IndexRange b); + +/* Operations on Lists of IndexRanges */ List *irange_list_union(List *a, List *b); -List *irange_list_intersect(List *a, List *b); +List *irange_list_intersection(List *a, List *b); + +/* Utility functions */ int irange_list_length(List *rangeset); bool irange_list_find(List *rangeset, int index, bool *lossy); diff --git a/src/utils.c b/src/utils.c index 831d5a24..3147ad0a 100644 --- a/src/utils.c +++ b/src/utils.c @@ -165,15 +165,79 @@ __attribute__((unused)) static char * bms_print(Bitmapset *bms) { - StringInfoData str; - int x; + StringInfoData str; + int x; - initStringInfo(&str); - x = -1; - while ((x = bms_next_member(bms, x)) >= 0) - appendStringInfo(&str, " %d", x); + initStringInfo(&str); + x = -1; + while ((x = bms_next_member(bms, x)) >= 0) + appendStringInfo(&str, " %d", x); - return str.data; + return str.data; +} + +/* + * Print list of IndexRanges as cstring. 
+ */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +rangeset_print(List *rangeset) +{ + StringInfoData str; + ListCell *lc; + bool first_irange = true; + char lossy = 'L', /* Lossy IndexRange */ + complete = 'C'; /* Complete IndexRange */ + + initStringInfo(&str); + + foreach (lc, rangeset) + { + IndexRange irange = lfirst_irange(lc); + + /* Append comma if needed */ + if (!first_irange) + appendStringInfo(&str, ", "); + + if (!is_irange_valid(irange)) + appendStringInfo(&str, "X"); + else if (irange_lower(irange) == irange_upper(irange)) + appendStringInfo(&str, "%u%c", + irange_lower(irange), + (is_irange_lossy(irange) ? lossy : complete)); + else + appendStringInfo(&str, "[%u-%u]%c", + irange_lower(irange), irange_upper(irange), + (is_irange_lossy(irange) ? lossy : complete)); + + first_irange = false; + } + + return str.data; +} + +/* + * Print IndexRange struct as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +irange_print(IndexRange irange) +{ + StringInfoData str; + + initStringInfo(&str); + + appendStringInfo(&str, "{ valid: %s, lossy: %s, lower: %u, upper: %u }", + (is_irange_valid(irange) ? "true" : "false"), + (is_irange_lossy(irange) ? 
"true" : "false"), + irange_lower(irange), + irange_upper(irange)); + + return str.data; } /* From 25cff809d18acb9dcae9c66ff9d9b8d37a243a0f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 16:17:11 +0300 Subject: [PATCH 0029/1124] [WIP] further improvements in irange_handle_cover_internal() --- src/rangeset.c | 71 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/src/rangeset.c b/src/rangeset.c index c044a39e..955aeeee 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -56,6 +56,7 @@ irange_cmp_lossiness(IndexRange a, IndexRange b) IndexRange irange_union_simple(IndexRange a, IndexRange b) { + /* Ranges should be connected somehow */ Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); return make_irange(Min(irange_lower(a), irange_lower(b)), @@ -67,6 +68,7 @@ irange_union_simple(IndexRange a, IndexRange b) IndexRange irange_intersection_simple(IndexRange a, IndexRange b) { + /* Ranges should be connected somehow */ Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); return make_irange(Max(irange_lower(a), irange_lower(b)), @@ -81,43 +83,74 @@ irange_handle_cover_internal(IndexRange ir_covering, IndexRange ir_inner, List **new_iranges) { + /* Equal lossiness should've been taken into cosideration earlier */ + Assert(is_irange_lossy(ir_covering) != is_irange_lossy(ir_inner)); + /* range 'ir_inner' is lossy */ if (is_irange_lossy(ir_covering) == false) - /* Good, this means 'ir_covering' is not */ return ir_covering; - /* range 'ir_covering' is lossy */ + /* range 'ir_covering' is lossy, 'ir_inner' is lossless! */ else { - /* which means that 'ir_inner' is lossless! 
*/ - IndexRange left_range, - right_range; + IndexRange ret; /* IndexRange to be returned */ + + /* 'left_range_upper' should not be less than 'left_range_lower' */ + uint32 left_range_lower = irange_lower(ir_covering), + left_range_upper = Max(irb_pred(irange_lower(ir_inner)), + left_range_lower); + + /* 'right_range_lower' should not be greater than 'right_range_upper' */ + uint32 right_range_upper = irange_upper(ir_covering), + right_range_lower = Min(irb_succ(irange_upper(ir_inner)), + right_range_upper); /* We have to split the covering lossy IndexRange */ Assert(is_irange_lossy(ir_covering) == true); - /* Left IndexRange is lossy */ - left_range = make_irange(irange_lower(ir_covering), - irange_lower(ir_inner), - true); + /* 'ir_inner' should not cover leftmost IndexRange */ + if (irange_lower(ir_inner) > left_range_upper) + { + IndexRange left_range; + + /* Leftmost IndexRange is lossy */ + left_range = make_irange(left_range_lower, + left_range_upper, + true); - /* Right IndexRange is also lossy */ - right_range = make_irange(irange_upper(ir_inner), - irange_upper(ir_covering), - true); + /* Append leftmost IndexRange ('left_range') to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, left_range); + } - /* Append leftmost and medial IndexRanges to list */ - *new_iranges = lappend_irange(*new_iranges, left_range); - *new_iranges = lappend_irange(*new_iranges, ir_inner); + /* 'ir_inner' should not cover rightmost IndexRange */ + if (right_range_lower > irange_upper(ir_inner)) + { + IndexRange right_range; + + /* Rightmost IndexRange is also lossy */ + right_range = make_irange(right_range_lower, + right_range_upper, + true); + + /* 'right_range' is indeed rightmost IndexRange */ + ret = right_range; + + /* Append medial IndexRange ('ir_inner') to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, ir_inner); + } + /* Else return 'ir_inner' as rightmost IndexRange */ + else ret = ir_inner; - /* Return rightmost IndexRange */ - return 
right_range; + /* Return rightmost IndexRange (right_range | ir_inner) */ + return ret; } } /* Calculate union of two IndexRanges, return rightmost IndexRange */ static IndexRange -irange_union_internal(IndexRange first, IndexRange second, List **new_iranges) +irange_union_internal(IndexRange first, + IndexRange second, + List **new_iranges) { /* Swap 'first' and 'second' if order is incorrect */ if (irange_lower(first) > irange_lower(second)) From 4012acffe7dc5f7a2321261bb10beb4ae4ee0547 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 16:30:41 +0300 Subject: [PATCH 0030/1124] improve comments and declarations in rangeset.c --- src/rangeset.c | 21 +++++++++++++-------- src/rangeset.h | 4 ++-- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/rangeset.c b/src/rangeset.c index 955aeeee..39e95436 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -11,6 +11,15 @@ #include "rangeset.h" +static IndexRange irange_handle_cover_internal(IndexRange ir_covering, + IndexRange ir_inner, + List **new_iranges); + +static IndexRange irange_union_internal(IndexRange first, + IndexRange second, + List **new_iranges); + + /* Check if two ranges intersect */ bool iranges_intersect(IndexRange a, IndexRange b) @@ -175,13 +184,13 @@ irange_union_internal(IndexRange first, /* range 'first' covers 'second' */ if (irange_eq_bounds(ir_union, first)) { - /* Save rightmost IndexRange to 'ret' */ + /* Return rightmost IndexRange, save others to 'new_iranges' */ return irange_handle_cover_internal(first, second, new_iranges); } /* range 'second' covers 'first' */ else if (irange_eq_bounds(ir_union, second)) { - /* Save rightmost IndexRange to 'ret' */ + /* Retun rightmost IndexRange, save others to 'new_iranges' */ return irange_handle_cover_internal(second, first, new_iranges); } /* No obvious leader, lossiness differs */ @@ -242,9 +251,7 @@ irange_union_internal(IndexRange first, } } -/* - * Make union of two index rage lists. 
- */ +/* Make union of two index rage lists */ List * irange_list_union(List *a, List *b) { @@ -307,9 +314,7 @@ irange_list_union(List *a, List *b) return result; } -/* - * Find intersection of two range lists. - */ +/* Find intersection of two range lists */ List * irange_list_intersection(List *a, List *b) { diff --git a/src/rangeset.h b/src/rangeset.h index e8ad39a8..fb55e5be 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -20,10 +20,10 @@ * IndexRange contains a set of selected partitions. */ typedef struct { - /* lossy == should we use IndexScan? */ + /* lossy == should we use quals? */ /* valid == is this IndexRange valid? */ - /* Don't swap this fields */ + /* Don't swap these fields */ uint32 lower; /* valid + lower_bound */ uint32 upper; /* lossy + upper_bound */ } IndexRange; From ce86f410e0ebeacd487ad03bd854fc3770023637 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 16:53:01 +0300 Subject: [PATCH 0031/1124] move python tests to tests/python --- tests/{ => python}/README.md | 0 tests/{ => python}/__init__.py | 0 tests/{ => python}/partitioning_test.py | 0 travis/pg-travis-test.sh | 2 +- 4 files changed, 1 insertion(+), 1 deletion(-) rename tests/{ => python}/README.md (100%) rename tests/{ => python}/__init__.py (100%) rename tests/{ => python}/partitioning_test.py (100%) diff --git a/tests/README.md b/tests/python/README.md similarity index 100% rename from tests/README.md rename to tests/python/README.md diff --git a/tests/__init__.py b/tests/python/__init__.py similarity index 100% rename from tests/__init__.py rename to tests/python/__init__.py diff --git a/tests/partitioning_test.py b/tests/python/partitioning_test.py similarity index 100% rename from tests/partitioning_test.py rename to tests/python/partitioning_test.py diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 44552ae3..06db341d 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -88,7 +88,7 @@ pip install $pip_packages sudo 
chmod a+w /var/run/postgresql/ # run python tests -cd tests +cd tests/python PG_CONFIG=$config_path python -m unittest partitioning_test || status=$? set -u From b4712b0c025187886706d3e1f72a9fa86db02955 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 19:49:15 +0300 Subject: [PATCH 0032/1124] move various print functions to debug_print.c, add cmocka-based unit test system --- Makefile | 2 +- src/debug_print.c | 101 ++++++++++ src/utils.c | 84 --------- tests/cmocka/Makefile | 26 +++ tests/cmocka/missing_basic.c | 43 +++++ tests/cmocka/missing_bitmapset.c | 16 ++ tests/cmocka/missing_list.c | 142 +++++++++++++++ tests/cmocka/missing_stringinfo.c | 293 ++++++++++++++++++++++++++++++ tests/cmocka/rangeset_tests | Bin 0 -> 23232 bytes tests/cmocka/rangeset_tests.c | 48 +++++ 10 files changed, 670 insertions(+), 85 deletions(-) create mode 100644 src/debug_print.c create mode 100644 tests/cmocka/Makefile create mode 100644 tests/cmocka/missing_basic.c create mode 100644 tests/cmocka/missing_bitmapset.c create mode 100644 tests/cmocka/missing_list.c create mode 100644 tests/cmocka/missing_stringinfo.c create mode 100755 tests/cmocka/rangeset_tests create mode 100644 tests/cmocka/rangeset_tests.c diff --git a/Makefile b/Makefile index 58e5e939..6c9e8122 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/copy_stmt_hooking.o \ - src/pg_compat.o $(WIN32RES) + src/debug_print.o src/pg_compat.o $(WIN32RES) EXTENSION = pg_pathman EXTVERSION = 1.1 diff --git a/src/debug_print.c b/src/debug_print.c new file mode 100644 index 00000000..36016861 --- /dev/null +++ b/src/debug_print.c @@ -0,0 +1,101 @@ +/* ------------------------------------------------------------------------ 
+ * + * debug_print.c + * Print sophisticated structs as CSTRING + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "rangeset.h" + +#include "postgres.h" +#include "nodes/bitmapset.h" +#include "nodes/pg_list.h" +#include "lib/stringinfo.h" + + +/* + * Print Bitmapset as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +bms_print(Bitmapset *bms) +{ + StringInfoData str; + int x; + + initStringInfo(&str); + x = -1; + while ((x = bms_next_member(bms, x)) >= 0) + appendStringInfo(&str, " %d", x); + + return str.data; +} + +/* + * Print list of IndexRanges as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +rangeset_print(List *rangeset) +{ + StringInfoData str; + ListCell *lc; + bool first_irange = true; + char lossy = 'L', /* Lossy IndexRange */ + complete = 'C'; /* Complete IndexRange */ + + initStringInfo(&str); + + foreach (lc, rangeset) + { + IndexRange irange = lfirst_irange(lc); + + /* Append comma if needed */ + if (!first_irange) + appendStringInfo(&str, ", "); + + if (!is_irange_valid(irange)) + appendStringInfo(&str, "X"); + else if (irange_lower(irange) == irange_upper(irange)) + appendStringInfo(&str, "%u%c", + irange_lower(irange), + (is_irange_lossy(irange) ? lossy : complete)); + else + appendStringInfo(&str, "[%u-%u]%c", + irange_lower(irange), irange_upper(irange), + (is_irange_lossy(irange) ? lossy : complete)); + + first_irange = false; + } + + return str.data; +} + +/* + * Print IndexRange struct as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +irange_print(IndexRange irange) +{ + StringInfoData str; + + initStringInfo(&str); + + appendStringInfo(&str, "{ valid: %s, lossy: %s, lower: %u, upper: %u }", + (is_irange_valid(irange) ? "true" : "false"), + (is_irange_lossy(irange) ? 
"true" : "false"), + irange_lower(irange), + irange_upper(irange)); + + return str.data; +} diff --git a/src/utils.c b/src/utils.c index 3147ad0a..eee02928 100644 --- a/src/utils.c +++ b/src/utils.c @@ -156,90 +156,6 @@ lock_rows_visitor(Plan *plan, void *context) } } -/* - * Print Bitmapset as cstring. - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -bms_print(Bitmapset *bms) -{ - StringInfoData str; - int x; - - initStringInfo(&str); - x = -1; - while ((x = bms_next_member(bms, x)) >= 0) - appendStringInfo(&str, " %d", x); - - return str.data; -} - -/* - * Print list of IndexRanges as cstring. - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -rangeset_print(List *rangeset) -{ - StringInfoData str; - ListCell *lc; - bool first_irange = true; - char lossy = 'L', /* Lossy IndexRange */ - complete = 'C'; /* Complete IndexRange */ - - initStringInfo(&str); - - foreach (lc, rangeset) - { - IndexRange irange = lfirst_irange(lc); - - /* Append comma if needed */ - if (!first_irange) - appendStringInfo(&str, ", "); - - if (!is_irange_valid(irange)) - appendStringInfo(&str, "X"); - else if (irange_lower(irange) == irange_upper(irange)) - appendStringInfo(&str, "%u%c", - irange_lower(irange), - (is_irange_lossy(irange) ? lossy : complete)); - else - appendStringInfo(&str, "[%u-%u]%c", - irange_lower(irange), irange_upper(irange), - (is_irange_lossy(irange) ? lossy : complete)); - - first_irange = false; - } - - return str.data; -} - -/* - * Print IndexRange struct as cstring. - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -irange_print(IndexRange irange) -{ - StringInfoData str; - - initStringInfo(&str); - - appendStringInfo(&str, "{ valid: %s, lossy: %s, lower: %u, upper: %u }", - (is_irange_valid(irange) ? "true" : "false"), - (is_irange_lossy(irange) ? 
"true" : "false"), - irange_lower(irange), - irange_upper(irange)); - - return str.data; -} - /* * Get BTORDER_PROC for two types described by Oids */ diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile new file mode 100644 index 00000000..3b6d978c --- /dev/null +++ b/tests/cmocka/Makefile @@ -0,0 +1,26 @@ +PG_CONFIG = pg_config +TOP_SRC_DIR = ../../src + +CC = gcc +CFLAGS = -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) +LDFLAGS = -lcmocka +TEST_BIN = rangeset_tests + +OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ + missing_bitmapset.o rangeset_tests.o \ + $(TOP_SRC_DIR)/rangeset.o + + +all: build_extension $(TEST_BIN) + +$(TEST_BIN): $(OBJ) + $(CC) -o $@ $^ $(CFLAGS) $(LDFLAGS) + +%.o: %.c + $(CC) -c -o $@ $< $(CFLAGS) + +build_extension: + $(MAKE) -C $(TOP_SRC_DIR)/.. + +clean: + rm -f $(OBJ) $(TEST_BIN) diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c new file mode 100644 index 00000000..d6c3808e --- /dev/null +++ b/tests/cmocka/missing_basic.c @@ -0,0 +1,43 @@ +#include + +#include "postgres.h" + + +void * +palloc(Size size) +{ + return malloc(size); +} + +void * +repalloc(void *pointer, Size size) +{ + return realloc(pointer, size); +} + + +void +ExceptionalCondition(const char *conditionName, + const char *errorType, + const char *fileName, + int lineNumber) +{ + if (!PointerIsValid(conditionName) || + !PointerIsValid(fileName) || + !PointerIsValid(errorType)) + { + printf("TRAP: ExceptionalCondition: bad arguments\n"); + } + else + { + printf("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", + errorType, conditionName, + fileName, lineNumber); + + } + + /* Usually this shouldn't be needed, but make sure the msg went out */ + fflush(stderr); + + abort(); +} diff --git a/tests/cmocka/missing_bitmapset.c b/tests/cmocka/missing_bitmapset.c new file mode 100644 index 00000000..7e986d5a --- /dev/null +++ b/tests/cmocka/missing_bitmapset.c @@ -0,0 +1,16 @@ +#include "postgres.h" +#include "nodes/bitmapset.h" 
+ + +int +bms_next_member(const Bitmapset *a, int prevbit); + + +int +bms_next_member(const Bitmapset *a, int prevbit) +{ + printf("bms_next_member(): not implemented yet\n"); + fflush(stdout); + + abort(); +} diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c new file mode 100644 index 00000000..9c07bc10 --- /dev/null +++ b/tests/cmocka/missing_list.c @@ -0,0 +1,142 @@ +/*------------------------------------------------------------------------- + * + * list.c + * implementation for PostgreSQL generic linked list package + * + * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/nodes/list.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "nodes/pg_list.h" + + +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + + +static List * +new_list(NodeTag type); + +static void +new_tail_cell(List *list); + +static void +new_head_cell(List *list); + +static void +check_list_invariants(const List *list); + + +/* + * ------------- + * Definitions + * ------------- + */ + +static List * +new_list(NodeTag type) +{ + List *new_list; + ListCell *new_head; + + new_head = (ListCell *) palloc(sizeof(*new_head)); + new_head->next = NULL; + /* new_head->data is left undefined! 
*/ + + new_list = (List *) palloc(sizeof(*new_list)); + new_list->type = type; + new_list->length = 1; + new_list->head = new_head; + new_list->tail = new_head; + + return new_list; + +} + +static void +new_tail_cell(List *list) +{ + ListCell *new_tail; + + new_tail = (ListCell *) palloc(sizeof(*new_tail)); + new_tail->next = NULL; + + list->tail->next = new_tail; + list->tail = new_tail; + list->length++; + +} + +static void +new_head_cell(List *list) +{ + ListCell *new_head; + + new_head = (ListCell *) palloc(sizeof(*new_head)); + new_head->next = list->head; + + list->head = new_head; + list->length++; + +} + +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->head != NULL); + Assert(list->tail != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); + + if (list->length == 1) + Assert(list->head == list->tail); + if (list->length == 2) + Assert(list->head->next == list->tail); + Assert(list->tail->next == NULL); + +} + +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List); + else + new_tail_cell(list); + + lfirst(list->tail) = datum; + check_list_invariants(list); + return list; +} + +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List); + else + new_head_cell(list); + + lfirst(list->head) = datum; + check_list_invariants(list); + return list; + +} diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c new file mode 100644 index 00000000..8596bf7e --- /dev/null +++ b/tests/cmocka/missing_stringinfo.c @@ -0,0 +1,293 @@ +/*------------------------------------------------------------------------- + * + * stringinfo.c + * + * StringInfo provides an indefinitely-extensible string data type. 
+ * It can be used to buffer either ordinary C strings (null-terminated text) + * or arbitrary binary data. All storage is allocated with palloc(). + * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/lib/stringinfo.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "lib/stringinfo.h" +#include "utils/memutils.h" + + +/* + * makeStringInfo + * + * Create an empty 'StringInfoData' & return a pointer to it. + */ +StringInfo +makeStringInfo(void) +{ + StringInfo res; + + res = (StringInfo) palloc(sizeof(StringInfoData)); + + initStringInfo(res); + + return res; +} + +/* + * initStringInfo + * + * Initialize a StringInfoData struct (with previously undefined contents) + * to describe an empty string. + */ +void +initStringInfo(StringInfo str) +{ + int size = 1024; /* initial default buffer size */ + + str->data = (char *) palloc(size); + str->maxlen = size; + resetStringInfo(str); +} + +/* + * resetStringInfo + * + * Reset the StringInfo: the data buffer remains valid, but its + * previous content, if any, is cleared. + */ +void +resetStringInfo(StringInfo str) +{ + str->data[0] = '\0'; + str->len = 0; + str->cursor = 0; +} + +/* + * appendStringInfo + * + * Format text data under the control of fmt (an sprintf-style format string) + * and append it to whatever is already in str. More space is allocated + * to str if necessary. This is sort of like a combination of sprintf and + * strcat. + */ +void +appendStringInfo(StringInfo str, const char *fmt,...) +{ + for (;;) + { + va_list args; + int needed; + + /* Try to format the data. */ + va_start(args, fmt); + needed = appendStringInfoVA(str, fmt, args); + va_end(args); + + if (needed == 0) + break; /* success */ + + /* Increase the buffer size and try again. 
*/ + enlargeStringInfo(str, needed); + } +} + +/* + * appendStringInfoVA + * + * Attempt to format text data under the control of fmt (an sprintf-style + * format string) and append it to whatever is already in str. If successful + * return zero; if not (because there's not enough space), return an estimate + * of the space needed, without modifying str. Typically the caller should + * pass the return value to enlargeStringInfo() before trying again; see + * appendStringInfo for standard usage pattern. + * + * XXX This API is ugly, but there seems no alternative given the C spec's + * restrictions on what can portably be done with va_list arguments: you have + * to redo va_start before you can rescan the argument list, and we can't do + * that from here. + */ +int +appendStringInfoVA(StringInfo str, const char *fmt, va_list args) +{ + int avail; + size_t nprinted; + + Assert(str != NULL); + + /* + * If there's hardly any space, don't bother trying, just fail to make the + * caller enlarge the buffer first. We have to guess at how much to + * enlarge, since we're skipping the formatting work. + */ + avail = str->maxlen - str->len; + if (avail < 16) + return 32; + + nprinted = vsnprintf(str->data + str->len, (size_t) avail, fmt, args); + + if (nprinted < (size_t) avail) + { + /* Success. Note nprinted does not include trailing null. */ + str->len += (int) nprinted; + return 0; + } + + /* Restore the trailing null so that str is unmodified. */ + str->data[str->len] = '\0'; + + /* + * Return pvsnprintf's estimate of the space needed. (Although this is + * given as a size_t, we know it will fit in int because it's not more + * than MaxAllocSize.) + */ + return (int) nprinted; +} + +/* + * appendStringInfoString + * + * Append a null-terminated string to str. + * Like appendStringInfo(str, "%s", s) but faster. 
+ */ +void +appendStringInfoString(StringInfo str, const char *s) +{ + appendBinaryStringInfo(str, s, strlen(s)); +} + +/* + * appendStringInfoChar + * + * Append a single byte to str. + * Like appendStringInfo(str, "%c", ch) but much faster. + */ +void +appendStringInfoChar(StringInfo str, char ch) +{ + /* Make more room if needed */ + if (str->len + 1 >= str->maxlen) + enlargeStringInfo(str, 1); + + /* OK, append the character */ + str->data[str->len] = ch; + str->len++; + str->data[str->len] = '\0'; +} + +/* + * appendStringInfoSpaces + * + * Append the specified number of spaces to a buffer. + */ +void +appendStringInfoSpaces(StringInfo str, int count) +{ + if (count > 0) + { + /* Make more room if needed */ + enlargeStringInfo(str, count); + + /* OK, append the spaces */ + while (--count >= 0) + str->data[str->len++] = ' '; + str->data[str->len] = '\0'; + } +} + +/* + * appendBinaryStringInfo + * + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. + */ +void +appendBinaryStringInfo(StringInfo str, const char *data, int datalen) +{ + Assert(str != NULL); + + /* Make more room if needed */ + enlargeStringInfo(str, datalen); + + /* OK, append the data */ + memcpy(str->data + str->len, data, datalen); + str->len += datalen; + + /* + * Keep a trailing null in place, even though it's probably useless for + * binary data. (Some callers are dealing with text but call this because + * their input isn't null-terminated.) + */ + str->data[str->len] = '\0'; +} + +/* + * enlargeStringInfo + * + * Make sure there is enough space for 'needed' more bytes + * ('needed' does not include the terminating null). + * + * External callers usually need not concern themselves with this, since + * all stringinfo.c routines do it automatically. However, if a caller + * knows that a StringInfo will eventually become X bytes large, it + * can save some palloc overhead by enlarging the buffer before starting + * to store data in it. 
+ * + * NB: because we use repalloc() to enlarge the buffer, the string buffer + * will remain allocated in the same memory context that was current when + * initStringInfo was called, even if another context is now current. + * This is the desired and indeed critical behavior! + */ +void +enlargeStringInfo(StringInfo str, int needed) +{ + int newlen; + + /* + * Guard against out-of-range "needed" values. Without this, we can get + * an overflow or infinite loop in the following. + */ + if (needed < 0) /* should not happen */ + { + printf("invalid string enlargement request size: %d\n", needed); + fflush(stderr); + abort(); + } + if (((Size) needed) >= (MaxAllocSize - (Size) str->len)) + { + printf("out of memory\n"); + fflush(stderr); + abort(); + } + + needed += str->len + 1; /* total space required now */ + + /* Because of the above test, we now have needed <= MaxAllocSize */ + + if (needed <= str->maxlen) + return; /* got enough space already */ + + /* + * We don't want to allocate just a little more space with each append; + * for efficiency, double the buffer size each time it overflows. + * Actually, we might need to more than double it if 'needed' is big... + */ + newlen = 2 * str->maxlen; + while (needed > newlen) + newlen = 2 * newlen; + + /* + * Clamp to MaxAllocSize in case we went past it. Note we are assuming + * here that MaxAllocSize <= INT_MAX/2, else the above loop could + * overflow. We will still have newlen >= needed. + */ + if (newlen > (int) MaxAllocSize) + newlen = (int) MaxAllocSize; + + str->data = (char *) repalloc(str->data, newlen); + + str->maxlen = newlen; +} diff --git a/tests/cmocka/rangeset_tests b/tests/cmocka/rangeset_tests new file mode 100755 index 0000000000000000000000000000000000000000..1d523924b536f9cce2255ee724c6cf4eeb71dc80 GIT binary patch literal 23232 zcmeHv4|H6`b@!|!TLK$Nb}*4Kf1Xu~XaUKRF~-Iio@A}PCNe0t1TMs^R@${KkaorX z5jGzNxw0vn#fd_JI!)4=#BJU4rQb1aN-Za`X@z&E%H? 
zWxs~1da<;Z<^<6S@kctST~C}aTw+$k9Ky}RAIXAjCwZ#KOYeiC^eC@CB$WGN4WI?KgD8| z@cFiPghRe)%pZyQe8SfjYKsY<Z~$INs@t1*5U3@cEpm6ca+b$v%qXS}2Kz4Noo7soaLo)kwf98(zC?NrXC z@)0iAQkh&edXUREQ<+>e+RNo-R3_Jqc60e!Dw9h_JGop+WeSPWW-eb&WeSDS1}@L1 zGP!EhM7jE%4T+cU^CsT#_M9DWXxuP3&|GQ?@8F5?#RLXFxd3fiudp24Gp%0WrQ z$GyoW{1L~i#Pl6IlfmM|lxRvNHlFqN?4A^{LV%Qro;ZOqoG5j{=agTz8@YF>U3U#cDYfW+V$Bz+aE&TFMTLcZ+nw?z$)84FDtmh-UGYlFTP`4{t~v?H`<;@XC&|y*C3E7- zq$FCT9cW3nJEf11biI<69mjr%UEbH>QgEhwP5PPwoFiMeklVhnFXogNOCbNo zd9;w2Q zlb-Y51ElR)mO2T>l&m9_Y&xH4AWr}>n979~?QA^}CB+%3T8(O|WzghRLGM;MJMB3) zLe(d}Q_f@k?+gad&}{Eb?B4-UpV&wDg8Ib%odA0WVn&hM(_Q4=*jwaoI#}f1dZfr5 z94K;k3>CTKBSr4r!Av^L^C!mX zLr`^W3!>_H2QK})aY_BbN2CdpgY+VL(0wv7l<=JI*?qp~vHYV3WX3vW4pKaw2(? zYL+X2q-#j_)wmS~>3F5-74E1&9SP{!h-kxDPq2?tFf!eCmPw31;h7*7v`EgM$jx1K z@)20|&RsY8JGg-CN+sfF zdv>1{yULTEv$!80K%mL{@nfuXR>@L>y?+G7)OEOkPa1@q6r9nA@V?FKKH*Kgh#5R` zRsTOwsLq$dKdO|%Q4;?7`zn0Rf}R1F3u-8q&AF>RXLsMN@~#1d^H}c7!SkqQwded} z@9)`-Ic(~-NAoekP=>;l{ef_VVZ1P@`w*Hb(lX|nG1v6|?jI)mB5cd7MasS(pqRP? z7hVC;bi{K$#*e&rj?H49s5z+T94$r?@l;~y3C~%0ybOjt#zQHkvJ>bWT1j^U+YR@| z<|I863IW==)t*$>!oHg*2t*kM`2t+}%Wz43l|AQ44=d`Wk$zg`q-u$#*CafUIq0kr zSOwN$%v>WhTzr@?H--UH!tFs84PQ1M5-$}$FTay?#?y-cQgfN<{T7p<7&-2=A-_;OB z1BWdCkmrZU!{moIX&ifAk)bdMHkmsE91PFzyAxH6&FX6e0Fh9~BFN|tzXlOZSRvR@1SSs3&b+$8NMHnDu5&KJVBae363*_+iTb%eN;RqC)6|j&q6kJ z_&vSy-;9TXm1xI{%y2va)_&qo<;!Kv$T2D%UxrIRz2Qo|D_0jZIdLp}8hpozhn#Q| zkI7SHF%_5EX^WWl6T%beb;tjjng_P8{Q^rfJbbRaSB=5n zHb$q>i~nDa;UTj1O_WZBT%!9xRaPfIk)QMLsz0BEbN5F<{zbCa8yBIP>wJ_x5bCF zg2X8+j0aPj5dEu8#h_8eIEYI3VCqID#vO{$apmcblQuSC;W3gP)g`zCqp~!@p&+w; z6CVzpNL`(v_5oH8VU?H90Y0`E^DK=h*{)B3flF9&Ia`TF8fYZj!fPVFoATY{dAN7) zK)fmgJtENy`#cn#`Pb+kH;_yHY@CKX)m*5nv^louI2tQ^f^62_FIsytal&#P2(sH& zoh({+GVxyGdx-l63jSGWkn=*Va-dh^40Z`+?e`I?8kr~t>%izMh#IKW z)ku#*-y+hD;iH20ha4?oS;-Md9-&_SCoa=c)>g^=nU(a8MKXN-V<^Ys==u!wbwFcs zT#$kO2kYO181$Tl2M`P%PzO^R zY26}kcmq^kBg^BXZ-EIb7~&oDkkjz$lHMWu0r0-#9w?ZEqmx1oTk^?xx{O;ILWDWE zHxNH+u}+nZH7o8k4YIt38y~Ba_53FB#nJ|P?4>%hXu)VeJkb*ZqR=xkw#d7m9=SJ? 
z86ztAo|FS%gzGEXS468uYFp91JY-qJu2RWAn0nd%5W_<&`VM=J*G6(qrJ=h6Za=_7 z7t22O8pR46dsaRj5IdqD@hi(CqfIf8 zb`RqfLhcAu9#o^`5UY%N>@&eP^-Mr<$ST&d!B{dnWbfd3 z8B9IT)ZDRkVCSK7kP#FXkYnFO5LSi>`DR5~ET3QE2~e$@ zbUETFg|`6w_DQY6zcir?+~P10TICXjS6&!_6?!qF5Roh3i(z~Q03O|&;LdrXST4p& zq%PZoRQx$!w0##JFS^M;HFwwCRg;*iX{u?g-(U8@A_E^w_E!+xydm+yhQze3&3drrB$t6q!0P2a5U-CFb6nyoeW z)%Z^JroYGh@)XTE(5Jp?(RWBnjhkv3ZZ#lOAi7_aeMnrvDVQg1t z&{(s^X!O;i0^^ZK3|UbczabQxiF9XMfJo`u*c50}I#9P;`9ar592K{Qf;(eW>&$#) zcbd=9G}gGdyqsO)CN<^dY~i(ThYzSR;3{tohtRS+!hztmZYAV)SKLZ1WNF!SH=Kr~ z`$2w6ZwqY;XYCD;vw2mBwosS99lapsjbNx95fP+_Fe3OakI(f+wC!Q;_lpHSvK!%T z2EMb0BfF%dS~{XW@{SM4mSCjZeX|h?$Bed)&UV%k3>dqDF(E1|m*Rgk(z-O_4{Z-d zgR#mT*`g-=P%vWLvc`yab_OGEfp8Po8~CDL7B6cR_m;&g%Hms*i$&r=vCZEe4N5b_ z|1h{6h|lQtMte9K-KDcoh;00NsgknBZbADHeANuFyFghacPGGq_f}O@ui9FFy>aid zs)`${wys6)#_Edds;aH3y8mi>vbcdhP*HrhNi(o>A{LB9gRQZ0|MfcEByJDI_{eksunKShumLdlEyx2_0`>w<0UiMy{2Al{{{wIw zFpm3ZaUS#oRsrrDg*;&JZO8*Y1=tJtH-JX~uNs3q;0C~Pz^?-qW9PyttT(CvZ^B}( z0q}Exoq#K_zCHkW7vNFAF2G^HHvlIAFTt9>WH#&sTnYGTz%77Z1l$SuF5p4H>+m2y z0QeB#2w*v$07bqK{|0CPcK&i|$^`s-tT||>L&PXNA#Wfi@AdlXB$A;4lh5{v_O z1Fqz6>T(|&jWgH7n}lm;v1`%X{DNLre(|Ni(cZ^_pG-~BH!qzCPszzI#1i0$f9N&X z3MdMT*AjiR2IA@2^1*5<=_GkqU@@8h`JBewTy(8LoinXYp5#7`@#= z_gHk=Y_JJ*dLFj)=Wcfq1IaG|{!bChdmQqQSn_qCPyGgZ0Q6?We7Rk|mBm^AUeI@d zzSd5EG)+GQdJ=T*H{JeiY5HlwKnrGezX0O*H7Kj5JMktM$ubOU4k9S1#S(GP+C4(Jtj zI^~hL|3Uu=MJ`S3FwDFpN~218+Lh5Pj1fF zWE%-hdR_wGv*0rbq4n7OaPGIz=jKWInf!D;=x;#(On#~d{Tqx*ys>|2gPa z+UZ-;^d~_d1AU#Ho>OD_;TxdO!<>Gfolbsebo${H@T~{mEq1<#()CP$-U@oXou0Fv z8OV>9VlMvz=+8Uo->~S*K{s#@nTg&A`tzXwu_GRy;&@mBoiWhMagSM#xU~9d{gb&p zdAX}|R8z=~uYhko?k!yoJr7!Xz76_UaZfp7r^6q)f59wdJ;%ja@Ri}-@*E*_T;zPk zYEK^SL4N^y(m{Wm>2<&@0lfs@K!QZCEiC>*PHkby=W{)U#{Rt8!ty6)c?+u^pY1JN z*^|Gq(2NwWtSPLjDJ-uoG{6V`+QI_fU)>HpCiGCVFQf||xZr^c9=PCv3m&-OfeRkE z;DHMsxZr^c9=PCv|DGPGy2xb1ixt$Zphr_mnQ)6li@6Hw-)Yi@TS|dalK_6HDU<$P z^&(YPuR8_5b0i#oMn#F9n<;7f`yWq*seB;EBw+p4dniwq@kq*PILBm2-~1@OEdhQ( zDpQGq3l!7u&72ib)>iY4)n%k3_ 
zWin+-m9>BIN}1`$6kUn`e*^k=?3oq5kZ&^TjDoK!_?ChnC^)-7@?ECjH43g!aGip8 zDVSN$`n79sHp=lUaJR9dvbwU`SXNcNqH1N;GU1@$*RP6l1O0U5H>X@g@5&r=8Wa6G zwlOO{SH!a7^Q<_uE6YVx>iBcwXA2#tPJF(|*1x%kdL0)|{)>c;FDL$Dq2thrpZ>dP zqUU0?9#sA0+bMb3peJ+8Wn2TR*ce}Ik&$rH9aPs4q z!D>Em;x85SbIZ(>D;8MiFxY9i;xg-Z%T7E-u^R7Ae2M7HickN}-N|pwUKX0hah~|a zl!Z_knisO+={L|Wyj{y|mjQf^xJ2}3jFVD@AIiYf@4tyZbN>D`@Gi{4hgG}uJVWoo zUGxj=41exXdi4DOMNa#1b>SEGdLCD3LGQG)wd>n6;QtADvUAqOCUW?;$5$a<0VMqW z3eVqyfO|vX2NeEx#ZSBJiT~m`CS&;94|227z-;y`ngPFb2D}M8>5t5p`VFo{h}#wZ zDTU{E?V#PI@Eh3{J^@%(Nbv{w|qVUfi1J9Xe^8ZU2y--Y?4s!H;MmD42Sr;j5dX7f)T-{|L0 zi~j|{yRytDS1JC?_^)RC^m(t2^-P~sPJ)xKd9QZR^jPeAhma(Sn?lG_%AbF6>Q)b@Z^U~KMyKBKPr-X9#%3x z2HvpA>+Bft^KweWPP18~7)~lZh06Yy74@P#Y3C(Zn~dRaP{@@sUNPCgjljH9v4 zoa&UG4>SCByTa444wUpfe-Gms<+y}qz~|1AcHVW7$^4fqsXtKqJ!-zKQ23`5{y{bW z@V7?LiV+XRxDOu5@XvFKze)Ai14_?Ph5zF#O~&vyJmdzHKlOQ7dOZFk^DAK+cpZ4E zmyVyXr!l3U&by$rnXfo!Md9yLeoHF6OZlNw`JqhVFIM=!RB^Ig;THo>@sL?>h2nok z>FHGbwF*B+%|G=Df49OHsCiq@OO0&j^mS%L@n5I>zfS4dtMK1f_*)eI8Q`;>=YtJ@ zZM0#tH4=-);@h?%>+{{dc9XAu!{$aD+=DZ7g4?l2G8pm2I()6|;ZP9g_4oo|-}d%! zi@)6$h=n6jpFh4+w1#PuUo04?T(PQpMRq1W)W_$KMEtvO#!d{o2De509YJ3p-qEoO zBDNAA8RTHqdmCH)(Y96?s>`$s6U9*QA>Mw<8QOp8YYn!ybBT6s%2Ml&VCw_S2K`1buSznBWtKbb+=+^1(!DX<3xa zil%lT>b-}ki&axQL|dcQ9UaKToviZ|1eU7+A2V zt*Q6jxo+KNPouA~rncTgHSj@8tC&oG>%891Fvzehzgi@PdZ3M&z%yP=kbf{IkA6r&& zjFbdvC3n^Em`fuRM{QfkAK7I?Pdlk9qh22!Tcr-H%B*DV4nG}rLhuFmlKznUCYV#X{ZKDH?8`2P1I7Xn>r`inad)NtFLP*C-uPtfq==9M(V{D3#TISgu=~FQ;Er`m2N&9-WhTwy~mT){2h*B)$s4{0uL}heW zN6g;>7>h`_L+7}fPEm=S*1^h}+6@&k|8`Z{9*S4C#M^LQU|T>`QfY@Dr>Ip1c7>o% z!dOI7uwyt%JZ-~%lZZ1o2bOyg?6E<+u8pyp!cIL{9(DmNx!$! z=VI#jql(PDI#2NT@JH_?wR~faiBkI9NrOx23vT0%RE?5dK9*xLJ*E^c;ZpjNDPIQ? 
zegG8K^&iMFnfZW1$Wr<}EOS!G%;(mPpQzkW}54C@q1*Ggc$ht_-=J^(tsGtmho#bm3pMF0``0A*YJsPcq~K zr6!XCr3`RRtutlXe-LDqn!SE~ZsquGEQV(> zXVUT-j%COTvsnt55_C#nUA3Hs6QJ1Z*U$BxrWBc}ey#8$WJs58zkYt}HYG=u1lHBC zaxz0No~iy!J}tjIgI~FmWr#Enf4XivYxO2s(({L=X?c5Dj~$I8c+`}oA)4Hg=)K^2 Z{As`G`HRM9w)&q?52SyUG5#{^|KB<4BXs}( literal 0 HcmV?d00001 diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c new file mode 100644 index 00000000..cb8328a2 --- /dev/null +++ b/tests/cmocka/rangeset_tests.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include + +#include "rangeset.h" + +/* for "print" functions */ +#include "debug_print.c" + + +/* declarations of tests */ +static void test_1(void **state); + + +/* Entrypoint */ +int +main(void) +{ + const struct CMUnitTest tests[] = + { + cmocka_unit_test(test_1), + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +} + +/* + * ---------------------- + * Definitions of tests + * ---------------------- + */ + +static void +test_1(void **state) +{ + IndexRange a = make_irange(0, 100, true), + b = make_irange(20, 50, false); + + List *union_result; + + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-19]L, [20-50]C, [51-100]L"); +} + From 6d0da5ff35830b146d246587ce2b91f4434e1766 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 20:15:31 +0300 Subject: [PATCH 0033/1124] fix CFLAGS in Makefile, add more tests in test_irange_list_union() --- tests/cmocka/Makefile | 2 + tests/cmocka/rangeset_tests | Bin 23232 -> 188528 bytes tests/cmocka/rangeset_tests.c | 78 ++++++++++++++++++++++++++++++---- 3 files changed, 72 insertions(+), 8 deletions(-) diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 3b6d978c..a7c7343c 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -3,6 +3,8 @@ TOP_SRC_DIR = ../../src CC = gcc CFLAGS = -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) +CFLAGS += $(shell 
$(PG_CONFIG) --cflags_sl) +CFLAGS += $(shell $(PG_CONFIG) --cflags) LDFLAGS = -lcmocka TEST_BIN = rangeset_tests diff --git a/tests/cmocka/rangeset_tests b/tests/cmocka/rangeset_tests index 1d523924b536f9cce2255ee724c6cf4eeb71dc80..a9362bf558726869b857c7a8538848692bd04713 100755 GIT binary patch literal 188528 zcmeFa3t&{m^*??eo6X(bB)drn1a|Jt@=kz|@GQu~AV!0L0)nD~A%pzem5w6PlHrdTChB;T60c(=iM`YO>zXaRip(zb0XRwS!sYGj>i=y3bY2OTF>Y*ywM%D{s0{f*j>b zDA7*m@3xe9=`-c6#(6}q9l%+nB}{nfX16~t_1aBEJJwU0ed%|^<(TI7cOtvwy83&m z+r3s)Q$2V1(4v}og*DZ6P0I?GjTl}yeCWW2MFR)(xJf?oo_Y2R?#Z)b3^~%d6aHuo zQ2h^In6&eTU%$Ee`PSR-`{|eqXFWRmo(!tOHj~J`FfORwr57Z`UL#VCn&%21m)MBpuG19 z%71u*a#D77 zSN@n=-kv`X#7^Ia^WbiZ>lR_P`{{Yad~4^Lt}$hUeVA{1-^#H`s%s` z+N#>h#mlu?;YDFpeLWN-O00tibX<4xAF=8vpNW*#34Ok~@fKW8ruG78Dr@Ri9Uuv>Q6)b6 zPMyOkU2}8|dY{&Wax{p44kwfcK_3eGi63V^3o9d<{l!rWKq%#xB(-O+(B$THolrKyuPu!f! znh#lJ>r!e*#I&+CI~tASyULoMupZZr535Jf@!pO24(FACqWnUVcwpage0pC(c{1IB zU7Vjwc{1679h|?A@+RfCa()`+$#e&{aQ-aHlgSQj;QVQnCsQ3*#rabxPbNCBnDc`v zPo_CAhx7d?PfOsy6wddcJWc(9666Q{c|yx`7nilXQP#Zw@RaEjcJA1ME46Irqs5%; zT%Ch4<eWZ83{HRi;ng*<8DJjj7a^uKD% za_FKpWzImu);f$}_LC{|QKTQ?o*p@d8-=pwQ?;g)gEygFT-bZm;P|fD2QEh&qA{b9 zvX&Lc%GQ-0D{I-=`q*d3k3ao*QcX+K;j(oz@JAdk5z|A7OfhawOf557TPE)>YhH0o zYs^5>sx_t}Ijl8hl&v45*|?7}(ehxQbxr%r)=h4ORO@&^H1Yi$68h#dW80Z)?pY*SqqdS1$UP4_3%h8)kLL+R6o3K#-Wx7 z5j#tKv<``61zM(HNylksi5q~%)+)XH4{6k{7qr75O3(%a5jirbmMJ8sbzO;X{S=+3 zmMOjs9Z6l@CKWANllGEGyAw&0sUw4jS6_)yf%OnW zBi+nK$_#Kb-=Iw1wgn<(yhxcSQhq>Ee&9(d@iiaQn}$-mxM|4PBOgQiOQgLi?L*T3 zXVktbvAxkWLfT`bo_XUkYDC5(-Pl4qIT=F(YVGdIkjZ5$HO98p3jdA5|1IGsJG8#{ z$?@ZD^DDDLR|H=Q-pJV|dD|LA$^?-zRis=>QtpKmZqgKagqT}1Ne!&IgK0bnTm|!z z6vIWyYnrdz18!A(nI)=mSSH}`I(KUFwl#|yk9Fn2Kf}qG{m`bhm#Bfu)>Y$rDrhM^ z%xa*^ovlNpFI+%mS6cjh-B8tBx2jDIxfXByn~cR93GLlP$NBhL+gbVv-Ds~X{iJLY zNxOhsJqE^B>7#Ysj89sokUdbav(2!DqJvmL%3kZmTJ>I z9pQr7UEzYJec^%?hj*?i1}Am1bInjt#ni+4Qq01d5y+OUUqhY;IdV8<>u)CRvUTgZ z>vd~Hk>Amn`lDgXnn|c`eu%!c#;)sbCZbH(WDmX`3*_KBZ|u#7ifOAVYiZhtZFmoF 
zzPPTJvJN>_GRX}x$yXw2J@sRfhmA#SiFqt|tYueA=_k!AJ_)Z(y+4MQ2N%mcteb(o zDTsM)T{q1tt9-s}{pd{W-+NGk-kxk7dknHLUD#xK-bR%kUJ_;DygCl+Jb&09d2c`1 zjjGDlxjUqVmijK4vBUAj(6)AIYlczY`RMrZ7RZ^5HHZD6g)P+zCEF~(ZCVbmEj>ys z7?JF~pwdP@b|o|}J@zo~{17+E-7G~7VkR{n3;(k8=vpJWx$NuMf=FSHlN4^>bJNl0 z6-TwJ&RsWy6_9Cqlk6CymSyXTJ;7!02{VqhoW-3Ueo~7y*B;fW@$#31^Tb`zRf6F+ z9d4O%NcM`nZO@|*?DQ+UP%>;iCu43w(s}`A29A$}iaOK5x~}w4S>-P22v=`>Opk(w zt)DDw`JdK%Kjh_c40|Tnc><699flz7T3ed-H?P>QEiYVGx}V*8AE+X3eJG1f88$G6 zYhR;zor{vzKYRc+OZSUb@O7}uHmvtk%L*PXYk3MAetl2e9i-8S9s_IHx|Ab@aS1V! za4Q$na)!S9^u4p=_P17YPX?^dgdkaJJ$o>IW zmiXd>DQqQok7_T|Ltu4FOmhQ(a^oDjT`66p|$)lJsx@}*D(p4~dV`yFJQMUkf zhenmQF3H>WHCY}Ol*01p%*mKMQt=d?j?%Wvj^=OQBXkGV`bSdVyWimpofl>N3M~ff z9_(-XXck9eHxct4KN!Uh&E@NdJ)46rTXxFaH-m?&)|KuNqdtLK3)yhA<-bAkzPRiu zkbVD?r61`}mVP+t$+@U%9tV2VSS~u?b~SmF`%z;v=momm94g zHZf6&&mdr3>5er!mf3D~QDXHUQGMWQ$XhpK$GXxd2JK=);}&TMa%*-h_lF-Y{qUd< z-yeYE>NjXc%GR&g)$%AT`zN3~GSp2Fb8t)tO`d3Z?&{a6QBv!1Ej!TsNAcztpgBhG z4hmNTO#|QLt?7iu80)UMsc0H@vAJQApVH-C`g>U&Pl8RTM^Ogl1K1;`p_7BDAkGE> z1}RP%3~8BMKPFh%QA#?fT7 zlg31HhsrGf`H0xR&e)m)xMWlSJ9@L)lM~yRJ%x2vqZJCT-AQ`m(N>m4>k#m%vFu8M%Pm74k?dY ztw&<_ZycdUan<~Zbx(2~TdfuzAkk`7;M+!BV6D~S4i{G2NeQf$)q!ebl2jvuJ%Vdn zYwg`mwd;_NTkXtb)h))o@^T2o#ReJ_}-h8@sv(Bq~#OwSJJSzN|rxI1EMSx< zzH8>&#Gk;tt9LXFZ&$oeiu1OWLKZHeKjRh+>+}gu#vDV^y7drwZt6e7>o~!@;fFC~ znGe$3n}hK9$6Daq+a^aQZ{%a)GahSszvXwhJWq!Mi{C4pNbQfo)PdRsp4wAI?ct(! zx~M&l)c)&1SM6n~UqNkX*Rk3dkEp#K9we^z99VlRmedwJ+1|ab^yl#zyu+Kpylrzt z#hUDt>>|dKgS(k02kKQ17)x)Qd!WYcna7e; znz&SO-Jxr@71rAwUaKwK>SzO6#mjDRTOH-Qb?Q5B+Y-_6J-WkKKkO+^#ym(G{`?&> zqNp*d^w^T|>n5YGPx$77Cvhb_11|jdmi{{D>1pKdwnvmz*zM#I_=}2yVlH7yzzzAk;P-uH^~O{4nY&_|BG>Pj7h}^e@E$?vTOmY|TZT%o+63Ty1}a1?v^I zM8LY^!0)kkAkynbEDCuh$;PGRF5#DXwM>;xwxZ=$n$@3io>ogjC)3(@vf*HTJ6k^i z;dt@^dYLpk*(k=QnbWTL4HO?N@&5+I-#LclQ? 
zdYvX0FWKa9TOO_sON2RiHu!=AzdV2)oa1u0)uAod9X@!v^z%i{yNeD4eo>ztNWs;B z^+ad@R$=MBgVD0}1iVe&g_3>l;-j5~D!IS#nlL>M(%8ald>~oE@*cv!vvnIVGckCp zyS6Smz|HyjM)p^_bQi+xoA}b*S-OXk=|)OI+pmz}NO=34XM8DQs2Wi1!bvOsIzi~6;)!S);^qcYFD z&++(Op7>1-HoYg@O>e0n)v``nJL0G(j(zS*qIpHsEQDkq=Jea}z^qL36AQh&AVGTf z>?}R%&J+(+xoFM7CE%wuPXA+_B;GdcW1R%%O}d|uijj{sPVegA_1M&m+PK@!$Es=t7RY$C`#jymk3R@#%uG@fQsvf# z@5^u<0y>nN9Jioep@Th$Dxw^d+`?0|HX^s6xgtw0Q&Hr5d`tH z<@mT6@N`Hx-s#n{=EwE2k$-4Zp_{GNgI zm_BXXlv84*%POlDH&!pItEd^jsBT_0k*CDwR?Lf4)GuhNt*XOsHRL1wy&Lj-^ls=e zAa+J|P1Pwep(j<>f$lx8fP&by)eQ~w8<3jnhQ@)F+8GsKoHvet9s=1~_k5}-9E;z3 zENEO98yhPwh?gy_LciU|#Pai(EUKOt>sL@*&{o+9L7%Hcf1bd`<%_FgW5&d$mrp_m zu`92PNk?tv6Y3gIRCP}EJgREzja7j(R|fhnaP8prM;wJ?>#CMDQm-fGBfZ=Bl7_|- zyXWV#NeW0zem-m1uOP_=G#GH@S1zi<$j)9guc}{x+o+(Ra4e0GTg%W3U^INZd61vv zRM*X4)ZuKPx+AONcUpBzDrztb(mqyISA!K%MJpm!kKgy;XFsuq>dSfFgBpIU6I(Pt zhTrQfs$VXKnp@jYPSz<$;oPeF{DM&HYZU*_~~>RqW_)Vry$chhW8jrC1c z+Wd-|hAL4*yCTNp!1`<$5UW|#(6HPiArNHK01-)KY=uU@XDY8|Rd@-PH`URym7h6z z8p^MpV9$$+3kMY!&z|HF! 
z$4rBy-Qdp){YUNAfw>7wqU%UT-GEc6@G*5qHH2Va157cHtOhz*j9i0zU%5HUoDDqfYyccKAf%osH;^aSI$GHZB+C)@4s zmRX?bQF9dirj;-*>lH+IKtF|)9Mi+EA{q?zU=Pw_q@N<4g0%4u$B!>Yx)|a-_SEhW8ymei-R3NF%1E*>Fe2NK27ULAn;{Vx&Jqx&i5GL?N~!o%SZ$BfS>s zVWb)ycEqP?mm)1j8h8usk*2(j_DH{mbOX}!k#0r$G}2v2k0L#cbjqL59yb72;P$2% z=_g31AUz-Vkc*N25D$i%kUov{exwJG?m>DHV5(zCuSJ@RJBPcFjzIbZ(wRtuz*(0e zU5IoG(mRmuKzbPIKBRX6z1D#8T?~LdhExamS3;;CZgJ=rUJv62Y%S6{+{JB0nu9yY zT}bandKf7^RE?kz7CxOM(e%rwY5KB=9!*QNHt4AlDp&ZkUOs-jzso~O$NU<8GcNqO zd!Z{aW<<`&$UQ3@T57G*PVF|T-;iECsF-A)j=wu0w;L6V%ZOZSj8FH^K*z{Z{XG0_ zL49we@%lM!^{Y|*1Jo}~tS1^DYJUg*K0$pI6?pOu5kU1%P%+lvc}ewCxdW=-kH4j; zKP#!eG+s}?j`|+zZ$TQDZ*J6yf!g;%c^9nR>yp}E6K{Vy>aY5O`gy2-3-$Sl?JK!C z%U_N9KcjwpV*OQZ^>?5?f@_86%^Uyxw)!VfA4UChiS3PNc5Wq%M!ts9%q3pZ1)%{zT(L@}EHccTpds0?$4dwdud#t*=k4H)`YZeb_tR zL;Dliw-@R^LH(?x_UFgjpN{$w*pvFh&hh@uKJoQ45B1}*M_rxN{>pg!)u_J%^%M$; zw>Qq?`d%oy1NDDI{WM=feQC34mid^6nvorz1z!MrR*VWdu*0D*sZWD^YJVa-4M6=x zs6UaNCZT>l>bocQKTA|3{c2FZ0QLCIW1D_6+v;yX{XEp4kyvkxi`(Ho)L)PKixcZ% z2V;8N3~YzrgKr1;Mkn$uZR_VK>R(0uq{MpTOlBZGJ7F)ku;>3Oss5+&`k|=*?ibWg zNBwX>Ki^MU54Z7p=mnXLsNV)S>P)Q5_&l9?gW2peM;Y!=NRQ3n+XEl7BuUO?aXAm7 zejt3v*2H>R7k4rXY0vi9558Z*$J|8)-nuYukB`R(A2bHOXI)bLwOoHX%6g%GE9$GL z{p<$o=oV3763!JpTNei5`z)1_7w7^LVhy`MS5+S_QH4NS2(WiYnQ;Ic& z)%mk5wS-grR)Ig>`OB_u-aAK)(--`4VFQhy-BEIQ%p5MK`%n6K^&fn8d=ce086}j& z&vrlMlRR$i__NC>;gm3X`n)3vPKe4U*G=&o)T9p{P2^KDhg0t!(Q*gu;S_*7_=8_Y z^JlTYgj26P;^sZQ=Ws2aYuwc9|Fh#Vo_#4LT!UC5kI%C&ZkIX#Q@7r2{_ji1&gro4 zJKg#IrJFwKrZ2ha+iv=yoBFL1mebiy`?%?FH$B5m+jn;6`0=O2@^K2hAU1s9pn-#8 zgNp|ZFCI}mSWBwGDOk5=NU=5mLS!CgHqLO@^e@{Bck{yk4yIE0KXLhg7+)O84TXOj zma_2wAiabcT7AiUUQYPW9pdq?&k}yd6+EVdRQNMSdHj#K{Po^qOW-9^_-pXvA<<`0 zw-V;~$XooJYx{Z|azs^2FFZ}t(%xGQaPwQ5b|OO=`@=XVyoa7^O)K?XJV%?lbwwW} zM(Syfu9?2nKLZ3Zb$`YYDoknUg~Ry16yW1%kn%xqwC;ZuYMN^y!g%vVkgJJ=v{$}^ zi`w#A-=?hY&w+qcqOFf1agXo49+=6%7;G!P_vfQn3*4Rp*&p3REP-SHgYsiUM*>st z1pQZ{a|6G+5A?@G#{y$#gZ>-Q`GI4=(tV#0T^v~d8_=H;JtDCCF3_J5T@pNY8E8$X zOj+2!bHAoeI_#31Q!C3^mWqbLt1liIF^|&S6_n6 
z+Tc+V&|O~%whh4#UPVoTz7d&C!A~gDk270><{yyh&zZY}LCOr^%+_EgWd`bl(B}T& zRj{zHh;`c$tV9RCLHdp@T{O(mD!;FV@;8JEBVgb%+28KZP658Dre9xe2k zn?a9Z+HY-vkkns*CGtC{m2wYtAL+CmHEZpr@Q0O90~UW^z@It_AnLOr<}BQGzeO15!(ygl{Tz)l54dG}UlE zRZE)-`eQF|YSsqy8413Sjmtmq@D|80$D(emo{~Z(q(|4Oxa96npP;XSmcfHDR8Qm! zLJMArv87MaYoWL=_`3&?nar7#;3`aJ`q`ZE2WMcE>F01JHF!@JI-J58E7)>7YNqPE zVuKYSWTvs`+~67#a4y%xf*IIi(x-DKKj^2-49*k>zfBsP$8AOgr*1{&eBOLYf)!9I zeWtrd)m9*LfxAbQ0xVCz(A}d7-$mvk&ddq^G6vDJSoFf+^LLzeRS>rb$Sz)eslBU<#xG=gA zcZ_VcI9*e_A4fKw*6bWta__k0he-0*f9^4mpar#8MPTy~(T3Cgi(`P#1 zI{iWzH7HJh8CbNSIQ`o|T0wF8L`Q<+^oh<5iqj`L78IvXbbe5rKGDTNar#7$2#V7u zx+LT}eae)DT&GW&DIwSCQ)Xt!b^29MZ%)W{`j;ZJFyuOY(spskb^4TP47pC9eD<=C z>+~y;Sru}f{w2s<6LOvYWsue!a-IIw$gB;yPXBtaZ3wwee={9!-}I{oX=&l6gjIQ@X@^l6Ymar#7RL2>#tTq`I}pM*rT zRBI^8QY~yyS~|@rsht5Qg-@zQQ^}>@@=CL{Fgg8*>-4ES;yQh%U8hfUs)fy8vvz>Y z6sJ$!XNuEjQk*`M;`B+U^e4e(nDizf>mzC)PM=C#r=Ja{KM>iVIDI0~j8^wpkgR-I z^NG{%%FPm;zE7O~EU^2;=~ogdPJc9!;`DiWCprDkJNl;~-F5U#ilg64rQ+xhfW*#^ zZz^IIC`qg6h}{_PaORSkm=*W zS_Z-uA8;N0aufx{(HDZ$g5v1O<|RRK^kly>Eu9*-cmg0>96hxUxQ_nUpo8M*iPVDP z=wAZuIeJ>Yw7$NHvMyASOC>1y4QQV@daBD5NB=&^RO?lIM_fnm!?u=cksfhJzZzS7 z=wJ+EoT!uDTF7U~SE=56yHK8L{RrP(?#EXyeWBn+BoWuq-%7OW=&6H< z>*%THi0kON+;#L^?mBv!p@{3~iS~)3C)F~=(Pxpz-Es;Ztmw)Oce^SR(aRr~z-Out z@Euh5o{9Tk_0eQqOH(Tc;TA*v33P_Kf4HuNRTExt5K*s>*R^@-or$_ur9OkK`D$vR zt}RgS7wOtU_31cWt5y|f=-MUf>9ck1QuY0zx>lnG;HINieQ$uS)v4`cb#0Nlqo1xV zR!8xDnQDNqu2+GJ`XuQV*eIiMj^5Eme~tf0^0^3CmS0BwwzgLv-y5 z6@qS8s%iLMp}Gy!wX0Mu%2%p)PuI0o>I!Ibwb~B;Yt#(T&FX&Wyjs1Ce%Gi*@L#K* z$MLI5^O*u=~u2-!X={j}ssk(N9x&k~;DF>r?T8+a&jb~KK*L3YSsu`Tm zs^3EY->Q$n`8#zhtnokUee|+RO&YChkg`@FgjV|hUhEz`B%t5;F-q8g39UQ!{9 zZnx@PobIMb)9lSJj`;%OBLWn2XocwXo+O)stwoPn{3?b@dd= z-%$Oa;hXAF^t)g6hK6sc*YN$eI#{f0e^MbxYgOAYzX#MSkoJyx4>})ISEKfjS~yA9 z{%pSv%?IfAbr4jj+b@pMwSl_*D5{Ed`*!qEtlJkuxpJalU#bbAG+bEIzT==l`gUWN%8rQ6*hWwdVp7&6D`_TP|< zMei`vsk;5U3A$FI+Y>P<<8=E?=z5xN?}Yl}QHN2VuG`&Uf>Pb?+fUcd(CvNb{Y>31 zLVsntodP8$==M5jc9w2$MBPN)eh;}xx?NJLYm;^R=V*7fZqG)8b96f!cAlc!cYtLo 
z3Xz+p+wWj0o~zpnz%pI8{|n1uhHg(sx94Geka9l8fV!EwT?KV7(CukxcOgswLtccj zqla0#eFa9eLANn}ZKG~CK*~3C`(Cizq}$fny7o=o{wY?)&ANRM2DwGI-$D=H((M~y zkWIS%8<_6fy8RqF`Y+vHiH>fC3DA7AZvP4jen+>b;!?d$w_7mx-_`9WForGY0aw}g zbh|(5ZrAM{X#RcO{w-MU(Cz1k>Dmu;`yQC}PTdY*I_}c#8t8eqZqLMo|4_F}(fl6W zUXOP7Vtn8{s@oA5@&nzTfZT^r5TZZQ?F|^_G2I@CRr^=n{yj$YF@^$Rf79(-(clx^ zo`u|}x_utneWu$VpQUTZb-PP{Y&?ct1%v2@Jr*-<81^s-GY$JjjL&D-Jz$p*U(m3BjjKIm*x$g?NHgqiSbXV*eG$fxVc3312^;pN z$=JRO`v`<(LMAMcW!R^nFx#-_!Q`C`n~q@R81`N0sIy_ejOJYoy8#{Lf)keLYS`&W zx*7Jz7;O|XF=NWG@5PmG8}_X*q+{5NAuP|Zw}LZf*ze;a=?=}1^f2t5klE9)Ps3uE zY1o%ziZ3wiM=^%$4SObptuyQjOzI8DAz5$O!@>D=!%l2ReG*ux~-T7YzGVSoima{ZkD3Ma&Ua>q~}x z4Vv#p9op?N>|FG|*RYRbnZ688u)Jc}&qBdhAqCU_2gCjhQ}r4q4n6$Qu=}BI9|~cm z*C7*HzhT&yqK7vP`+6*}{TKr#=PgV>M*Fs5e*%^MWZ1P>46TOUgfSd2?2oWa-!bg} zLhlDLS|o=I`z4J1&zLw!`HNv^p!atT`#Mx$DsB5hJ7Do9x?1P=yKGs z?}5w@40{Py!H0%@7=!)@ixvtVGwc^(iNAsctMy~7av1V&hW%}@e1g@A=AXil5cZj2 zKM$>s8}=`tr)Jv4Xs(-f9~jFp?G`9#n)Z>2xK>PiGn7v;?Pt->Z`zMyF{GOIuV6vT zw0{6~1E&2OC?7O!Um>m))1CtJq?vX#>e5Yn57f;t?VT`B7|r|PS~2ZsAT!gnXQIC> z(@sTQwrPJ4QaYLT16cDprhRY#t`*ZRht^#n40XAt{S!3kYTCD>_imXW_RcUm3o+V3JlWIv=<}kW!i5*m(`}7fq7qp z5y7n2n)VDxxz4n2Md4Z$V$dz7eG9t1-n8F=k=L2_DPXz5v`>d->rMM>(DUo2y%3ha z(X>CH%gMC!=yEdc3fTG^ru`=LyvejPk$ls%7hxzjoAv?N>=x5*h3IdY_Iz}-$+Uk7 z(cd=hTuAvZm=2t`n)V;DA~&1%D_Dcyfdw(5+f2I?tovQl_M`b0)4mhxe$TX@hh1(r z?Qg*#--k*_?tn^XV{10;i(u9}P5WW!c^B*ggWPS}r7+eHQ3ql7n07WUwR=sw5azkh zwD-XvKQisRpv#Z3LcsFhrhOY4Y=!Bd*-uQ{f|Q?{_9!U$Gt-`c`TMzPm!SDCO}had z{mQg^V;=4|?bo2oHuM0KKY(=xb+?;#8Akh{X%9r*L#F)}au1^&*6t&w){X8e!~6Ao z++lY8J#_Ge59vR^?PcgyfL!6f=ya=zJ7KB{zpG!5obP#9DANBykO3wm!(Im+>yq!k{T&up~07~L4#ttEA|B!Hh~_*4T6tc4SF!q z^kDT9>XJl6hEO+FFO+w(B44{0Y0guam&izZ;>vj*W{I3a(sGW&P?1rCKx$5RtOot+ z&0u!i5ZHB8j*q^XO7m`q+F(EdfULY#y->mI1Z1UhZ;p-YN3SRDZ>5z`Q>{(LK9a$$ zn1N)l#Cnl9M~l4*3+uD=khHu7`vHCQ&Gb|*EoTvlmguht;$oRtU-~(}3YIV@VIuU= zH&2SUIER!mHqPum2Umwam25@rV|UYF##Oj2VQc>Ao3D+x9Otz>j#a3S_gePIrX8w~ zDm~l{ke=H*k7i9Tq**gI&g#yrMIvX8Xi)6Q(dbQMKJ7eu=#;?SDN_Z`YafC92GC}L 
zrwj?q8G_-Ng5lAa4Z-jX3F8?O#xn%NGm_(Xg5ixoBU3OuBGEoEaEF#(6u6_X!GBud z?lwFg#{+ltOYeB#jr;(vha zz#TpABXBqU2(XHL>+m_)7E&|{AqM76gAdgBiVZwG!=rtHv%r(0ZG;djFe^?++6Vv0 zP;T&#NH6$DCi8-SqzDCMYEhOD{G*|Bq=-sM6%PK<$r~}evb{-=V2I;@hCqTwLL4wON2jSZBo1hJalo_}y8^pncra}@lLQZ@?Jb~Ef(O%H z9tl!QyA=vip{r)voyb!S=Me||3Gx%XycY*ddtok$I1ZTh>bu~8-H3_ffN5`0CC35N z-X_u{crZP0Eyy(50ycqg#Y-G8qsP-I;y7SN{tA#12h1oS`%4@!qaWE4aX@Ok*AoEQ z1P^A+qV^I83?Iq?M#OQz@L!0OIAHkQKA;6249DYu`_ZfmRd{j0C}@-5!SH)j$8o^$ zk<(D>w+7&w7X4~8e`zXveHalr6Io%EJCV0e=LAQYE4V0bcTBn}up zn==vz44=api35hGa7N;Q;i)<;-U!D5!_!!_!~w(Sa*f0R!_zq*%v zIAAz24j4|11BMghfZ@bAU^p=j7@ozVB@P%)j01)f~93{4#DY(&?%w`XCIuG>O zM5hFDsr;Pv_+ge6IE3viGUW|4jRZyz&s3sw1LsirG@@gHfmD7j(fNVBcYt&HO%Puk zD2DHh%(w}1Mqm-pymK5fT&-V-Wx;XC@DhC~+WD;&`0heS;d1E<1?5OM4jEpmUrKcO z6ZmT80dgENoDhc$uaRE9^G>O+g(3iO51vikDI8CXDcKG6})q{dkVrD$uu4kKsv-vrWckubj9 z_0e9J-HoO&${=c#LDVRNs8I${qYR=(8AOdTh#F-OHOe4rltI)egQ!skQKJl^Mj1qn zGKd;w5H-pmYLr3LD1)d`22rC7qDC1+jWUQDWe_#WAZnCB)F^|f(QEK1ur$gbYLr3L zD1)d`22rC7qDC1+jWUQDWe_#WAZnCB)F^|fQ3g??45CIEM2#|t8f6eQ${=c#LDVRN zs8I${qYR=(8AOdTh#F-OHOe4rltI)egQ!skQKJl^Mj1qnGKd;w5H-pmYLr3LD1)d` z22rC7qDC1+jWUQDWe_#WAZnCB)F^|fQ3g??45CIEM2#|t8f6eQ${=c#LDVRNs8I${ zqYR=(8AOdTh#F-OHOe4rltI)egQ!YCR3#v)5)f4hh^hocRRW?a0a2BJs7gRoB_OI2 z5LF3?ssuz;0-`DbQI&wGNCa z3pzJYM?7PQjs?b3`B#*dmps-Xud zQjTj-cqZsBOT2kIly=E?j}^ONB=-;W0?@Ee?Yq9N0!~kQ^SuWSED&m%I-Q6DPalp0}=Y@ zo3D<`*(Guo#^{kq-$hUBK&fNrU^MzJkuytk_F`i7?Q2~*fPDC)Z{8l4v)z+(3&y5z zmzMK12aooe{=*EnC4N2Ywmced`9rs5Ugbh?(htUsjc9lktf7DBO6;_oz*bLe#>TSl zcaUitWhgb>&nJ^KF(f6A?_ih+vz|oe@$1^Jc%+7==AT05{}+$c(Dl$UGXIYr zsiD#SryZ#woxb2mO&m(^=tvDg=?O<_7)qBTHHaqBk(xv(-D-oJ|C5f?FqG~dsUavmj?AZOUvi{|?El3_Y8XoY*B+^1DE*&1QbT(G zFCD2N(O>mQ4Ym1CJyJuu#gEibrsE?uRP(=jq=q`^=tvEh{~bqa7|Q#B15tef8DO9I+NOjdksZFDwRgRkGvkFcI`c_ zzWL{1P!*^q0wH``Ki^}fJ1f7{fO73Z+~oMQ7l8k}uKUOmaoz2Y-9CythPhK9Nn+ow z2RAJ@y3QwLQudq+k#2O|Jq6=#itg^lzq0q|(G8$~9KL8+sRWwi+Q*@U?}BnqA`%HE z68PWvW2X3_1Zo!KU*%gp4ua;eo0Y!^q=en9f(t>)USahk0cDzR5kh!Y@rTIx@zkUF 
z@p}mt3k%TAlr@X0LRJ@)r}e;BI(?yloLk7kgP4|)k1sdepMjqSY2i0kVh8SFWfI;0 zGSIy(m>KM|@f{&0YCwpj%$vcSALwgsM^P3dWNz}5gxByKHEd0ti1?$1p0F(H){>C4Pb?0S_VnZj$MhOu@?1|UAF@CsTNrzd+S2bB`m%Z9c${G zM|kw=n2R1&h6kJVY$f9uu`Djw;J$fk`pGOY8Pd&=d8bTj+$K9-)M-Ck$ zTFxVf{z|l*M-F{Vw46r{{f%fjj~x1hXgQA@`jlunj~x1p=#nftniSG>%9Lf%(WH>h znJHOxG$~|oW@Z*0O$zz+SE1gVEc)$7$gjVF%)%^+WQQ!Bv|XGY;vgl}1s6S`!&Z479p#hxPnng#GLId?O=yrb=9Zd=q zv2HuE=x9=Ckp3C^c|yyfqe-D#f_yY7w3!CUKS>CEhe-KJLg+RcuKXk+bUTd}KV`7) zMOnz|3t@hmMN->c@ga+*l1noX0p`+dtuq}>%BG`9p;0=OXVXs-5aVE)ev%Lx!*mxq zkCb)~_1T5aBcngx?LH>y#|P=%ND)6CDYZHX%hfU}k#wOSAEZaA#J%<56Ga)t z?}Cp{6lDyedEgU88AE8<@`|7Q30G zpASU#W>YEsd?502Pmr2_4Gk2OyT|x9AWt=%Pu2XlB0t2-n_7@h7e!u}gd+a=K;%^l zqGNX;rY>~4DDoCncA?Wnk++Eq^0A-HLz}1#9s9}r3y~7n&wTfXpsgsK@69|+yA93V z0+gjuMLLzB;5N{F>?iX*s_R0>elm~z8f3`&Z+vIdv7gMb`fe_v-7S+&oZO!|K|dc` zeb&JkN+$9G)3WH;Pv#{3PAKlnqGLaqlR1-;MaO?d;$XVzxXv7gKe&TPn{V?UX5S+`ADbnGXyk~5pL z=-5x@JZ9UHMaOKbcGQI-)7Ap4rSDWYe*q%+=g;HXZxPTqEUlGm?3&l+&@F z%`JVr_H8 z+UAJ0%@J#xBi1%YtZj~1+Z?gBIbv;d#Mv4ZF9uh=7_b;5o?`JVr_H8+UAJ0%@J#xBi1%YtZj~1+Z?gBIbv;d#Mv4ZF9uh=7_b;5o?`JVr_H8+UAJ0%@J#xBi1%YtZj~1+Z?gBIbv;d#Mx){aE19f?>w z60vq9V(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w60vq9 zV(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w60vq9V(m!8 z+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0r$-rnu4yx){aE19f?>w60vq9V(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0 zMx z){aE19f?>w60vq9V(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE1 z9f?>w60vq9V(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w z60vq9V(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w60vq9 zV(m!8+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w60vq9V(m!8 z+L4I0BN1yyBG!&XtR0D1I})*WBx3DI#M+UFwIdO0Mx){aE19f?>w60vq9V(m!8+L4I0 zBN1!IjaY}>i1puahv}n3JbM3yxVa4Sc^-Y(deHp4OMNt*Z_xtu!%cn6GoURk$me@~lGg3`AdsW>;~_?~ETeOOa0b>xs1f}Mo^rInc64kgqJ5h0 zfpTPQlI9~EuaiyXLB{bq#pY8}!tpxwAjyK`b?QmQKFvQINfZ9^2mx zoD$S%+BDK0AG{O?Ve0PdfgAby5u9aiynkhn3cM1HhH7k_(qnF%F6nt%E!YaF1wzU& zC>gLp~r_c7yv2lLS%PWx^D!HmYnVqDo 
z7o{hyC#!f0sc3H8x)OsrYYzCkky=-}YRz(a_04ObR$u%xNnjunab(d-xr6Gl3}-jV zsK&+`Sen*G?-j6tG3IHoPxXcwmq(>+Bx8&_b3My2TgItWK?k%#7M1s|nu|i7+&^ZO zaO?WGjDpUz6lir1pp{Dpu6TXBxtZO`9L4}K&UBGKPmH<_+A4;qfeC7%z; zeQQunglO{$7_}R^(&~Q~L1Y(Qehs}-V7UED8e@5y_86?kZ(zzg(Hoe2duBlBS9}9g zO7nO$Vnj1#HIZNX1}6V|@i#E}-(Q9n@&+dVN4J5LH!%5+5iM_E^8b}+c>|OGW1{5^ zO#Z(SEpK4*e?qjpfyw_V(eef+|7S$Y8<_l>P8oRvlV9hIyn)GYa7Nz11Q4a|4NQK& zPH+E_H!%4vey5APfytl78F>SfKZD5qD`F$H4O!Hsx-ggolH5Av=?6ie`i8X z({?i{hprA%sT{ic8%TL46BQ=DlZk2)-^mohHp6-EolI#j^gTU`j84Iie}O)HM}$Xu%E zN+l?u7c4S38-9=KfPVs|J|b^m`U?v9olH1%)fp&YszrM6>wxaZp{rXEeB*aA;m{TD zHvCQ|9J->3lXo)V&=qCmolH1%MHzV~6AoQbM&8L3p28V&3W6|j9k35Tv^ry_6`hpuF&B5)Rmt|&7lMBpqA zT~TIch`?DKx}uD{lL?2eNVL3@35TwzM&8MULsyiMcQWD76=md|OgMB!8F?oY4qZ`3 z-pLf6%eu)snQ-We6qR=};m{SaZQ*w^;m{S;$UB*E=!)K@6n`fZ*}W5ivpDpmFfE-3 zoW-Fhdf8&C^$5OmpTSpG`a;3?knlU1aOjC%*u?K-!l5VXfZxf4Lr>H*zmo}vp19mS z^u*-^&f?G$O&7nD35TADW^gvri&W!xGDSx9!mV@LJDD=OZOA)0BG-JOL`&Y_+?9a5#5+-{IVszm~MCFWlQ=XZon^F(gzbwPfVM^ z>w>e?jTJ_D4#3$7NIMHSOOH~W1)L>moduj71X9ZeIC}|oEZ}Sl?whrsfU`u$!C8{# z_~cPaOg5q^pet=4!qflw8t;+PsfJnm6)o_;-4jpp~Yr<8J~t9pGs( zMiy_>iKb&VS3(dq029vH#27!kQE!F{bW((n#*I3S7#LWO$6Se%T!{i8-mKF&=oL^f z3K=2q8B*d3mib|?5@x>3ZK7}feq4zwTqUHN6<#+C)0tQ4WCr}Mvq#kJl~5h}NUztp zy-w-*1dX8CWuJrY^Qhod7Jf$05^ADth0W-rZ>|HR+N;8=H#e?b*=ufFZq>@}m8cx& z>N2x?B`QyIb5lgK@t)c}R=d(?xYE1VqwOSDv6AlfXgis61h4kk?sC5@s%Sm^WTt5@ z!oYvu3FhALSwz17-r%sB;#da*g_^x zwa6sB|3KvzQTf|;2{V5`Zub`e3HN!Llw$^*Xa0aHCD32%H=kOZO(jL{Sbya*>zmWNXj=aYw;I=Yf2=TO zr7%Xn2_)+@YPn36aGSQ4XSyx9+HL95Q?vn!`W!?X+5oqux2fO|pWAYf41)V~X|xR@i`+#BM zc7+Uyu*j;L)kh*MlAGB_A}nMYxDSy((6xkC_y9(upG)pzsw+7bmwXW<_eX@~T-kya zYK0PyA#N|2(#GB5a!)2gbnga6P0!SOXJm)WjA%xto|PKu^6k?wlSXH3R;PqUg(l!y zHhXEI&{(j*tGXn@Hz{G10&}EveipGFVCOEvIo4ykpBV znSte@5_uoh%an#4_ZbG)V@>D+NLt#>FCV=KT zConrrZC>*1#RfteN$Q(C$=%yVN0Z#6jV)n{Pdtrn7n12&muBw&=T}wxnTvWeCsT-- zv+25&?ICFrlddY7LiZw}xrrsu=;vpy?NUydJ6a1p(OT;j!p=zl6atRFw{c2`-;yi7 zPhw4i3TP#$YHhBiy-Q2pR72jLo96kFbnzt_vImC~H?2t9y3UM?VONutycuV^`%ZkD?F2D# 
zkC5XH1P;yHn7g#?RN^CZX&>&I=n1lI7tA&;KyNQjxX5T7^zp9IzR3$B|MM-}OZG%-Bhi|Pw>zF;|L^jzuGW0- zlan~(bh{J3t?QCJf`>Ku4_ZDl0tk z`nwq3%+QMPt?PfwNXNIX)BOVeFKTM2FRH1YTeP6EvS`_e;pM}J7A~$Vtf{VRT2{EA zuBmAFK>Vw|Mb&kcHBIxXG-h`*MRThg8bH8jqfS2ceiUKxWIRWDy$)c_K} z3JhDl2qd1fz!4{xHb)C3=BvepN+d6J`L_Bpc;K;$~y%qWXCa)t6Tx z2b~)lo90plp0=1Q96nTXJT9=gV=F(iymC?9{A%hN@|IREtf&_fJ>yp@pkWLic6m*8 zEwT{qtpemQFfUhLwTwngB@0}k9Tc_asI91Ls;FsSx`@<+By)8fX3^;H$~JRQKmb66og%P0!x*DR_aq13#w9&1l(mtnFi zYFve-f`$Z*$?Uv}Fa_SspfOfmox2`AGNBazO1f7XY$M@)xwH_=ow`CkvD-KeL5mmF zyYpB{;}KPGRnK$hp(H&!AGp3;no7YHAwe2 z`kR)%Z1m}he@;0C`{@918`|>!S9jR2u&o!>k^Pc)&Hs~|GViLD|Dk&_+XgF*uKNYp z71;q$#&wQuyB;p!T;1!VLjq9KE3je@f4F&&odo{T_ zZ{Nml(D7akrvIS5x`PcGD+9Y)(q4^xe1C>lh|{#?{k?lN#w~kwoIvaNrW>cfe6M!* zXLbNktZlDO+PG2Eey>jdd-rPZ-*qty^{u-f+P(2d+aH`X_@Uho9@%bnDtT!4QA)P| zH}Bu~`K{edJe5G~krI3xPvOf%-OJm#%sLBfE_sdD!picS_0~&fBsTa6b2V~HzqP07 z=!0)$u5%)vrF;0KEPjdYb`y?pV=97{f6l&W0B!nfsM+tQI z1+tqZK~7g~F^Tiq|RdOfziXw>x^ml!=dnU_9bc8;-Q`UYH* zp*5X!O!fcn2N|GiwH1pA7y3_qnrYKe;EpeOWcfetByhvpv6G;^warN&_f?!kvh)6@ z90nBp*EtLb`*Mc?rvD&^aq=OCTw^=H$$NvQ8Pwm}?!rqK0V9B$!JXLW9~ZvLjk91g z0|yr2|Au-O+y1{C{r@WC|F=9iv>`nJW9uvG7F5N-{&`h%n--KW#$7n=@|a<-x*Q;L zV?(>0A9qXf5C9?EUethSFB|~JU0Dkhl<;HFys?1<3&zfnD@q9+CK#B=q{j}Di=ZHrZ5}C>wS4K&qvq1leaS-U~q3tr5 z_!EU#7IPqGPmD;QDk&m?yf0YPPc(f@n7IG z%-I92q90vdB0$%C|BsmmsMVf{gSYSf@kF0#o;UE}?M+42cdVP}f#RWw6DK`rP1?J_ zTGC`~w{lM524~^hn2#@?eqke|L+Q}e}T93oTIo(II4JP+uJX3;rzXBF41|O*yY9j zDqZ3UKK4spq33+Qz}I;!*ff&-j0D3Ii`sL1v485=Z^Hj}>^I3RwE0cszKVZ~J8QnB za36-qR$lZS*VNzX>*z7;U*_u|?8|)}-I63n-=X=x&DVW(^bEHBhkPBz-3~pY8T$u( z-B&};(23{k;Ql-krC;Fdydwo2`?|p$y1(M@TJX#TLz*^TxB!lD+&~WMSQo(6 z=vE%SP4+`RazTFog+r`t5QaruhIRGl>X%D%V$yehu6D?F>w+as`0s+DM~MUf{yGxF znvb{==cyNyzI=`T^Tt?tMnTSyjyMf}2b{yLu`I-ziGMx2Y15A+Ga89uWsyuGeSSg` zeP2{bKW4D@{?=M<^zV%CHAZ318nn?T4YL+nvznW)wZ3OnTNm7fum8s1Pp>#-!6>6| z7o&fU8Q4H{Z=&m{iTpUiDm>eof+CcdlW+c@dGC=AF5FAi=!oFP-M<>P+g$d5nKp3A z?!8NPKfL7o<^>krzUcn@{AU0An!q!0h?TPj?RM|A?z9&C*qtE$?S+*$*(mC4^y*>; zj2^mm0gP;(_kb(k9B|*njr 
zQ`2s9IoX--T7r!iFd#bajPmo&J!kyH*c6x0YAY_qkE)jB*H&CIpticMAcmiqm7}2C zt%z|=jHyYFRDSOCannmDm!3Vn{Ooa)OJkGAO)RBy zF3#^exbJ|z!|_)f|0~eS%WLLUFDY+CFZ1y$xp}cc+PLxKOQ%dPpMJrV((-ZB&Kw-W zU3gPvV~j52|BtzMQEwwlt~K!}X?ffNt0@%$-rZg80|Fq476_nmk*Mn9FF%R|CAuh* zO;S}Vx9w-2e?{z_xsd=RmB%x4+Ex)rBodkX5|WJJ}#bNI_@1E zb;sRK-fB;|VBS5|J?PSvXVAUx7x%Ac(K8(OhplP;W;pKT{OgYMJj=tYes4PMck}L` z(`ya#yf$f{HJjq{X-SOMJ-5RvJeTJZN_uJh0Zn*mff4A)JC%qrL!^3FZwGC@b~0*>nX&+Cf#Yil^?vF zb|-Z{SX--2vCAI#!qx4~H-D$w?;RYpI@#Vv?PC6Tc6D9rPDZ^^H*4DJdWp5w0m56O}B?AgKDg`M|ot=2x79OZ1{qd}e9(}?)r-)G`Z-d<*P z{mmcOvKLunE&E;h{=e*#>KSX=b+f@_)NQj+*p}nrbeOk}v+^Ym(wdw|O4QG9&OXl9 zvf@B9tFsGOCV9Vg(Cz1=ardytW?0Ys%o-A=)0>@0K(MK|inZ0v$9XUB=7U+kpCgo5 zEra2d2kQ^tEbj%am2@X-FU0#h9*%~i>7=;d@V?eP?Q~yfjXh4xM=a$D56HXs^T$U_ z+x+JC_OAY!@%!vTVQRa$v4J=lv6<C=rF!{@jU1v{oeS7mh z14}4wWYV6Fio4L8THYJxKc5-FiXlTjpU1Z~+lxq#oO@^2u>I^ve#E+Nk7hk2B0skv zkQl$sEe>8i7*dyC*U#6lH5XS8hAMENe!jMz<-LiWu%6YgZ52M8{XD-wEj;|hCd;S2 zlP*I3WHgv$o7)+y< z7Lsi;?PR+2xXV)DYU8ZQS$9_#iLQ_y;L}ciI2^N_Bv~f;aL|97)wou>&!JZR%+_=L z>L!=+U=edfYnbuh@c*r4Km3qwq#CW$Jw=|4$1?_UNr;}ZaP%*#v8EQz0KFpN$BpJ) zCbVBaTYtwd=kk)~HxFz>RzUsl8N2cUUFIt7>So98D*Ggy!g|D|J-KtuN(iWhtBhNm4mbysxvvbd}edinH*d`b92?1oB#TU z>_2UBT|(fxy8Jc2{B&coms`>(-ZO>QznlTB6oL?`!rwEpc0dt?BTjhvGx2 zkI}I$lwo7Dv4MU;SN2Ay(vpvYRQaxSdGf!0tvekKPg;YwNI33f0|jx= zV+p@$AGgN&G269NejSA1fTiA^jVHr#Rk2H$Z6#~c%@V(k8B)246@<;S(`F$yv~{pZ zj`B$tBdRxji>Ms*5l(&fXD569`nfh=dvMB@{PFg*BeazS`b(`7?PtF#%x@6n_>Aqh&W{hT`*!=UiQu50=5CYW^fcb~$6v z3d3_UJ3Q>Qd$Ko2h=4qi6(%}jkLY&iiX)0^`FnKKcVYT zu)*dCM_eGeIJ&%)JTXZkBhV%#7<%q( zSWX9hmT56`o$d&ObYa}ElKoMK;|Z&TM_^=nlWBFO={XO=GrI&t+aJlk&8OqH^|kCj zGv3ZWpU>}Pd*oyzod1M)lESDS{zC@XeS5FG2-BYn=Pn#XGGxDez_iQyy@GH8*5x>GK z%#J+hZ!-Mh-k8}G93IHph5Ui=A* z>;tJT^LsNaHZYQ9QheUfVrAWQhKQ(ke<;VPVVhQCd@wVqzg>GX7`|!sruu?^D(@-? 
zZ)T*{#h*X|H)~QtKMq;kNB!YJtDgx~{P)|Nd964b!PdtR!FI~?n>i+0UYLh7ZiGG4 z8)CuAGselky|3flr*1xnGCRiiC69Nu+!4<+vrRu?$-I}FE+e&ja)`*yxRlwMn376w@^Su{-7tcrjvV_@ufzefyWTSz`!*}=@X`v`M{70H9ypBc ziA5;gja8bO7G?Z!bj$tD@8!yJ*Akh>tqISBAt8k?DYEL4X#^O|3^PfcM5tid}CJqpgTnZzsj1s8@@8|h)?)2B)U7K1MG!j%%V(| z=(M>qOQ*L=a7JA|GNCD*i>ACmr(Rvqe-lx|f7c`=*jb0&v3s=r?f?fEU(w&%e?dF^ z6eOcQUjsG1c?5M~dd&he5I`V?vCy7E0`2A2DBiZuXg z)xw&{8v8Yqopu8(ADN}@Ue~p?+_&bEW@G86a2Q{1?lc=4n~kl_Mx(j0zq!4!)7+WA z*xKK0_G_SUOZ!)c!v1yNFWLbbfQe%?(;mh^KC%b1NgMQ;zS5FrhMD4|Ocxa(M~)i#4ntGKk0iht zRu+R$C5}emDcjsEj8@Z?W8`)K8lT_Demv}Br;R2(hFOdIWn-T)YwqoDZ*T1GHJaNJ zyz>{khsDxEc?PT61lbC(Y>$3ewQ;OS$%Eoujo9$5_T}o zPgXC4=EDaZ8;Q=f%)Ts-q21~o%4N{sTK;(Z>F$m#%ifpb)J2NRhtF302=-qOQ0_0a zlljNV{IPXyGP;gAb^o}|_Eew4H=X3>xX$pCIFGo zx}OZlp`nTwTCg{f-@%td#f)0hWBd&uEqu&L_U!xlrTfKe>w z?kS+ppoM=noMHJJFfXv_i*R-o1e4$B7~+W%c@xBt3IF+4;#F9pe^>z&55YR;7yfN5 z|7QpeBHpE3aVQuMVL{l`*&c$3Iom`-`(Qj}fP_eny4_b;O~QDr1##P`d8}lNT{uG)vzXWC59eohbMo%be|oq+d-$OJf6jj~ zBcK28=g+hM_>c6!|9-A*Q(yWLgJ{qxs%p~K%ecCvy^?@J#iN!)moeJ=F@y5uuWC_I zn!P0C<8_NM$MFaLKI+L}m?{>T0y^UsLdn?-%`uXqxw1w1`TF8n$pTNO%}7(P0YB9xLrNwg|`KTGXk9$z~Io3xTV8DL$J_Gg34z`v|@nN;hl2 zmd@?*w^&<;oj0rucPjCaB{mJ4syx9l%jtEpq&kwAMYMy9dfY>k{Hps_fD9o{?7RHp z_R~AigGX1v{?3+yZU%?b{ia<`S6nTij0Oan5;+`qg+GE|f{;&IM_6-JXfu9bLLlz^ zph#HLwwj8yr7?PUdwVUIPl7fd9v)^}1nSI^$o5#r+O$$zO^?RBkDfpmrQ)OCnLD(~3KC&KyNce88{yV*H0+IQLvN(MYwWbmp`o)Nf zu;tll^@lT<#RQCrwea%t?Bmt-uZ(PQIQz$c^n)vBSI7k(fQ5-h1XIzQ)pc5(<$Vbb z?(%Lv|LL5drewhU!ktcgl!LCG2_N*upLCw;UgSU)*`F|@Du*Gv3du05h)XdZc(K3u zX_EKEVzYm-|Ne9KU1>ZG=yfhL3)+bzQ70foYHo?=XZb=0u3F+1K)PFn3T2#2{d z2NMLXg<+yBH-U#nV^zt9ClF|m3P$vf7I$kTIokqXWK|b$Twjm-=2Zka%!fh(wQPx` zGp-Gs9)ZjX)q7}$OGYg!0D1tH)oV{okv|!6|1sHc4sBDX=I-o#{w48bCj{|Cy5Z0B zYXYr{)>dS4)OsJxkGHMoq%u{44&0rY<%85d=YUK2WHEKZtx1QbsiS8gP_05 zwAh(WW>{{vneE&YwS3-vy5z;+up@sutM6ssW$w_p&npz+(gTPL+n|?sWYp=V-e`{S ze?fXbv9N<)Av%HLhnN7xBJAI==F7EKk<=0Si@IiqJuIlm(1(av^$w-v<$cCq5(7k> z2IwqPVB(20LaG8n7mpzYSr@`^?V6YYdGF9Rn!V>~%-G-;00|)#)_?X0YeYGdqqVL> 
z?hR=HJ!KEU*C?^HX}h$TcoXHx7AY^7aTo%0bd5d%n>lF*5tK|*U{%cdDR=enE(h!46A+n5&G*z4cyKi3wVT)V|7F zv~)o&c6p2Of-Z&^;;}Kr&)~yr30A`EA>1&<4>Zkt(daJB5NS1*i!(&Vtv6Znx_HvC z=ANKr#eJUMSt;$<5ZTW5HdZ#Yf>+p0c}g04x$(>=H)u}jM;1BZ%gK+d60>ZuF^mNh z-kG@v@nfc_sT5*-T|7n)1i5$)y)mmTItb=jM0dfU{qbMg+4fG&94%A66&tuPf9;IK z{tZJM;f9mh6y5@?EgVc(W|MInSK0%zr}lK-eJW8t&?P(%|~yjZ0w7 ze00SvAlR6j9$w3+J#B8|H5Ie)g*|sJ2ao@SJy&otxL0U$0DWK?vu;QD6jjZ%b>l1J zFTBC}2&YJsd(cF4p1c<8?s$gp*m*O~idQ7s%#}k^ciUi{@^Qdz;UL!f$Fr-O_*url zPCJC^dxQ1NPBd{gY=a3f+EI02#S|`t$WPGxR!0D}kdn|J?5 z$SO|0Lc?W1n1>>|cCs>p`KHFO!&0R3F$XBXl|{Y5XR{fY^K?>=sY* z4}d%PXbNFr@i>d1I9^=K{0CM#Yuq>ndFIG#A$1ru8wD1-;>8nr?WfpbEg%Ys0CAH> zuu1z(=vU3%1(Z&f$qUE>YZd`@W#DW+*=L4VM~RR%xNd$pAu``)Knph;dz_!@=QFea zbc-^@$~9K^)yI_ji|pBpXBlUtoL4<#iLHG!Za>-~J?KDw{0K7(hU*C?VJ5;Ap=P-N zLOR6a%6~a~$c@zH@{nNOcqTVfqXFUq(&fSCIu>FybTBu0rnW~(;wFebv>u1i#p;+y z@2-mHM9Do0dPOLiEf}zDNo)4P;#-N9ut^A~5awf3g8C3N<$Dg8ZWasLy`V4a zs=Xk6)p&gMi}i=~yLBT%@*kF11Ba}R@cC!r(2S!!Kl34rq<4VSC2~COVpDM|nn|fT zN8B=hsa-X#187zLXGF<M3EU&`6IuSVaC(M^Hof>=vP$ZIF^79XFn-}8{ zC*<^`51P`pXn9q<$C-#a^n~%SL_~`}*el^)XQFf{!hvKmp(GstfSEE~83}gW?Czxr zb$JuS?^Oac`2U|o;A55A-=$kDZ23UnR7~Gk1QqKhy*2s0Ert0vnp%LY2e>x*!RRpC23+e+$QrKm zPg=|V{4v|pN^&}y9aNL!qGqdiK7}lw&y!(JOGr0Bj2@0$eK+pw_wl!Xq;sG z8#^gpiqX4z1EeFU*Si`))FGdmd-O14zD_nzEjXW` zRHR^RRVf(fw;zFppg>=|#9qEX`$dMEZq+shi;!xvS2xf~Wer4I;LW&ZAzzVL6!rz; zLdqP#9>&0)9%q}WrR?yaL7NlkuBeO1Kl##*(f}Vbz)~x)f7$IutUnm{ItW<(n!Q@| z_7Qz0ueNxG$H!kG*@SrP6z+_Y8eGjooXwKguIABn?6+U+kISv;NiGTro5bcSKoBA= z5c^QD+h`-*k7th`cq7(8;00~VL@CE59VAxaq<6|NTFin;Qt0AECRfJ@F`I{_Cm%PT z2Ts_0V1-@M6{-W6x=q^S?r?CET5~S)KxMLijc0=zdUpM{PxJdm9$5nEwaSVTIm4dMr0 z$P#kT<@d!)zYJ2w>?E01c3o}UTj@BBhVAL&hPJuZHqz2Y-w>+K92M@_Ui5Z*?S`Ezn63f7yIt3r<<_EW0(M5V#cKo z19xJaCHa~rLZSyD68P?=ehI=B5?B8feZ(=kUbmfZZ|?_q^VG<{2EVr^peGh81e-zV z=Hz#XJF6fea*ufv?tnaGzgUont)jmyAI_1J8(-9*fx~U93y5Fi=L?01g$Q)FT6D`3 z7cU{|YTp=93FO%`t7}epr?IoKw~2#WwalDCyfV76ibov39@8sAvQ!ZQ4=FjzG1wSf zPS;C^(Cg8r#1=e16`Rf*JC-uBM3gU#^@25#!DS;ppMMq=VEJRF_H*DSlpI3ylFkr< 
zcS0OL!~EwWd_Vqr@p)s%Pk@suY5;b$lbxPHQ2izQzxLL%fBI$P85ZEPAMT&&RA_U& z;gDTdug}k)@%wz@JoYG;NQTztHcb|oX3}GcB72HU=u6U8DFPHn5aKm!Y!swyj1PY= z-R){q5!r(OCqN7fYT7$|i_ibM+b4i+GG8%)l97WLOB2$z0^zB50++x=KVVV&?ymI^ z$Yxg;^Ba-vwU7U?d?OThFg*NAXCQZqeJG;Gv6Yp0h}s6XLhRF0rFLdw%7ND9!-GFV zLwiaKD3BhtKu~`}c(zdVWrw2oNVhBA$|yonIK%;t46uF30CLn41R(>4e3vIUb}CxK z=?auyT;0R);6=HO0(@!&TybL=;pMfj&DpAKQsv?7h5RxMo=+bojEt-L++eau6xJu}v>&p!}b z$z+NZ0Vk|S#cFO7OPB*+$wJ)YZe-SZIwto~zOghB?E{V~GsGNm49K6aZeb~C^)7B( zr%`>=&P6~$Y+)H;+SkRtGCLpr@*k)i=)r}lOuP+rK$|y<8L)K&nhB!vH-1=ZhG;%T zyVG)?;9imxm!d{~i0$b|wwtB8B> zCgCfYCyb%_sg*Hg2+;=7|IeP?KMQe3J^og6e`|kdx4FMf0Fu)@uf^z-+#|P+EW6CU z)BRO5TCgnmLggO*kad;j()KKuHc;I=<;3^}FhCY^Up(--aP$utzs1iAg}K&Ph5K+a zW-)dUIu}1TssRud0Q=up+#5IiyXsep)g_A$KWRu7+gpXV6ce>r41r}xBV%TCh3eCS zOYKvs53P%X))eokcvTC@h|IjwAV0mtbMy4Xz1ksI4weGYo@L24Y6>E0X$lk5i<(Sh z&|A|r()GrnOjvG_?h_F+1>S8ll~gq{+L$Z8E0&OaVmNeNhHwH^C*bA3n;b-R`^#1)~jxe&WHv*C8sjxI* zWTBM?@I1(ZA>UF4GxML&#=9?eI#9%Wo$*PwC3YV_s0co*&T!b}l_e#rfLC*e3?0k~ zQ;CRp`-{F?w-kh65bSI*^mai0Z9K)<{{lT@w?CTTS z7z00Pl%N<0pW>jA^%V?dUJ|Y#Wh)?Wsrm*+AkiCHa!fxmia5zZch-~f9$rc_YeAPO z`V;X%vE5#8VIuL%vQsxms^l5+(UJL0cp%xc?XAsbh?$3gncTS#D)4`EYte%6dNE$$ zKW%A2Y(Rn(L|3`}_tZ2yrIj-Kj>oe%cNoj*{fc;nGMzXrRUVy0?fj7hhMO@>bY{NSEz;~A86aHxMg9^?c z&)%CVz!$XiZ-ov|YnlRv2KQPcjdqX<1zGE6Tf4Q}Pmgz>fM##=&@vA7z``M5zlV+V zw`+z|wGqvyCSrr@r=(9V=PD~?3rJy=#$d`|4SWVx?h&2=VH782f&=J4RrjPpT(60t z$77^*=Mrg1zJiP#ZBNGuxisgA-$xjJxn7PmS|q`{XBxt13NSh~$i2mh<#8)wu8=x- zXvkh8$<173PjQRhvZu7InA$2w*`_*>0J%@+KFCfmh5u#guK`xk!7P2Tk4k{uk zn+x~i+}Ny{#IfF?PoU7U^t_7FLa7p#pcgACypIIzO5mxc;Kby;im@QSmGc$~98Fq> zU8p-~Mx-HiJ466vF?PYN5KY_)1~KUi_EL0K?OSqch%EZ3_&uj|UuRQ@@B6|Z!CAn5 zSk2`OioxBWz!bj*fg8Kfz$oPdxQpUY8^%c!B5t;e++g!5a9~h!oT3QA0_QP3|7|E% zLCknbFLgjz#B>%%mnm%NJnOZRsI75dxOZr1F#&|OwDDh*1mLUHeLDbfqiVLt0fm|vSL-ANya}a(-eu8h-Xy=k!bmSAHiTPDgNEf1vso#cq)v~ zx(*`sgg~(>RrE$QaUlH|p5LJqEGTpO@?00X;y>_*zO&HrEE;MKchgdpVl=wJ}cABs_R>%3WRwm{+EG07jL*ndD@mG z)_fIwwCN0wWzrrJ%TsD7HY2v1awjOj6BA8kE?(g+%PA{-^~U}_so}+#T2NM6Yo#!c 
zf%Ui(C0Q#zqG&y`rsJM3;{~O6Q?&)+Wk+ms4TL5FnhHL{7K&CCwY1mBiK0YUSr0I^ zj!HTeo(r~fMI)p}s-&(LFI3qAd^Y(FB^}P0Z$%0_pNT<^{Nqf z7wA#8x1W}P47DFc0Lf1#-x7kqy30R4JQ;xxby_U&{$$)eo)ngh3Y#cTHUs()oZm7z zVGeh(crd6|uEQ8+(lAN?N#~V{Ie0iBL#I40vodi=iZC$Rs`y2eP{=qMcNJ8GU|MJ> zn>w<;ev_ioQqyK5aQd1JSqAxS?9**ak^~kc;Z;@t5!RIFzkIrv{j0iI6j2DZvG~Aq zT^+%cU4zt4Vy?ny%=P46OOB4@K8sd7*dp6Pp{9}))p(cBE0t+*PfLkbZsl)E(3A)x zHU`T*>BYG&e;rvS@+u%hz zkbq25fLP&SyOZ|#E!Dna2ooDs>_$orwvoOo2Z+>=XvH z+*@+r$%wxPNK~&qoaaf&c^0N6C&st)!cLYK7WW4iQ%<2EI4=z;SW~cyDsWP;iFyhA`oEHc7vA*e|>ekD6#(jH_m5aN)^D2%=b&@LNa>_W4* z0I*k;`$%m=Z8OCjuZYyYd$_$;b;sBSA|xrNFr7n4l!SXF)->i)|2r)+pZPQ>gOHlK_ZZn6#d_g&-;Z zJEBGu;UupGF9Wq)wju-ivK7`RQu@@(XFH+4*FGs3{d!X@QtHx@aU_DKmMi$xOIfuW zjqZ#6ZGzW1ZZ;T=`Y{-1&`c+edWru0aDYDKtyUC^D;i+S^+BRM=D`7%Iu=fi^%|^t zvTAR?>A@A75qSjiBX9ng2xNb`poYX(TE#(9gF8TXnBrSD6|5%xVGm?JE=#vKybiss zMrv(nfdZ#J-rruc%~Wk8D)qoR*Lr2)u2}+?7u^nr%t_LsUc8KQycWN!6GaL~-Ww>V zGALF%Dv=GEIf)mW25L%So=H(g>ugov#hyQ&_(WoKoB&8Smd*}7*riHRp;0RcNh+E( z;7W^%mmucUg&Hpa=Ib2N2xvR#t~fkTvdT`6R|nt`ii*X3<*~9SQ`QPjZd5yO^+jWJ8tU z5!J^@eo?80JXb6!{mg--^bC>CF?7sfIUc@I1xrim;F52s<}j7em`##=s^X=1-C8l0 zdO$j<3vfaN{TbC>H(<*B`&_<4Yr?JXAz+s(o`eIbfMwvoDWHHKVTJa)BpSI!^s2}d zh%S)`jfpB!?a0N%%(jt-V;)K(YFJWhQ&10gh5tXI)QtEri-pzg=vwUqgm65GkHXpd zSjslM9==Gc$yvPOYX#)QN05_;Tw$yv;JD~4Wc8M1vWf%&_Fa-M4drl5om?_)En|A& zv&e?gC#s~z{}3aXqt$$B6}s0v#q@Nh`orVxyZNSt8Dkc~X488@N90>4Jh)gCJOO+kQ2k1=!l~}E10+4Px)teR6C#^@k8E&7& zR`F*Hdr3RYZ*IAu3TER+Y@hOqOyM=r=da?1~#MU{Wthi0$XjcF<=K)v)iFFDY-Pr6V1Rrd#)&2 z%9K!yU+YX>O2Vs&;*V>Qz7y(jMYE7{IzjZi@aL<@^sq$v(Q<7nTRBu?M-pjFRw!w3 z4RSrN&V9plVaS)8M&Tzgp9f2hqLV~==7bHGoGB(iJoOKml8ls+Vr07E@W9A){N)xb z$miUyoEV=C9=uC0=54!88Y&_UU)BIvGO#p5RUS$sFfL!wPpX|3Cp*TR0&&G?re72ZEQAeYWoTHH6+^>q~5X#>>6^$VZ{4Y>6vmUO?X6w{vn zpH>2{6fO=o_=mEx@&Bu(f_*s9e^1s5wl;7crf0*W*v;$06)*)G3LROA-lHt0YkSGw zBZJE*D*)h?y^t0%MD|jyqfwislmhj+bco}kNeuj|+>N-Sfan-$i*lEIW3g-7!!G^g zzVt<1MCf&}X2vs;(X>!4Vxv|YUM$4%7+G!Z+e-QHB=D_E*;iu3l8|Mu2v`@EQC74! 
z_;LpU zWf!;ehm4qY_JPC_JD-E;xw@)dBC(OmaGq?WW#(3#`sfjvM~9he$tz-}F?waRKp2?Q zCFrFPSBPhUU?@VbvF-*yA(qzg%^4tqLgXM>^~{p-w^&RwaR1`3hKOT8eFZQOXL8m1 z6|2Hfb&M_e8kiuEG>a#}d{(OI5#{Xab1SiKe~ff*9ikefLMXArWt)_Ttvol7EGB|E zBwHP2rgDBJ8&?$cE03G*2st9nolW08(z))Ci6C4x`rDJ&98x$f*uiw-!Ke(X;Y($u zWDlh{myJ8k8)c&c%nF2!*G!A!h#u^4PClN&KYB97G%hCnj{hs|x0J|{RE~1Mf;KU* zgL(bMLaRd0?3RO7hK*f-={uWyM*p@@=t6=AR;mh`#*~mikcPn>kpwiK6(`9_lB2Cw z=?pM8%Gqb3PR(yM19pwM$pv35C7@Y4n(FWupRQ&9qs+6#mP^&=aB0Kfv7c5w zh?YvaOw4Xz2Rprm#f{*9)$NWrj&ol!jmpC6F`ibvdR&BJmCNW9&xoW+MgAY|uHgU# z$LC>j_u57*tCuu)WgQoKMUnL2{=o3^x{={y455oFq2<=jYw+pgyZOCAxav#URZ1Hd za-XkUT4Iv+H@?6UHcgzgH{@nQVYUQ&-xZam7soVSAeK8;>vQToJyon3G7zdL&p%K*?CR$H z`qRa{&UTV9og6|9>R>P&j!#rzM1`_&cg#$J?NO2t0?JXiJK(nQyhX+WFl`Bn#Ig$I z$??cP7hex;oa}}p^l#ufs5!|0Cj7@mUT8I_UW$I4#Ui9_=8(iiWR!B6f?C$m#ApZ@ zNJ*XzHXlqN>)~qzuG;kWZu03J8@=o_%ZOJGS#=PLQK5bky0tNk%E%R7Y!L%cv`r0v zNa}*NGEnznM%_f)n=!3z#IL})XZ=Pq6w|X?*ra;H+!EIj`UA=%l#KtPEk&1&D)NJf zS7Zo3+}^9uh2xJ(NZUt&DYk8uOo#;vO$fE<$nDF?Qiau01e*D9*(1(4xZpqrj5M;{ zvaM1{F6bY1VKcCz)I>`KO{*be*|rLisA*gM^Ew29&#EuvXFpoECR zhm!S*CR-P5MVc8mt@fd(7Ln&>O*!>@i0oi7*uFZ=Y?reNtX`k$hNS;1MXJsEApb2j z_4>@cLT-!9#;b?-B*A`&=&CkcHHXPgU=eud-9Wi2vtXPHUQ&z6TBTr%_)%Rogi+g4 zD@DX`=!yAb@hJoKxEIR|!ayRU<4l!rVl|==^724&o_nY`=)^!sGZq#STLrPKPp_p3 zwypl1B_76ZYW!??xd73}>VbyOFBJfCKt9Cceq*Ox(40zRTcyBz$^C^-yu0nHPrRG$ zs!zOoEn$G0Yt|;%in`W%n&lV-nTM74F+3^cMB=1o9sCHj)g{36zhvngy_Ts z3p^3REnty~SItD~jCo>0Okh$V(R+ZbHD2!S@9ykxHa55Rc7UC?cQ^JL+wA$tJy|vZY-JQSKY;N%ks`=0rHsOEc=!dF= zD0yd*M#>M;mq4(Td!Z}nwA?yXQgTf;HuuJ&bZ8*saag?`X|FM@pA~sDGL}VsHeYUS zH23IozJm+7-Q3&T-lrEFqq`@bgc!;f^Yd$GNgD0NJ56T>pbBkqEOiMWUd3)UTTl=R zt76=5kKS^>$YC!WZA}A2&yY4Mo8watO=_VwpVypb%8TEq~e zEIgG^V1FoHDT-&cKOiLQ`HZ9NJ}h1xjb$+?yi5R9IgQg#j`+sj#=eSalss@P4HPVs zVExC`;j~z_iRaQ866%!NT-jDwcOSivXeilmnmZQw0U8nS3Bzwz!jeTmGDj$5rv`mF zI_mVvIh10`Y9EQAi;oWOb#?yoJUptkEWsn?AqI5%1ui0nqbwCWL_>K?{p5HWs}QKG zJWf*dpJ3p@Yttv0M49P*iB?f{RgE5MVClU?$A%$-roP(zO^joC9uo# zSOs^n)*LkF?$pP`I@rpu3S}4>e;H3jEYgOutkg4Gs~z@PSxWQugjO$YAm`kO&^)ku 
zWDQnjgV~O-4u0%{uQxdzkSS+4ezt2?PSVT^O;x@tc7e9Wku_O2!_pg9K8dnPg*!?g zArx?+6RX!iR@mMq)*K>p(rubQhcc$5?C5n{c89-mn_o2?O6XJb|J!aOE`}&q6wpJM zsfeBD8fCl-#-AS`9JXZ3h6CxA#G$zUDa4#(%`-V1*MU^wUWPz$Iw*axPD)}AKvxVU zYfA~+5+j9;WYOZ)ID&5(0|xA@1z^CAL|}m1rw#nCYF|*1H{V-)O?CHlh4kV%o0ImsS+O}%>Kmn_U6LlJ*|QNPTHfE! zf#XT?+)*nFUzSM-x@?)M$+z~9lITWGe||?D6tT(quLYVO9v**+AQ(*BBQ{}@dc*~o z0RH=pt}ACEgO%!5=>tiN+WF(B`x~-hDnZxyi5YI7(nUc>>s4pIt=WHGq(26tu#f<- z#gefJa6gW{Q6}{!V_#59!f4o+t8(y^9jJeC>8Ds)X@_a?7~QJ^rfKh1O>jqvR8?;> zJy2FHK^1QJZ24d8u_E^cPf4 zf3_I5FZp*XBKHd)Y6!!WOM!(j5<{wvMwNN10z%S$7oR?U{FUWu=AhZJ=E2a)-I8`> z(544ifidsG(k_r2%D$i)1kJ7je0Yb+o0E|l{b>frXz&iIFwo4 z4X%ivWwo!V5~ULM(KPo&EjHCwt>}{Q4LuS(D3ivWN>}))?Gv%+XoQH=Begm@<;TbK z_oMsU^Z5lNvfjgMSQ-~}WXS0IUB1(j^ukX`QN-sD&n=HpJFm)I)OM83jKq?Kwjes( zE%IUk&SX0}J6s1 zsvGu&wMLS9?;n|N?;mN(&3|y)hCXg1QH!)=JMA`|N_MCng^A%Ub1-R5?2O)Yg8gi> z7@sf-#hIV=#>$_D6Bkj}66|L2^}#_m%i;@>7N6mFtTJf{;32BqO2+?RHhQZjEiu~S zJDj~L=}@~)Z&Re^!VqMXxEUYkvwB9f}8o{FSqwUSuFyDRE+Whpo^?2 z6c{0M=}iQceC45k?N6EV(I4`(T;*$hzPNg?B;GQGBpyi(FnQFQbt7 zqXmv{+lZ)UFc`qN@L{C6>XY9>0B)jDFq+5^d`|+S{#(qLrad$@Fa<9~>^*b7qG*H+ zS&wP?+@k%MMJRhvKSlPk;x<)7r^|f!ZD%(Hg?uB+2lUx!ApBUCj znjQJhgA-69Ney6{Mst5}YX|gtYp=1pL1EYC{KeKb^%%4Wr-@aIugHBGI}}8}7@9Wq z{e57((!Yi!!2+)ymX=pIkghIN9x#I-D3bUig&;mdn!L;%Bt2`bGUKGI%-F0YI_dV^ zzftnKNAvUuuGj13F`8!fZZHC_F8B(w7wV!y7!y5rp)#rpXCVFLDAVbM)lost4sd-T zS>Tn?-WqkOq%2A~te!smNai1^YoS7>_3#$f0;Q}h9xvsiQ06jrMa9D9-EU^gqC_AN zt{`}+1EMeX8<^~r&=!(!HMCvEQnnD;8AAz3C0`;*k*#FRFFlG*Zly>vXo-J#>W zcoPq)X&dojK|5Lc1_hf_lU-3a*q_re<#0WIw5sMMJIi9Al@(N^v#{+#0LV7n_aAOQ z&cDB)cESCx-(%Wnp*(y~l>lX|(hc(aM{))qzCZs+1%R`{te6gGu=5+tudU$K^J^cO z3n6o}L5nH!gxmdb{qkZRYjpmI;Us%i$1)wv!cmo`Fv~?u2*?Ti5OW{Xj+OQ2QQnxO zY*hfhENr@?lo+jaeJY#F)eX2JSew}@l-o4g7B~|w2?-Ed>E>j(MF(7DZ5N+Uq`ys0 zs=8RMNFLC`{ek$OBA{#2-u2>&h=cn7%(aCb$Q z$f46exvHYVD~cUGp3+rywe*w~5$iK5IE`+8H9nwsONu^prXTET^x@#{%U)Bi}tbuXKepBocF|6)O2^$O68g=^FpVa&GQm16& z=M7efwb*GLcrS*=hH#a4w+~kJEL{dBh^?3{o!lPDXZ4l5An5SJi%O#@+-TNPX*Dxa 
z>}8=1W^LJPgZDzeiG{eamy7lvGf16^O`994&f8^kj=QgiQn^l3lWi1|-}uQQ&W)~@ zePe*Bi2vt%Dq!cwx8=H-d-Dd58<}=ZIQc*Li;zmVfFOjz%8CG$#;slP0 z+8by+hw|n$J3JB2@xcKJVgeP6`66Fi3ybF}!iy@j{-OL~pI+=;1=IE=9I#8KT52l%SF0AYiT~xa>7}ec$CU-R+7~lN)dq42x8|>GXD0)eyFUXRi~6o$zu0N zk=TiSMY^`PGpY77vFtg|id9Qe*=<*7UTPvJGcLU3nF56g&&w&FB+o!DQRR*=*MvXPq)ud#ad&r&&s`}K5V;xJ!@T7|OM6(+EjKz-q zRy?2|5g*D@6E`FoO4(cD9IL~%(SwUKExGM;rC~;LKKrLg&TBWLnm7a$I+_8GTe~|J zfh`LAtCc{fJNOGGlEnH}zv*MVQ zS<{gY7E)eWG;EV4lagbLCM8v(JfBJ64Z*Wh;Me%777Mf3TpEUr1oB!>z|9t$Cm(^u zYsie8)h`1xGQlmPLFp_@%*fweTwZ^A_zUiY+wkPy`xOtz_b(U*;*Cvy`NyE^)6yGStyG6h#`{+LcYn8;T5b8Y0YZ z&gVH^H9YW#npFtkzOASo^vF0{ETj>_5AB6)8B&3zGDnfU?)FQxVHEEUg=t4a);rdO zc}XGZm<8Hysa+2yGB+E0i50rZFe2npP)+jpe(a8i?9Rm7(-G}WJ0#UHKK%<-8xG0# zE_oZeYgoqArWqeu!{DWee<~OyjbdVv#cM6Gh=v6!fKY8>;Kxetsi}Evlv2?b6~rv! z&qrTSiJzX;4zF*|N;cTx*~6nipv9vcsgjEcpJ8=5}Oj9U3Jb}=vJ*W z+gZWEVt0dt@TRY{2oJ-s3f|ac8u_$XAllAR@JG~ec7okp{nhGj_M#rjLFU&mn`GV< zW)l=AQ+nVm*4$k(w(Yej%a_O}PeUB_P714$*IaK#26r(L1!fVk$6g6NvfEZ6k3f2p zaYmjK10lAU_~!xV6No7o;T6$|!y%SgQO?OjiAL(%*?Mwo#?>x>)1}#~4m~nex4_;S z2j3h-HV$IJ8TfIMPYApgUWIeRAw*gQhU$Q)dRE(0PHMbNoH6l!8AHnQQfWY*jUy&2 zY7|tjHsxvz4w>Sa@vr7*Je*?NP0OWXPwIk1t{?TW+}&m~ho6rzHVc)p}PK zOiTxQ#}h6-CfN;l-t!@KJ_*g+816FEQHj?@|GdwT zg3LtF%Hl;RkvNaG{@Hd<=elX;MM#SVx(q+|b|^MJ$!w>`cQs`*sHqoI>s?KaysW;X zp}pzqex*z(y3Klh`&5!vfs<`VZEAUM{lTSpu~e*>DGKe7WuaMTg~(sbY?NT6SQ zE2GY$R+3_dqkP%2vydA%Ty|Ed!7rQ4AE&^i(tDwL0Apx`<1b~c+X?hV#wwCF6{dp2 zt}b;1gq9Bi(;FFFzv5fXO9cuf_0^zPS|fmi%#^C*NEujA*WUg|#`~DpS{~8Y{Cqnh zXpiJbtT~t-Lh#^QOp-%MRUZGLBGhefgA<&5p`aKf$4`|o z_Y_?B)7{;i;wh`*R`wAxT?0=(BHpk;+Fq1L)5UZ~yf%qiC7Do)g;`pZs^56*j)X{t zp;+BVNerRKn?mv|n(;+hYK(@M<5$&3RRchIp@-lYP;`9vsxlV4S#bdt^R@HD`1pmJO&>3*w4gH+0tmDG^eHTZn~a zv0xzJ48IouEPe5n1xD;7l zu>>MdoRy40(wo?lDA>~l&E;9381hS|(y^zom^Jjp`asFP85BM17P{VrY9dYtMAd~Z zckyuXrI3Cv6Uj~6Cn_BVlgCOPLW%8k_(D`ArAt^OX$HGOW0!RIgfonsF$}HPo`@M4 zbVjWMCsFgPZ?O|)<#_^OfahEbZ)kutS&~jprH6}78^Yccw_V}_8wFdCx`{YjB3@<2 z?$x7MY@TP4ai5YI!`0N@@AQ;4Dw{f11jej6kl*39Ppt1I>8ugZmEzT+ofQAh7HVOc 
zj|i+u=Z4@abdO5Ql-FEwg?TS?;HbyK7-M+C;?|9a-%cK+_(@{fqG2d2QX4@~+ucgqT$sRw2AWum*{XIF)%*koq^Fdo2)ph`E7rPC2b!@@yH2F@mGYX%D6 z(z;3PSfOVGHWrOp3U|PH2UaO_Nq;r3JsR4D*S-=_j)TNcp<(H&`4d^^{OVvvE`~N> zZ(zkJwE;a#+x}auV-@Y_Q8!{Trn2aGI31S8F0!pMNKPRtZC!;OlBMa5CZudU;wpHM zyDuw*q%#qHS*(xh&s68a$frpN?dztWYDE*a$ac$u0XFmP;dne7A&{y+S!MFdiZ3Aa zm~S+u-Wj22Vm4s1?PJ5Fw`!9%Bo-_LU}Eb!goIAsaus%6N&^Dl6?*6(VlfXP=CGOw z6n%@RIW+FJI6c9MsSVP0M%PSskS05NaqZiW&;u@d7w*XwZ1|^$8c&=ev#}O8p?Nl}0N*e70g^O~&!iELUYFKMkxLh0?ypnjyu_O+Q$oTs zOQZlGSEmLXZbxh^!3R2h`cnpu0zs%CZFON(*W#-<^!UP~H-bTA&Ls<8idgk1gZ(aY z9`=@675bFP7^XSvG}bZ|XGu1<5VR~Lv8q#3 zps^MZ#4qAY|Bkjy$DXBth9*JDN-!=ou)xv_{VU>3a07?EqZybzkKMTs5N1#iz@(jQtYf4g)Wnl zT6Eb1*z?PpoUQC2K_IQ20^|q+WbA zQY_U+tO`y{3OcO_8N?MvqsovF%_^6AwDg#Yj!SVky|!AB?6pc4;Q7r(J@4FJFLMXum*=%a|#P&hR zylT@RJ1tGSn5D=P!4cC^FN(QHLaCqwU*->klNeDhoziGWoZh5;q$stbUcFh?^y&Oc z_t^Xz!&?|MKBK5Il({sie$ao_#>6hgSzu?S{7%2Q$qCnB$Fc_K{~3!#^ZgLetH1>d z7YMFEjN~Y`O4h#VgnoZK9Zzz%tWs1LoicalhHmU;3t;VG^_b+3#`sAB6Z6{7vpoRH zmu6e2HoN1v6x}iT`()OkL~bc|LqH0}B(7Sq6C(Vb1W6cL%!HM-2s8}`{EMc5^K=MG z(T9XKB#2P{^L>`n_PQf^p^c-a$JWC6I)9MJw+Ty`+plSt1pf=uiwpc8Tr=W^c=@&N zaG~79CJjcQmwCwugOe+GTiR-*#NsqEk$8$!VK1tMZkZJ3I@S~S**42%02eC*`0U#1 z5|1c9#479e4~@U&76>tFb!YLFwb=Z|3R#lRbUpt}kEc*`x$k8SIxk%v=*R0(9AW!$DsnM}*l3r>{WKb^qRviKlxy{g{hb30C`0 z)%c74B3#(}xvP^JNfHxK1_7qHH=zh&^4hDIkq&wa1mB_S3p*W|=+4+QzgrHYVw{8Co8WVMd^5|bILM^=o{-=@WrxfijqO^AL2fl4)@@6>*%n(F?0jB8>_~i9;8@o_@Ss04$dbJd#aNUK#Tp8I1R`NFsj?gBP$U+5>h)ro`AYHl zq;@L6U}FzE;Ik1*)JcI*8xcg-sLQtx2lyv)ub(mmjxSc*xb1n)KSQ3eq& z?HVY?IfX9k^XvJ~^Xv7OFJG?t6zOQFMk@`(j!&9MuV;|2g}Ve|ZyRsz51D=C^6KX5 z;X}M0KOYF8be^VUIMqeV6pA9EeHTvm#5unetePJ(n@nqQDR+~S)he-ESADEHF_d&$fs*4V=G2r)oBcb2}Q@$qxK4bx)rgD^ln(7F?E3^U7osNoX3zGl}5v= zp%BZE3Zal-xrByPn8aGNF#!bGLW)S=>-@@&1U$epcszdp`iVzY&w9}o&)bvPZ>(-2AwZo!*EWW>Hb zMW4M@%Aj;^z2BPx8jCzldU8gdX1DW~*^r>Bc$8H(M99dh*zG(WGzcd=6pxJ4XUqE*=lcl{-Uu%AX+Ba7nQS4D714b)Iim)F0an7&g8nLAGkPAKW6SoMWE!9 ztbsUgGC54GJQa3~<0W#ERFr@YMaYa9+T1l4Wa0n^R(k<)q}BifYcx&3E`;Mk%Oq;U 
zAld#$dnRq`Ig7e00y9-(FHsxidgt58aRFPwM2U{dtV28& zvS#JIFC_Dhlf{HdR%9?01rjYrV@4NjH#PgB)}6@i065f>l^tIqfTinDGn6B7Io-vQ z#+CcPyC<8fa*=>nmFyIjn!PC+A@JLFAn2b3tHYFytOQQtw;};6STjY`JPt{(Q^tOI z&6f<3mnQMx?BEdU1=-?pmeY9Pw}oF0Si~`30&AAYjS99syLVaJIIM4vS61?@+ZP8w-Qbd@fLwedTojXZTlVyUMhE2BKCq<%j$+{?pCqfYHh0i zGNFUO4XZe@tSUP}DUh%b7^Om60(-6^6eh^$;>Ye$AH~FoFrP0p_T@n%$-GeFZdz1d zj*kmdmR%asfJV)T0Lryr#~crPYXy8Xtwyovw)5@n{XqE=nZ+{E;T~##Y*1<^^pjwi zi`RTP>8C>8b^`dVdF{V@N!F8zCX|@#**-K9d3R&Aqe>Fhx2DrCl?zKyPo~f@i)Zp_~?u`c6fsq z92@_DSXs?daunw_qp`$R*VSkVDp%$}$~p;C%LP)Isp z1%~JWerwU76)III>s^+2D~Dcu1~yn9e2-->WDx=a9zb|u5{DZ@)1qaOY=dEIFwF3_ zqY)4tfPmM0yE62=Mp32Za)jhiyB$m^phNRc(D&1~@^-p#4!dKOHYLrP%I=0yy{`0k zlJhK-J!$Ran3kVz09c81{QuI@iMcR6!C>1YPzj@?nkt%rv{eR$!7%q{U`w9nW(97E z!&`w{KnklywK5cApDe~=?9kVPG2K(ehqCRm)XqY?3P*Pr4lOI=>azGPWsT(kgc4UV zFcUaTA9CZ<_#X>dA&#_HQ%%>{wUWM6C$v_#cV`#U-29Ol!XITT@kiU4KiU-w4Tf-= z+gJqTPH>?mM8g}w%J!;27=~Lcd;E?F*oP~?1$`P)K2{(K@FEmSG!SM*0E)u&s!gX`L3>&o?=WB0SStf19B6UqLql`4ok?ze)@(xq95<=#jg|% zyIen8_rhGLJypEEQTnJfWu#mHGdivrH5>^7D))iXcna;b2Dkd$RPwo+E1sj|jKRel zMmYN;wa4I+QXfO_0N=K^p=;n7R7BU!D z8Jgw@KLMx6adu9G){4A5i@Uj($&h@hI;vN_hchx^X}T@Eujes&_y|4eAoDXC%96NgUCFPAO?x z{Y%AqtB#4C5FC>@c5+RZvZ%eR|BB?jDoao;JYLrhfnRj`|lNQBdya1Yhh9p52VfBOWR{h-Xf3L-D-{CPmhV^;~crii?cMn@|KeL9U zbf!;^Bpv*OQxtEKYbis!Gj@78x^er5|cJhymS{?+5L>U+WhSNUHmD&ki#STO*Wxm*sq-r{)iv zdICQ#v&4H2DL>4Gg&p~m2L11%a^9SCMSYPG=b}oR@N;$SuX_DLZAG%@|9j)P_IC(> zm^~c*SKQYa7DcdpDJmN6s1;ECx0Qqv?IvCRMQ*o4GfSD>>7tkBEhzJow^Rt!mK{{ZlEA+k?2*aGH7P#w&oNa@Dbf4RT-zwYMV7RfGwRZc8eXqWW|Zc>Zh z;fN&ie1iBql^`Ydtc_bBi$#8IKLBY)gmDNxJ~e+K>42Wl%avl_-=eo=x|{W?XBH`f z{73o7)P}NEqXL<+I<3^s<>Sg)OBSau`}{mA%%JDK&| zuKo2LKR%6dViN%2O}QsTDRON?<)QbWBdOMlb83;@!GuXs3piWx)Dc^*$LT{}`G&ZMB9_!Qsc%&hSRn&k%S_rKTgD|Gd8;lzd2 z+AHd870Pp8S5@%%Wf7T*`Uja3#5sGR8?78Bg9F9aYixw#W?TCPj$dD2Tu}(EKwmH& zk+T3jR+3BpMhtpr@j>lZfjgl;sVOgnk3-n?tBX{-r+9XK_W??0Sy;JhE87XdR!%qc z4LQ29P#ONmC+`N&@}c+dF;X{&5|Gequ3NbOMXfKGTx)1N?v;VX>5Fe6~4WVZI?#GMopn8eH>`i>H;70OX23Hnw)R_BMC6tchDBl&GXn 
z_Y}HHnF7QFEs*qaEaKSv!~*Z0w2zN60`nm{tg`je zZgqlh25K*-v?1G7XC0)cVwiS@*cQ=@p0>QbV^O8`Z&Y{W{?zEP87Uqk5=AkvWPg#m z7S@PZ&xqn<5M6plgdb5bgj`%Nfw3K6NJ$%6;XkU97Ed`Gu}{@rB-`J%bdDkT48J+% zFT*h@;?HH%fJIE3EicHueQSt&8;XCZo1zryP{r?b(pw1IQf|-Np8p@MI-YJ7sjbUG zVqJiO%Z*M#2_o_b=6S5%uPpVas2MCDj*j;0YzH+}R=^b_=IoA~S2 z!d7w}fnZUhgzr|;a-XKVx!Dpa>zVP{X^gc+@uzWnv)M))K*EoHWTFS$Y+whT9D16-MsB}U}IK0Q}TsEurX%N2 zQ%=}e0?6rSdPGf5NMafc+moK!&|n)F@K9lM=vc{B`X99?F{-KtdN%l@bG+5Vop%SO_JhD#z@HbxQzVa4MBAQDGtnl96|4z@123&=qHMN!ii5@>=b> z%&}6;8f95LlUY52{1*TU!d!x0<6(=_@UTUx6)&-_k#K&6^CF&MSwH;E3MqGw@=*F% zaFvaNLsm*KObg66HMQ7%v^!Q&M_rH+s{h7H$^W5X5nuJvV08N6Aw3fSqIy3UrD{UQgRU0vue^GK4V<%UFqRILl>OD(ajgBm4y$z8nbQcou zEw&c=K;r?r4xC(3{dJwLcots$<8p&M58$`%-wPnPxDw-oT$=l{yZS#QMRmm{>sFYk2D)v}fFVEHPT4rr^_%&#)w@q|VP6xt$z67=7cEcIt}1=vvpoC5)L zxlAj{?~Uv`e9FRemdC7NLzJM;+8QPL=f>si*}NrN2>W%`SE$-jeE*4cA5(l;^uY~s zsb9B>eu;z`hj}W5~9?c38 z+(J-Ue8;~%*XRJh-_Y-mCF`#wh)yPql>j}`-Ob0h{}jkHNDcQI^rYvpr|rPZ?tHq? zq^vA07VWpia0@wgB&JR&LvxY`|BD-xoYw8>FF#w*D4CWgX_Ts}j0dZ+DfphDkB_Ye zqPAtxwpdUg6l5}Pfi|GEoAQcXW$CtmDv^5Q54cIhQi-5hEV=B!nK~RTl^eAViZTZ4 z=hlH6v{`?Ry}*lrPA-bK^AC;n^AAn_ZSrr6f7{$6_Jr4E=VR+5pBx|~h6QO^$mw~T zcDTW(w4F~*CqabM8*nrhkMlDn1w(^hc2c{%BtqTGJxKjxQ_elsQmc&%(nU5(+~r+_ zVYaQzZ3?d)&0X`QQL<}C|;Xs5cG%p$6u@_kjRfJ z^F}e5Lnxsu(?GR4qX<*XMDZpmByEK#kOJKq+kvO7%0Vsb)Zg?fX~n8t9K8@EA@F}h z-wa~A<`sr`;FF~bbdS<<@jev+h|4dO4e`|%-mp+qM3|6-_3B~h)k>o*$pg%KpOtSH z8fCy1os9>U_v^0E6sX5H-UgJ=6?8&*hv=rutM~Z_)Jz<-k^=li?L7+5CHj*x)_`$W zYbg7uR1%4X@6S$FYxjtsN$RALrs#^OXb#nbfAq>dUu?;uDToOvJ`LQ;p!J%%+R!y< zm_&l-5YaAsgQf!|mK&SlY;Wwy$RYw~?ArWI_UxBuvB$eVacKB1JCDgSo}@P_08%1- zJ=;;_Bveo5cP7*==aQ^S?^|R+2c%eOWpTu_&WfxMub}MC9Ym~RvW!+6$^j~%Y2h7A zh_4lz(NG*Rb0Pose*W|APjgSzEej0Uh^hS`KgAgd&{w4pi> z^@NZH_JPQJ4N4e4Cwx_EkdD+*XI|v zA7K^p<4d}K5kD7f-t7%w2a%>R0av7WQUowe`7L4^0R;Bj5X3mO>Bx4<{ukQhYNTwv zP1Ux1QPi9G(Eg5l!ArXo^~Euxa90K`(fmDTAVl%gou^W|Ll9e)>4aSU35l1WS{+iU zq()s*yD4Gnh<_vPhjn8Foy+U9_YZLBzb{Z0yppaeJF&x=orml0moM*G`s<=jPb; 
zn6IB>J1jjls|284&pasZm%SNgSoRtt7Kk&8%%D`$gG1%B6ZCd5VdzY;y9}2?AbdB^8-` z=^dz-6wPInr2gOhb#VnvdrsB1s!ZL1M8VR=O<ZLsfxWyCHvkgH866a+m0=NK6P%{+)I^+B}nt>PoI+?NpS_Jx?!QO&5 zhx}rl@PHMOtF@tjuFGSTz`30k??!_%<#pM74 z%BIl&5H=&9Kwp^IKGal~{7|Aqgy6*4rK!x*yp!~_l6n%1WI~^8v{L(nu^5QUfA}#k zBv2yd^syKzsnHBx1psim%EezoFIonxxHu{LqJh-%7G8(gmC|(Z&foB)ENYjOjB4b3 zb@z8Vhb}sdO%*H~gsNiwMyQg6)e#m?aTNyl(!Fzjsp?olCELPpr8t6_k6*~2o z+zHgpN0ptvxVpTAn(zU6COt>SO{g$V)RUjHsr7o#i-});j5E?UY&hpStF))0g{dwD z67Q>)U$yx{6gFi_tLIGNx@RHxbu^Y#7vR|Z&IpSl!K2*2nm3T%m{q}-ka>`sTs0q^9)0twFqShIxSp+J3Q zJyQQD&vt476kWnl?E?fc7Uowkk z1#ujUh=<89lS4e3ZCMkR)H9XLeAFQG(_e!5So8;VuhA$>RwBm#8n?q4B)&TJ@5z6d@fXc>ORj^NcT@?N& zfpgv5B13zv_Th>mNav5AYkzyVr2^I2`n&b>bzEYSOBRoCZ>3ITQ?AN#SxLh%87sF( z>&J>KQo}5eHK0)t$_=)`0&U$S8zml;K$4|zxQ{pDp`{S|!WM`dgA)zOuoNpcs%~I9 zQ<{a6oRE`;J-V(YuO;>6)f8T~xj2D1!Q#Dx-1L}EOgLapMwkzqT)|$!>?!LfDJsg3 zWxHaFo4XgfcloZ`e7Uo=OMN=g^8aH^=a-R4M+mTLN<$B^ z;waDRAa!*kNu9XsqN1~lz{GkI`WfsP*j1=|1>p#{1&;FBde!98(%xFr$h8_eNeS^cw zTB^&t1U#{0LL92p7%Va+5Gw1c^bb6F46@|?)EQYR;^fBu-Yn;*S=Ly0Yx4`V*@1&@2`!$q=W+{v3??xi!Ac!y89A) zy`u_yYKjwT)0E~%WELegj76{#QsV(Nrjhus_$2BxyhX&mC`X})eTRCZNASZVc;s|K zk6Le^wiZm1Wx-*@k(h~1&u-kQS>-e&Fg0OPK0_tsB^!&? zz0hGRsVWhUYllYbYx&{JiaLc5D;XwF>Cste_dAmMG29wr-MaLv;f6r6*BlvHUPt5idl&;XSXzx zN$;{`9POw^akR3okt2t)=wmB%3cCz5TK;k=~vwu7KZ$c=49pB9%wd zeIK^V+Y{XVWZ+)6Q>sI~)^LZ-VV#B87H%PcQ&`S`nF#Sst!vG4EXg3)#|df+Xts&f z>lb7Pz#0S*__vSLb-ahR`EK- za@iuoLJS77$e563EohJ-Y;76I1u1Dk(DDs6xPkTJ97ab7%*SxsyWiLa?{@>Ge16N}OxmRaHTt&2PA zbj2=}#p!WXXuZ=OsIlcTR<(jwidZIE#JV##K-^?eL4tq$?4;PYlh-7jL2812WSvAL zM+vq$-_O$BP*cL7OiL{$4=8v|_s%_;9g2RHVs}O1Q?o<}NW%~w2z*R|&&A?$|0fVO z1CP`B*})Wd$Xgp%9S+#I{3&KuV!6YNrY3G=iHRbOmaHL^qNAeHX6g!qlur_Y zfZ-j{LYX2#-aTk4jR8fK)eTY;^!}f-7yJn*k`=@*T8RDM?7atg6~*^AerNY?F1`1b zOCS&+w9tD=0)$RN5kX=I7YL-8BA|jOMFgaZ3MkS+R62s70s?}71;mC85k;~0F7M|v zJ9ArZ!q;DU-uM4J|2%Nc?CjjxnKP$N*|R*-h*e@42UYG;VsGE)(G#Uz(LP zqf5QWbxWu#dqcMa_H(fcj(wfhF*Fl)NHr=x9OspOmK}crAE`xP1e|dNDdpKY{EC

{-wlLke#0*dN(Ci9 z9z`99O)J_}xXd_}$g}HMh~jQid;7!)L(Aa^P{50R%F{)t2gJjn&x`dEzVsufIQHQf z{|nja@pzLDh6-<=_Qp1{ohGNt7u@Xwp)Gl?NOC(fF-HAk2J$#6gIr;qBOs)*J7p}_ zL42<}@t~M}CXk)-y>e*pp0#X?*Tx;5j5I6^AmG%*tSo-0*ggRXkFRBmwyoR3h+;pd zO^Y`Dz3U!04J9`|FzZZoM>NmN$c~_cIA{>gXuvjuXi{3$o(2>GKY8oxhRT<&i!vE1 zQZ^lR6;ByFBfkUz7ZD7>)kk+z=`Jt;8$Wx7U(&oog?Lwq?eBs5hw4)s*D7Kk>=e9` z37&v}8*&$)`9Z-p^!9pCtPLF>)f;Lf-y3Ixs<(0Av{3F9tMdVPgs!tm6Lh}+VENK) zaxy$DMtF84-)_Git9D|vEc&JyUWY!|+WWh#RL}NK1_TPM72x2tY=a;H(6&+~OO8n5 zLNkv$$fJuK0Q(TLWPWHcd7_yYUMU!ecv-d(FxAMdrNeikiX-bi$mQL?(z6MDfO-6Q zd?7-OHdo2fEO~^{{!}aWjRzx`5KaZ*9ZhU*LWSGe-9XqV9Ev?>zn9tLqM>o%B69e*BIh^ zSRB{J_4A}JPStsX!@#pgqvyT~Y-URQU@!TZ#1MD~KS0GSVx8ihem9knVUN$EiZLqGdLRKBOH(re5oil zJq1ztagmskIl}ee} zwPp0wnCPjkqNn2Ato#|X+};wcqHx=d`)(9y#44d9QRD46jz2UfZ2}|kA-7jnjt8&d z_47;{l%1E5IX(+{3HT1))Ipi)(~?qCGI5Pp0tTc^Rc+~!jtne@DgX*uKYri?p%JkJtGrDx*2m4+NZ zzOsY+6VPR!#>*C zS4GedmEo)N2&%4r4KwTaYk5`VuLpQ?CU_LX*=#xp{A^Ra#{_)4Wjxtl=M2h>g$Spk z-AWjGrFqiF=}xO(rKhFjqLVySMKkVzAY;r!bc?Kzmy(uFR=^MXq~uQQi`Wki5*gP6 zP+ty$+Utk+NbbRvO8w+|rrTm33+Wy)(L*id zR~X7Omoy%cl{48Ea$8Z|l##A#Mx%yi#d*f3KyRR+trpM=s4E62)KV)ah`!EgQcDR^ zdPcHWq9#O_407xK#5#6;o`+IJAtv#RgFC^sKQd=?*tu05m94fr)E{df ziCI%}b^DQ44Ym2dk0&Kt2}Gcdwe5wi#qD&W2g3l3JZK6IDyk=Oz*K!B(y+ch1&pO1 zrDMb)* zYdiXFsm>~K&}8AT=(HT#Opx(*k78i_o)*`CEWH+6|H@}`P}-*QFpS@Tlx*q)&1=bO z{N>N}SW4$(aDO;Eda%7tg|1yHzc{?X)4O*PzYvj})GuK$zG1>|2iQSnh#I){efaU- z68bnebb{nMh(4OA845|z8d4$Uz^i^F5t2N&sTTPuDr!bAL#V;V>rK)z@NxreH>qyZ z!B#^!eqxU_qGG4w6@pM|Kg8%c0(y-dJ2`S6oqznuI=3mq}kmu59?SIQUWaa)(@f?H}K3sQg-xEGM%V zH906t9<&U~!yDTPaKF?sa~e=EYg(!&7vz%tj@O}VRmbf?j?c``kQrVV2<4$bq4aS? 
zR_Z-Xe>m`X_0ML*VTk9Hi_o~Lt|4@E@k4s`41J_L1nI13r_mPrAP>Q(pgBbPN#3DH zmC|qlOVNu@_hcX~)gN#Vp6CU^Vea_$333X^gs!p&zduI}3Ej{?Epswb@%kS7$}dZL z$YHWawwq|PdXTH=Rd^H>pQ5p?p{@~@)sy38DaHs2+CFmvD(;n@mEv=G zdX3YTHa0yUJg6OlL=V)YXR6{zu<(GCoXOrXu(@5@GF8HMbn`G-)T4&c^v>AuSSdAw zvtXOGuCUbtyfEoxNz;LHy*==D(o%gifgIqasiBYs+)b3+|282;$ zhjX$rB*V~6r7xf#iF*MKqFt@O7o{RB+$pC+2>k`u8UA7?xBZIyfRxmnEWC*&!-ioS zNcmkH2?h{Tr8xx5qK6jnQE~pvTgska>Cg+_Z0>@gnKr!@sRv_c1jbO&oRvyoV-vX9dbA49iTzaN|Z2mcjJ^Dc{k)MAfO2 zWk7f+FT@f`!&9$uKAze3%RWRIX3hR%k3ORGf!1 zJ$(Z%xo^e7){KL4RAZG>t$G@$Hr1@wRk(K1eZsUO*WPLK4h;VG&4%7nl%lqA)+Y_k)zIwSRX55LKj z?cI21UQ#}E4T&DU%%$iSK+A+YX$fNChWZ%}sh^bO^hkyA%N?APb~~C(73@jLR5I!J zl>uG1oJZwh^l7R6McHni{Q|mjH3H2eM;S9DtU<}T#%JL(#d#B>IL&MsO*X(g=19lL zG~N(L0cbyY8ZmPsE6|ZXr=u;ghWqXeoiJhM{#iP9LQ24 z2E{u7{(4T2zjUgU1W0a5nua=Uheau9zdCHe!pu#Z1iN2;CG=EI*Dsk-DP$#K(%(?r zHNYp^vJn`%WPXxVt|3UeN8{6{c_gs^xN_+5j*z;c1|)cZF_INQd)qLn4XpE84P{SMJB0jdt&z9Q@+Y;(^JJwLRovj z(RNu*yRN$J(wp(|C-sxRT9BNh9?zn(7XN9SHf-FKZjGne%qg{glE0wE!iUL{tDq?4 z^GWnC3Qk7t`M6vMmg|}*nV9dDqd{roD0dG3rsz2*n-YWKwpawu2lPBY9sz9V%j;DY z?+-%wcER?7W#ufvUYMa_Rf8raS<{w_hzz8ol#HL4*(DU0`#4-LH5bj@2{OoREDU~6 z%nRt^bJQ3e5=mQ+D}NA2rV!-^v2a7kk-ya7cYcdKfwoa~%qFJo5lSYYM@)TgFEr8s z*xMU=k{z_gp|kxxZ>m_|;=I%i%HvTF9WfD{2{VLAy+2bbJ?@?UW5Fy3QZ`7g__8lt zCGw8DoHfCrQ3Am6R!Nd=L|WcN3{ftAMnO>s&!k)JNXS);#xY_HExz)_m4AG`Bq6tl z9aT#eLHn7NiE+UsGupcj$PB+(fvm!Rd@S4I_ay)Eth46uJUNeJq9NrpA$|{|2Q6?>N90Y6rOyvH9Nw?@(Ox$}*Y3C2lPfFq@?RS*&sxlV+i&`gPtsf4S>8Fe z+J^I1rnr>MTqq~ERGciS7Mqo=4XRB5O0su)x84@l!FG^X^A$ zdoCN|=N{l}HjZK;Hpf(a6={9Uselw`wL$)$P6pIsyFbNj-^ZVB z1J|bYtYIuf7Fv|#H|sn@$@MB9|MtG;pgGum;~2?coM^RN(0jjt}ZAUrO}s0!Yk$Wdis&wh5N>`93rpoI z_e`_TgK#n1rc_pT;DRqy91aXP^mo|!fe!rlq&RXs6O!c@XXODEuhu5zv?4^wmd7*FiPwDfV93q;}l=SJ>=1!X>>H|FA46W7#L zB@Ry}wiqXX>AbDS$Aj?=5UQa(tg6~w^xYob1FN5&ah`EFVUHKi#^oY@3(k-7%p_i* z!V4$~1MyZr&q`y{t1-_0;xm+vaj-Udcr{z?;^4H~K%AM3i|sRPV81>%R*qwh`~{95 z+NZkJsx?$W<~aC*u@kWajy-;y#=z+Eyn(MhcnLKv&yjLF&ZNks-<`ZHEE42$dlQ7q 
z%}tr$(Vb4qmBb6Ar}rZR0mRd}dhq?a8_yHCh^xdv^$2O!A0ZC-L8u&#OLFSiT!&tJ z&6UJW!$CEyz>dRiCYd2qe&rB!%pA+PY;M2@4+@Yy7Vl`m}MMOJw6e)pR+ z2`s_S*I`s-uakUSBv;lQIN0M!&Xy>ZgP7|W3a=baWp0Y}TJ?ZT$kq{OWIcXFmOKT< zuvrS>wiHXHX655xj}(_a`QTs3NbO59wpekGkyK#jV9lm*=OwQn1_~INq!m z2W|>9ApvoT!J~L1LG!!!3@P%H+y%06W}Ux?E_g?aNQhgir~Dun0wDC4!DYNU;Xm!5 z?08_e^iP!tl04p||5CG@+Uq-LU*P^LPydBtse&}oU35qJh71ai)j5L4!8=qF)n!ul> z@^ofiRx!4P^?{agmk-96tSAPPBaopQEOU2q+Ej1e)LX8#^+b{E=JzO75md zQ$bDOaZ{#a?z!D8>u8qYi&FNCnQ4kk0~h3gau) z73qhB>V>=^Kk~)pGq_Yd2|y1B@3sg0ww{-b-1U4!2*JyAs#uAye9;o&_edNv!_Kax zj{bEr4*#Xw;i7`G^tZ`E%Y|w@LKxbrN&nhk3g9$n0j#~OWyQRTsI)1&W(sUsgV11Y0X<0+H%@O^OnD@^z(j}SSp z?o-s0@)WI~;U!pdaJd_3KU*wsq$Aa1Zm3IjGeR?f%~$r2fPuWk2bqIz`55VjoL!{F z@jU_)0op^(faLt9phhX2Hd}8h zkC91;nBl;gVXt{h_FgD6XZsUH89Q6Zy*{5L)eXcjSODd3{*78LS}Drvd@@!*TTw6i zDJ)*PBa-1{E2X7SVV%azZP;L#c)b0C?HsS0CRsT5zsti-@tIcT1t~U^hNN(U2Duc$ zr1h`l-%warTfiIgYuvw^9Pcuigtxk?dJ+az1yBqkJ=C-`v*DYbFceC5aGI zVL8>6F12=M7<2l6(4i?(1Nu8P1V6Xa`&SR28*!gy>8c*ZIFpgX80&Z#&95S5b} zEk7sVGa~+Xi&j@TsnAtiNkG^=nJ&7x{Tz6GcxH&%V}Saw6{|L)etcr9NX;e`O_@^^ zr^o`*hbX|sdPxFYtdj*8S>LeDg>QyrGhktcvcTOyhx|XE#$&bC%NOuz{WXc5AFMzE zmdd)iTPRld*20Y~xWPF5qkASTf%cGTM}4b1SXRWsQIws*WVGKsmA0*p&bJqXz)p(T zLG}4p<<`G}AbJ_eFVD{r_0NSL8m)e+44%HLo~^Y_Xr=8{YyN9t!PZAv2tU8=MiWFe zzT8j3GL8LvB_7^sR{{RhTGXD6z?o4?X=+;HWy%6NR&`we0Mw%r26Ot3?WuVj)m>5nOYBtV@X6B8@iH?jEc!H7j(x>7> zR(OvGyMVsAuL?X2=8#-tOMB2n*vES8jO}B3xer|oDvq^Ylyb}ZFaqAYG9MX!#hk8akinTks%=ibTE4n;*E z2G5|xj_wFI%D5v&XGVaN>LnvMto&3BVo!E*vzjR2>Fz1m(ZP`f#J^EZQtl}zP$+Au zX|unr3O(2NcOxZJs@Xn$n;VOOVq?u=p|?cz-Jr*0HdYnM>rTdX8^t? 
zCUY&}y1Li6N-i)_RgU`cE$A))qRH!{VI6VpdX&wrdXso{{S8|76?8 zZ^NhHb{}k)Z_0!{J-s!Ex6}~`5MgW4IH`cu?mBg{9`pkFMGU7`hpCl8so`t|=daFW z&VOC~`H^v4en!SL9Ko2JmK_B?k+>i;1Sp*UhC}0<#QHPe|AbTHb5Nz-3E`jy%~wJ= zsFFG9k7)I(+~-0UMb+#tNORb^|Ah{!%{BY?kb0$4v|kC2*&cVr5B~d`GE14l!Z%lB zl)7ZS0OR8|EhaWc>8nBwZUOfBFWO!%hTD7Pd>NEm#)lO^_TtAr{V>;(txPWdk$Fb%Jz2)9$(KCkcYSJ0ma3bx@tU3OM% z5NPsk<*8i~zp1HSdD5FxL55%ct|$jn|K7K91^e%>l(oOC)Br{L26oO@1fICBuT2HAwt@@;6Vb8tXS`CJnae2R~<%=*{ z2m@>P4T0+MJzt!Yjl-gsFWxre-Q#sZylRArJ3oFEyig)lFDyaGeA_X_x|=HH-LbQ; zxmD-g9?8{(yn7@n=Rc`LiTuCYhYBeDFZZAVsHqtWnxc^PDYc#?yZv8UkE-W0@>qll zxKK-*etG}p6)8o=kgFVOt6cJ4VMp4E+%EUCPqzFQ>NW-1%i!Pn8_gSehI&F7db%l} z|dE)y7$ZTS1B z3EoPWW*>0Xl#(tym$h;bMMJ(@sz~U$Em{5|SC@qMiv_>jH16#N=Ekri8=cG=*i=m@y2QHOskXBFFsBDDV2cBodFC68el$W$GHP2wX_fnPCN3*gb*1vsb&rHkut>{lE>WI&aIE!nb`}_d^;@6=`_t6R?}*wg~M!K+-lZsCGuO;w5nY%i<}y@tYYEv=NbG}SgV$~ zxZS3<>kD3*ZR@yJR%4=PxGSD{|9eTsErfx-9QEj&Rkgfqd6Q zKy$fK&vlTA*`mIyZcUb7DkAuZWx{ZcVumXN(0oTkx*lMnegn(Z@e1>w;AQ8AR<+7# zidjn$(Fz~>BrfYkS=DE>WwCRzl8*_^_e3MBia3V*bsC#hG})Wh;=Q^q*B~Zb$$)0l z`mTIlS)SiIUcAT<&8qqZ$F^n(t73`Ykf_?-s`TVV zBx>}qDt6nBM6I5#RgF;CY1Nc$;C)7eSl2e(Fl(xZS`l-?!JiG{TnAa+^%HMy5%I25 z(Y(9@2)XQKir$uScn~g}eN2(y+S8PkyRJ&)bR{*%r5V}RmCuChIQnPKG8;Nwvs?3q zyPxY5B+SO@p-!`Lf2Zq5d9T6%*Y!5IhjEfraFvzp7Eb6iTa>oSb~%IFhz@e53UAUWq7*|rEt;nl@>cU>3W-6fy(Y$>Mim;9e| z57U<}5Pk}|hE&R@be6RJN{;gI%B-my7)uAaZhcTJ( z!HR^#+CDdUey-tI2B_jfrWf8WZIzeyf`dl1u~NJ&3dj`a_beH{3QtbZTjA=#cS ztC%kKJ7HIY@OVc;k9^icQc^m}@-?K)-XmpjrlfzaGP5wA{H^dcRFGH1j3_|wmrXPI(sl-lH}yoYni_7Pr}wxrfMH%dVLQ?B+w)3?g;`ZZCOk?HHD znVx^$0fmlK#nuY}8~jN+Hmm+NV7tG`6CHmZr%Tk>;BbVcJX#NM?3dudu+*0b$L&XZ z!#s6+0**Jo2ApsKZ3&xrcnDzH#0w zgLKgN{BEEZj2(Q!o5qLjP|IcGI)&^LqbuB7>#7k)b@{pR1;_T9aUKI_56$6&4u=te zPwzOw>Y4LVrt!2V0Gn*B57_SWn*iI3N`M`Xaex!sF9%HOvLEoK3U>mIe7iQP9KCiI z(zi??>wB(K<&A&lF{JNU5C(YXq*H)*En5#bJ!vK2jIFHs?v7^xXNwP30nHKfsXFEf zF$-wE=sp~1f#{D>u^tkgFb38xG5HwK0by1EIw(G)-h5t6cLKd29%Yd?#Nj4D?};_E zoS%wGWZK9QB;|=2fxdCXtQ4(BXJ!rf@)_u%q^Es|I8F{CHwi}*d 
zpye}0j|RBfZ}ebSj~EHB;p)86^If1fjJ}tF-ZvgD26Wjt`zX*SMn0?l)hHwIgSqA- zw8ZzB_w-U!cW$E6D={n!g-T5VJ>*#IGv67BjAzUiR89NM!!>~pn8PVB&zfO5K!?nC zDeBLewVqNxD)AnPTJkm8wNfW~0+znafiJU`m6t7ZGhn$kcLA1P(Gjr1a~!COlh~q4 zpSM7|^0kwIRf==)t3K6I)f-N?)N;2!udWY?-D{EYocM?O@p-X=KYc;;eh}!e_=_|@ zBI+IiIx0p!5A>p#Od1>$6NrwBKf{4eh{YVTm&C8UIw{VARjpG(OagjYEM=Xi#jigA zofnPcK>BaQrQ3m)ntyCY=0oO&3%GjJJPgs6G#fdQPTm;jJED1K4^lTi>Q@4dk~S zPLj3zh32jOZ!`%V(#diiJ)~i$w_gM7{2N`dE_YCLyDlDwoNoVAMY?+ihqlK}4+HkB zyaF(`@j<}2VP^p2$FhaJ{w#%(z1tr~x=$)cBH zC|pO*xgW57eVEO#jt?(Kx@QE_apf=rvHHYaQ&h1gkgH2b1}UQ%Exio*eJGm)GJtVgk)ay?05U z`e6_SE3)nkzy@#AL^b@1^o+`G2G}T<^lvGrvhA9_9_azEvetnSa{vcbnGHBtFg;`h6or*oinSyq z<^vA>@=3ta+X-)3#3zsWm>Mm)K2{*CTjR(hDgQJ^&e*tPfT=S$=HnW0^gMYmY}WXS zG^TX=^PQNn} zaE6m|c=sXJIaBmy)9(>EbQSLvT^iwPme_v+XtwC@0h%MmZULGr-g^M(KC$9ipn2l{ zK0x=2byT49#jZPn7KktStc7AcJO6-~+6w4L(X2Gk5+jba95(7q1v+As;biTokjdpdLS&=T&lV;TjT;hoB!vg`^$0q=G zI7mM4`04#fclwp1-Z_WU+Ai-bLb~g(_W*WV+5kD-^GYDyV>c<%^OwH?W3RH#xQ{>_ z`E2X@W4JB`1p=dym<(7Ziqc#u9ctF9dYl7V<5rqZcOBBNQAPHl`5k`%#zgi4?0k*g ziGOV_V4q>xfc@Vhj}LD}2_7A}7;xeea^-Emkh`XoCI8;Jgl)R}`cc4H;`fTUai6G7 zeYHUBnFq9344DPATvX(MJuDu12k0^J1X*&k==CVj0r49jbVzjm73iplp9XY7Y~)nn zlowdraI^o#k` zaiG7<-QYs&Ugrgpb)NGWRr(@lTXNJ==P0)NG3P4s@LFe+D?l5ZW$p#q>^#F6%M;Et z%JfstKBUqPXWx!MyPY+@06O3dqs2Sq{QE^*z2FQZYaDeRdll%oa|;>lw6hi+@YkJV zOW^7a=TGf`-gW-<3efw`m)VC)&hKd>K55sz{0Ij!vS`W0@tWdhKC>%)pb2d%eC0v)j~lfjNzjU$0xvi@cxUbe1Km|wHrph`M#HKVe4(^|~o zc*nYpG<(n5$x->xI>4d&*h*dpbjA9%0nk7T3M4Z>&nIfWEg5QZ@W!WyS&h zYDMe@`olWW9O!Q=ej(7Zu%A`~tq6;~3FwiqOE&|p4$BM!S`$|LGSIrPaod2lg>`QN zv^VT18SFsV)D)oS!d|Nn^j_GxAAmj%YevI)CG0c0*0Wq?sBY%D>hwk{oJdx^>nFg- zF2!-T*`!3ITfIg0ZkOAU^OSH?6YUrK?nUPNq7Ti}A7TWx%`D>z+p^8r2D>cVQs%%(WK=neX_>s1 zE4Nnd36%LdL4J_6z$jZ2`3sFl%K|-M?9BpNWK?1!78@to_a#Q`A3#fuo4*8FW(+4< zZd@J#^q}$3UZ54mvps@t4EBTtZb!m233b`Hcnv}!`?HNzYA2Tqg*|b zU96FD2C(+K_W{;vOV$W~b1|U%epF>eYsmWV)CWW(ER!Kj4woWt(NS6z>E;@CPUGJH3QKW61Aq{o&Yjr63g=xo^Cn<1L~ zrS^;c<}8PVt1gN^FGjx%bW}u8s@@PK#;a#lOdW%a@SiC0?laQ?>t6g^-D~v77r1WP 
zYbIbb&k(@o*QlOae8H}_-fOCI4O+8uxpg!gcpA>!STV8XrTt=OP-ExONi& z;}^~Y?9~|kvU*@yB*I0>X%Io^kY;_|^+a``FcCJlLn~pXGZQ5Bmhe&HL1+D7UAHlWf`L9)N zt$stR!aChVuc%8(1fQID1h88VvSq&{_*vHAe>k5TF_>jW$FX0xeE%@un4dADv64UD zr0S}4B^sIq zMI+QqN1a2vP&{=M=mF9851>UNhG?;^apXpvivn!?HcYg9LhHe_F;g3i1Y9cQqWv-<*Wy=;Ci{^2sE9cS^q43^ z>%2zH=76jf`Q(FjV%PgX>qYTpKpVso*0NdTQrfnN$#a0Vifg-po)Bx3fu0tB@ZNS& zawgCYkxP}dQ+(G4Xt(%hInW+)bwAKvafp@e6VFh09T3Zh0zE6rQ|JzgopXT>iP>z~ zVbP=s&{1(I0q8~X>QbPSV)j!&FN--Gvoj+3XP~p<=2W0_qIDe51@VI$=q>RC2kULI z;CrC=MT^luABzc8g_p$vj`b(vUAF8iQJGbLBX&>{z86R7GXEwLf5p}Bq74Z$(>M$H zg0owG8qgdgf#bQB>^j5;(%0IMn@w%5u15iqJ=%H~xlmUB*T;|(*>$pzXIK8c4ANE3(HW^)D;DW$mG%QxUqctU#+WHc*Idb-)S9^# zu=e*539HWIKLLg>=m_Y3qAXzDxLW}u8j;cK^=G^5&!*mp{F~aPVMChVM$PC3H~x}k znoOhNk8X;#T1}IvKbmDze>NY?&6yV8H$hIzC-{_@)3lPU-lZenx;|Gj+H|@P`EBpn z3)pVJw}9;{@>d;xCL4C_$>~_9ab(`k9^UP;o5I+2IH}fcE@{&JwhMqgW>a>0R;~jW z`#^8NxTk2Ie=)Exsu+Khf(}cTD0QUW^G+@8;_XGCd`XJzdR^I^*oJ=kr zl*k?pUh^T~kjCs|;*n^ilQPo)hrX8sIPB~yz~O1+uo2Tqy_Tz)4fs=E(;j2UhxPd`iY=4L#d3 zsS0aWC&#o6Z>G4c>Yn+yuBk1~1+Y$Uia>Y@ z2hDAASn6KQ28>wEX4b34yY-Kfgpn=V05<5iOf{~a=)`+b!+9_EY2JIIKVZX24FRKC zJ_*=p|F3|JuaoVYEGEN8XH(3YuB4DRdx(SByx&7euMu^)s`|LNz~!^GVkTu|ow!JG zUoWcN0<=NwtqrtMw1J0ZZ4%!t0@^I%I0M)se&FzI6*G?lJt3yN1++~JW3yfs@pz7; z#niY>$e6yUDd3D;w*2n4tYxMs%d7<=!2)_*M7IZ8D{k2bv_;foW4DVd-vFHvvq*;5 zM7bG2Z;52`_C?Xw0NP{39R%8I+&cp3fU)Fppl6Nsn}LoRzfqsQ( z;T0p}QJ`0ii+t>B##E5ldfhO30G%@ikekjMqv=h*VRR)YT`-DG2YS;O&Q`x`4DSoH z*qlmYFEQs*^RF~FkULkI_eTOfZeE~%JYZJk*nDH=w*dOZyn}4~yLl!YXqMB(M%?cl zxf5uyv&=t0%ba5 zg|!@aMs@~z$$7B?&`D=kj>^l<-n)U$I;Y$T^op}J+4EKBeYJpIb55jAc-`5aUd%hr zq12l1J5#y1XwVbw&{{i%h^#(Q98`hHpfIheWVjr$qE2vOrhFzct%nJL7^6*I5 zy$gWWgl*vOHim7XmUt%Y6RPhMVYgKTIv2Jx1L%ja^Ob=XyIy(HcHL;#;{z`QF z1y^5-Wo*bCRV=YPiym6R*-VtN_Z$L+l$caG5jnk!pP8b7j1$xPtGZ5&c z@hv-c$|y&@{<1M-9?)r{CkOb9@dl@lXN~P1T)kp^$F96;EViZgvtGD*!zlYX&;{cK z42DhcQO_Zx;kRYgb*0K9aa}`C4Xj!sAJ^d{8w1w8tqowk5g!9KSV3-x8biYy{lzh) zTlM7A+dhbf$(kFDqS0ym5hqGbo|uVr^sG+-o8At`%xc#3cchzdTLjo*V@JT2U(y+f 
zsmqB=s|!COr}a%yfNiF-MQtC518j#1VEXq`C3hH3p6hr^ZNN_H>|N)%a@qPK3DtEY ziO_8iTiLzMw}3tJDMUTXZ2*kzToW+v@d1GGJl1LTn%xwzcOR;UK4Vh=6W-*n`feg= z`&DlV*nbwEHDGfkz=7?UKWG6<4xY-|hV0q|nAmm=VAA4vz@cuEepu%h0Ec&iuD3=e zu!ftPlGjGve;45BpUAei?D-pTOd88152h8owSgNjWx(BlW2f`!sVD9O9M_(d^kl9B z9N*(RzzG%j+lim?iD~T{0#2Ip0N~_kQX~B-nxTxIJ&=?6`m=ypiy8rDze6K%TMpZn zGjKm(ZVE>?Z$NLr{Nx#cx4%ZmV@msLNKb9bahdkjbfoVX$eQnL8H4m)ztjSpUY~Ym zMl{Fq?gL){&J^)%?mgmO%FexF<6@v$!b7KZw!kQI3#bm2)m-rh39(STwHD|B(d8o0 zBGD%nXtD6*0xc1@kpfFam4!gd#0(15a`D!Apa;b&I)W?2DL&yLF|-2E!{Yd-K#z#a zY}ZOrgRHVjd~-X{YO!@M(4*qan?R3=CM4(@ar!l&$Hn(A0<9JCta_a=I|FSLYx<$R za*e30%eVa)u)^}=fEA~6h$^)v(^js& z4e2VEo&c$ZFkQsqGdhq7w7$QKVZPCTq00PS>Yx_$I)1e{=zC zpWYX+L(_KwJC0}&T1e*^a1n*^8HHukyYB;z?nKVK<;7lrW8||^9-WW$*aIAm zaTWP1PtzBW9)E`kI3WYVZB2?84>)=6D8Tf`egn*SX%FCSb;+~2W4j`qFX?l;q|X#d zpQ(~Q(hS%dWFoE(h<#+eXGQI2fDVe`6wyOs!4ODH*4)ZY)e+K9!F^%Y?_{>DfQ=-IoxO!PU#8rpW zVkCw4jM(x!GS7-PP6NFn(mA@XilHTdUK6c<0(xEaBg35&zmPl5i;H~T8=~t_ptnSZ z6Scf8QU>GdJu!9~(EDNrIp{-i_9k3?B>wpv=(6~58_*}>QiK16RjP%`cgE?z|}XR7M;v*#e3}S_o5rTOR3zNOs#?2O&>`CY@W0Ou;pNq zywwvNqBiwuRN8$q6R^XjMu45%v>{y zU55b&+)@s3&@G(F4EcQv(n)0>1{}7UY(3)d*8xWsV-2J3ZvZ&H0`n*4WC2bJs}7hx z{wcuB;uN;*V#NV-5;)SE#ru@zEuu#%&{pv|2jB@2Ns-+qO7#YMN~BPcJS{q>0c{sw z*9F=k7QF!k2IKQ~i%;nq?-75ng?mNQRmj{YE{y@&FOINz2Sgkd$w6_IjCn|`BL!X% zSGntdL<}v1s~5$+1@xo2z7f!Ma}dR5mh(K#+I>!oeO%x?Pld7A`56XT zvPa~nxyWebb)1`)h{JWW`F{X5kLFKXyhmHrvTaqQW7bm6I`-=Y*yCAfY^&#N4n*85 z)XKdMk}vxXAyNC?d5V@MsM1G3vRuoa@ zaR7gCrjV6>bf%65nu+N-?%iW0UBK0BYa{t)j+H^#o@))Jj=c}I0$1~_`4NgXRX(u>fiz#{icC-T=@DU%A*_VCKJI`l*Aa<9<#fM^fHqc3<`(&Uu zjGdI^%f=rh+dA`sjX=+sd!S0?BVtRC9=GOqRgb9D@m3V5GKlT0n%5TTYO$2Y>KzDc zlxJnN#=DVU=WY_b?xNa&k!LZyR@C^4fK85&S6d(20odm3PQXt6VgNh0M>njl+y4QK zjoJVh_eG9+sue-GFSdmWsd)LpfF=5t2P}D0f51}L_@vU4sd>xHnS^xN52#Bjl#T+d zSn&X0l~X?hR$n&-u*Rs_fHgag1gzC;AYkotLjmj5Br}EoNPQOp=|sDmcL0oPd<3w` zpT_~C*OEP(Un>RJ0tyd$Ywn|fZKtu%?dsAc?3hc(q)P?D?&}&Lr^j{5QqM{y0b?JD z1B_cw?GgVuMWR<34pQ%`p8@vymh6-eI}Wh#_df#m+eb<7A5AqnV9`pX2fjczYS7c2 
zkRE(5MSjQ}_AW8weZZu6QefyzQghfwu#Po+%_o2(_EXc`bdXOOS-dac&8O*_jcU{q z>CxS(fNrUl1UP2#EWqR|TLEu<`~|?2ug?IEU4ikiQcLXt92ZAn^VFmo8Go@k(i4t6 z2RJc;td`c1hGf#fXyi=(o-It@%pPTQ{{k?x^>o0jkD3BzkIM(VE%`&hoF>$8`6>4S z-oBBPpR)NX;M7k^t7)4Jz&n!v2E6kWXHGMsZ%6tbQE33sy`oG#pjl!&mzHLWz2D<% zj%Y&(oGbRYfbJ91$vX4IU*zfg#nTILHD45a3}}J4z8Pqt`0xbKV$l_xhZ)jRTC^3S z9Ch_WB8OG46hCknSBXVD3h|hD>~37G6=`%n)`^<;;A*`nn+~)=l)nVDQ9OMs&=&D| z8=!4sB%AxB2q!V064ko{JuN12NVkhWI5F8JUZQT>Eh?t~?GdM(Kzl_)O4~khVIR;l z;;DW>&x)8ofewlUYS`z+&Gbu;h(ld)bxb_?4bX9Mh4efj@@@foNo=bDbW&WI19V#a zL~cGK-sV7^75hs7y(0e30(w=PqcprGx<>-NE*_Z(bWXg$d*?;%SApITJ3j)tAXY2| zdQ&{`8qiy!D|Nuz;sV9-9ns_a7?gkoEd|w@5SWTfqoFf*uNje zj0!+MiLTVEzuNMiRa2hm4u@=~f!c$!DvKWa_ll#_yGY9yNaG0Q9&qbQ92e zdfLd}0<_)O zzZYnSapDf3okn~f&@N-hd7#}!-A{q`7-6-6_8O;p1MM@$Bmg~QT&V-J-}sVh>45Rs zLZD}j$hklVjU8m@L&gO@{W)XXDWK<#c`pIIV0?86=&-SQ1JDs8V+YVNV=CF^xbZFZ z;|XK*eq6m|?B_U~G}5UuP8&bY2RdUs&&HoM%DEi2ZJStv(ssx7C}dCnmLsrHY+wsd zihVBwJ!(XD1KMPqcn9byBcT+~eq;Oypatf#2|x?Y*sDMfn0ajdBJ)1TleC+)JFY}V z_kkpS4>Jw0XIWBf)O_%SEHSOZbGW`M`VipPqIOxJZ^e_W^m}oLO!cFf@(|F^;?8nF zzly);PyZ=aox#=LVsbm6>ta9o{9a>rI?!z63G&ZeqfRHFc}9)jfF3Y%Zv|Rp7)d~j zjYX}1mKh&D3$()cl7xNOnEwONBgWY;fL0pg*xgk|_(`BgjWsQRb{Tj52(;UHhV#C? 
z#y4!zKBF%gZojc)GOi97Ka()e8Zq^O4jJX%0(#EqMf>r*v4fiFurXr}u8tZ&w~#*igICykM7fle7&^lwia&)$NoGe!*SJZrSz?_M=rbjMyd?w*RPw~Q~p2KvA_ zIS=SJV@Ly_zl`o{fc`epNZfynK0g7?G_%S4_nH$+0?jhrR1veyoP9uZ&F3}&Ej6RQ z1X^WYr_Nkwo_Y{ys~NQoXoq=dCeSlxIl4H{n+JJy+-$TJ=!|)W2Ijo^^m1IiYo6xS zCG*=SaCOz3ZvuT|?!5r?vw1(S{xWa*2WXb_hmAnnxEHI)#a~)AY*BKm9pheLU1zI|N5d~rjL=M-*u7 zjVRE@FQPzO9Z^6xC@`WxI~`G=y^1K%!4w@$(Md-X=v-t(fiA%j1-gccD9}wu6zHxa z3iK!tQJ|-eC=jb73d9u_Q6N5aM1fv`5e0e&MigKa0Uc4GuPOQ!5>cRkU_^mNP90HT zfGaqnz(8+AfkFS(hyrRz3PuzdTtq|xvwS(L!Nq0>q7YTqRVp6p$u*VGs^lIBC09AJ zKVX$#Vg>Y;6H2V?FT;@Wy_ppabdPgZ2cSot?iN6gIqS9udfXWq2eiqm)CTCN^-KcL zi`K(EfR0(Eh>lzD_5nI?&1LOxSP`v&E?CuC1HEar=m=+v&x$Y4holW;1(UWjJ$ZH~ z!1OoU0%q3h3z+o_AC|qZCt%L-et@}ObOFr!f_2`0r3v7ao0|eotwngpP?o$iJO=Qt z%x-`)?q~;icNNw-Q~bn2bHw|+Ggn0O)_vk`l473d-Wce9v4C|h5Kr*|3&rY|Ko5w8 zeDop_4nkob6V@AOskp=rEfYz+S}x|Yr4NcttaF99nG{$poc!Ge@l{u#?V=o;zF!QA zmlQCT!E*ZSL}hI!s%Se=QU_|Os_jIzkanWFwi7k9ov5kpgiA-Usio~iZEYv&1lfsj zvxNJGc0&Kiwi9)Wwi6Md?L@s0b|Ng4ov0sZCyF8DMq*@WJJCSfiH3o8B1+qdMmMk% zjg_5f5@;u)wVhBM*LI?5XgkqN+ll686-_E-CyEC|o@o?lCt3vAiI#zO0^wim$TPam zU^~&OXgkr`&rY-{U?tXiXclSk&kJCE_~KQc;e-c|`om z;wy!lYH8X!Th!r24F+)n{d` zKC5W;>2a&JfqUH_Nr~Z+xK5G@NK5K_opLIg0&+t&{(;cWj83Zh` zZfNxxq19)-K=oN)tIx**(kL7Y^>F16SImYRY*5GGEjX+ z2dU4df$FoFR-d}gVD;I&X!Y5`Pkpv5pgvN761`iu`$ zpS=RrXKz#V(dsk76nzU(pZx;WXOvfc_Aj752YA)zz#`RW5rMnZkQ8*Y2L-CnfWTd* zQP!0>*sB&R7asvlRqGm*Bi93tR`n_>rx{*VE2?U)23Tiunug_Kq(Ow}Lu5A@Myz^u zt!`>)byG7)-PAHm)Yj_8tR1G6zbeMoD}O`A9y6WAT|W%xGr|n3RMp#{)yvk=da|r` zGplGl86Hwky0xCHtMz1r){~`Nvqm}`W?f4Ki>p^chdtEowl!Y8kQ%R~4trQ%X*^Yp z4tp3Gpz&7XNlw>}QFz{F(I8OAm8OnssC1mJCDIN~7^QVwqd*$v8jbzBRr<64?kG;y?!({Tx#1gf`~AobQNP`zO)Z+nNjx?uIz zMyWU5pP;yeZT-|+JEh)KQN$%|?^ACUA`y1*DzuJfaX24#B*IQwq3I?BDzwg8p> zg{CreB*K1y3ax*jLZc6;71|(E3@${W4GC0e&AbXNQ7JU_P(&h3@+!2UN};KHL6Hc{ zyMCfT^d1(dq$&jXc;$p%3s4@Ny%ylg9!c{1hh!RvA{Ahoh2R?({g~;i1v2@Z4$Fmfi{ab#sFOwkyC*F z5^vlN^ssT7)jwlYP62wwn8|xrjL*`6W}3@K0-ZMZ-wbrgTyO`_@8(T2f$$OqAGFmu z^j4r3ojXSXec-G=8t7-Imt+Dm6BfrltP0D> 
z2ih6-6`%KVSXZ|5QrJ`M*q>p$Cj%{aon&R(U3cdJz34Ld*mqrBU?Z(-u3fhP{pFf5 z9%xfBgALqYtXeYA>0+lR0)1HQW;Wv6VsU)z%;Iy|+vUZtjRo3V{0lbXQ1Q1()>n%U zk-aTmZW6A3DZZIcSXg2{d$g{^H|*bm64yzIS4;d&>V90}HrBGOWEM$quw+$I{^gQo znE7_e5m`W=mOM8OXhA6_srz85G3>+EQhRvsc&U}7%WI{cW!2|OO=HP-N`00K^g$_$ z=t`+X4%OGC{vefpDpiS8`lr;J9Pl}%2eZz3rBAT83re3Rb(fazc{k9i(oS}4L+QR8 z%}u2bu(_K{PayiV^nKHTww1ZcZeA*rOj`d~CWp1mD!ZPKT~f9@pYT;#bDEkr%23Pp z863Z`TmK93j{g7HM*p9WZ8XNIw9u=VS0WLuI2~M zz_94BO&uG$Lk^U!yKoa z-5tSyro-jfZH3A&=C~9xzqq4ZSg7(P9N8iBOFE{yLX|J&SQ0Y7v}16wQ02=wHZ};A zU)HfRWPUlvyy#Hn%R8c)h03qs_`J)lj^IDjQPJ^T$oxu<-$LeBb~w6*s;7$MQnyg~ zSV8L^D!-Z|J7j)!$IOuVHG-QiuT4kILh@@Bl8*y;A^uE9oe;U0mpfpgL;RTzcY$1( zblCLouLs{KR!DwD_PoLda`}| zyBYsQh;hFM`FyTBsGbL8`GWlO6!Js$7l*Gx=ATjZ|E22p;F^Cp4kn+QQldLA9a!y@ zf0qH-p8YDnugd>U<%^QtWzIvYV>k~O^0_q&$!}gres|>ax&NoNtBsMXD#M4xj~Z*) z7PKfROG<%A8M;j+7!mI7cDG%!ZDqSPksqABGjn%$w)5qkJ7ufJ_yMtvu@NLn43Nf; zV1hq@fbvWEK{PRt)Wm2tA#I`|euqRu2oToi{hB%V&W`fKlgv5iea`#7=bZQ4d+wdn z**-ttCjQ5`7w7I!{Ji3+;&fI_`gdL``Zp+!?{JKNL2;bbaKg7v#?P5PWJ>rB#`wkW zk68XP=gyr{{Ji4x?~sNwz&DD&jeq<@;%7|#Jg$E3EZ{b;&njM=*B=$Pc|D_kwp=0p zuU7v*Q~V~y?N0lp;xB(#^uG_8(w`JRbydXje*D^x(y;sGb?9H@wz_kBBi6HQ6YMs+ zkEng4_*};QY#Z43cCw$r{jH?>#r<=F@xgs~f&C2bd#k{=;W{~^`#GCqjGt3{?aGLT z?0(9*bEg#_-4Zck@qbkDSFaKLG1Y&PaTS8%7enyh1IMo?2hs}{J2&PA_ve2M@v}Yz z{{Z~Ze1G`9i2Jx+d)f(n6#k3*{4UkMf2-)fsrniFAACOE#`>xh6eC0M`62kK_WPa9 z((l*QSE~5tO9aPn;5pr=_=`HPDa9XD{9mIHBabTnIO8e=#jk)<9E#7u->IK1>IXk3 z=k#~rV_FZ3H-K*)x!kQqiy`bbVV|J*e?qUHUu)|J6`$D=G4cfND@uD9mwK2_1K;ZU z?;#L5EvTQ5XdEnVcPYO6+KBaRpI>EMY{UGEL-32aXV<*y6u))imWIP6q>lg8%{dVTIy{3_sdJrw)B zLG|xbKliA9r1&n~f94cFs`yR1Z`;09wEuv4 zuSQIGl{D*Q-K8Z|y}x5>-p|b*S@7OflQdbbbwKy$vxcwKTVTF}Mz!UYz_XgQQV{WV z*IcDVOn)FbE#3U_@tZeR@}QTic_umZNi!pI!8C3po=MVS5=fw@t7SRWMva*FmVG(y z)G8RL$wWyAv6UEZga$Ka!N&-r3B{4i6r?t3=|83bk2{CxBxzq~W zmup!gZVO?caR+)PB85_)(Yz5KOO%~5&*ofibiD9O_N+xzDnJp)p_;I7$aehB@z_S zNp>V7=Ehkpg6VLOmB`SjVr8j>7~93=R+>f8{<+!7DZi(*r}Qz`$*Qex0c&@&PG0MH zr^(@8%EPpl!7zMMjkDM#O=dw28ZZb9D{l+FM{uOaMLu6wkK648+=^24oqT$N@4-Hx 
z8k4mq$TtI61IS_bjks3q*V7=qtV*$ysg*dz3M@B>k(1Vqm%Q3(=_NBTs$A$pvPuV5 z@=E(ef6n83ou54nuT{{Lb~~WjtP>e)xZZTpxCT?4_w8trmX_+>&PpDm+ohSTWgZ=s zlT-)N_|JVU`tAq4cvQQ?sPOF6tLW+nCLQ1+H5y4e~CLLv2buoZ;|P>O426A z)3`nbCem6+e)_gKK9x{ZxHpMG)yCAPPemk?G|dOal{zOHSzJb$rBbd~jaR4bN?`6z zN)wZ_H)ip&O3Te|sobsAt2frbfJ)Lz+*xs@>WL=&Ntvb60!GjdCYKXD+R~&RlY!RS z^~{xcK}#7{$(3kFM_Y?;ky5gvd-zHfwkC;%Z02HN_0e9+%5$a@?dkm|Crlcv-a(SLlCR_&+2%5usNWy!LRq zWhBy)3wNayyi_Rnojs#5UFbRmrFCB zKj2lbXK&|<^qxoQuTu2#V*Kd6b3GWpoj*RM1OBL5+W2jpRvtt9TY{U)6-4QD$(#rT~DMk=b_J0D%6 zivo#k!lYsCL8nj)?d=@(n`-}DPg5$RDqHy=+j;Qy zB{6#17Djy(`+pAj7JTR)pk(JBfBvM%MSYX4d;w~g;A8f7zWmDDv_A)18o$}wIrMA) z!yYD_UP4Y&H2dCp^=;CALE~p(uyNaU@G{iG{_R}*!tG)|CPgnddn^BjmeAfsi=rDz z(Uzi@o1K;aMoVaK=L79XOl+Ly!0b$Z9TmcC{`NiaOOa?4^)(#Qh@1|v3ESO6q+<4a z3%J=^N&AD5B*#Gzt``e!t zqg$^Fj8QGx<4?sljNj+ai`^Ap&0CmcTzlrly delta 7429 zcmaJ`3sh9sx!>Z$V~wI1C~TC@+l&ilT|)K^+-<7CuEj~DTH0x9r7n!B`j5u1tkDx-#j1N{do|EM%An z8Ah|^C+1{!%zNVb6a7~{b?2MNtc(+j$DL4a8W5&GetPF#YO_itxU?U`O!&p&mo@0+ zq)E0kYL#-d2nnMl zL77q`eKu&s$l6eaWBToNG)%-o8gyGh%!Z6XpDNRu4h4ls^xfbDNv59%7esj{mUA06 ziD7hRh&eBu>)K7v84Q*6UcdDC43DKtLy9E>eLKWV-wKJ08Y^;#f(xeSfSv_v?}LWY zs8F*dLJtNxzYp7wP6##A@u87XPxnEi=*rN+4&B8!`pL0Y;!Cf@Jvv1}i|J1@y1UDV zD}4de*#dNTaD6t@*~)dhxjv2Q2BtT1eG=1ynBKtkN14us*Im!`kxX~6S?aFkz)%LT zF?LsRJ&oyMOfTnpBGcJic9(KJhUsj~-3s))D<#g8i|oz|_PUGS(m5sV?(#H+*xTE@ zsZ6w+<1wiCrwXj(W^~FFLJs~r=Aw+g7`7pG2S8F|DziH;c~&9R!lS{P07?Hi%x0oB ztdI^4-yRj&x*h>N!9VzXJws?~c>L(reE<#;T%Ks4c31GeF;-%CwtEz)p1X` zNXvQLTCd^eDdkCQy>^!pX5Z?ru{1v!Xk{1-tD*O;jGEPwKr1V=3KCW>GNB%e=%^mW zvy%xspjBrGWhYZC&C3D)Ic_TBQK`SC-9f{2=1G!Gdr6E-P@xx-4jdth8Uc0l#Yp>hbdw&pY zeCSGbpg$P`{|k392xeSkY-MJiBGxyjt+ywVj{{5jOWs@Ri;R~oKhObD$;E7Pykg!E zx9%ZvE3~*?e_S^MoHj3y>t!XfoNkUvwCvXA!h3%VtqF+dIYIv%HCo!$^lg+yk~Yzx z=rqjEb1c(*^t)vxZmk>H2wtF*qo+%o=_}DovZ+PHVV9@Ic}8F;vgvxY4}Cbh<9FIKk3f zz`9D(F}`@*TGMg!ympJ8NlcU{-J(llhbfEi_*FU`NYi zeFJ8(fHwD5_xc1*7WO6kf$Z7nDs#JRNAfz-(Jcq|WgfJx?&m|ITWq&_f^h#17Mm|H z*1mmXhqJ9kK@hFK$I+Rd$AklYD=s~<#pWkprsv}lr4)K6Zjj?rFYCuAJcqO0Wz(`b 
zj{-xk(B^-GS5I*B90-sMI;25|7{n&3$a%ZS>1`=zsys)~|6S52ILf?$f3T@p5!n2Q_j!%gH7mk6m$#h`7v1VJ)7Ob zECJ(2^yLO}?hA%DhB9-T2X-E&zYM$P9K<;E8{}(woAn0p>({Z_?R7`SKCw{S@!0Kj zT;dkJaypptCEQxS!_D*g4PH5$ZZv(L_yuI7Zl2Z%+#F?9G!R*zU-va9~xNJ9Z zl4!9k&Fcblb+KIcuF{W^Q!-!sCogXQ+T7;)yGXy!?!uJvaqYfN<5RMwnRH@`*}Ugr z7Rq(HGNnqYqhF*XO24GHQ%2*-HNu*h|9h-cca4O_bCuO2Dhhyyd;?LoxT})@@3y;U zHh`+PJ3nM=xQ1@D=8W2nZR0#W#rZXmPuOmB+T1W|U>3RH>`vB6`0Abf=oF_ z`L+3{>a_DiIIPwJG{U2i1^9sONsW_NT%(gyQzx8-2X^EmqKC-9dpHa*dKmuzd4DT@(@eHea8Q7Bvn&&ua3o7{CwUdK-lB4O=(Shqql1T=F0wOHt7(iK z!=&p67fAbQt1W!{UCfiZqW(zHyrQ{9PG3=3(VQ9E({By5k}~`D;Y|Ld z#Cfa4`RR;o_8JssZ@XvkEWXsVIHSgpkVmSQuB~26)>N!owsh^%>cw2mSweq0G%18d z#i>)6J+A4$hhCB#?(=wFphvY`KHovebzk~?Mm(|WA#)-B0$B=~j$^h8avNklWEW&3 zi7hCgHuWId!0ve7^sr2QKN0Ps&pFJz4e z0fLYKG8b}vHv&K|{T2ZrcS6=f{t2=XGPwu-kR^~_$TuL3cpW~BCCY^?xB`F3ZID%v zW3KvqyCLU79t_5N`Z_c^0bGFWg^a?1l7M;2fgB4t9dbV84#@S8*B~1phk1QIH{^4W zU65HgMoB0kpF>(9tN!8hDUb(H0POX9I7<2@$a5$&b~10m{_2QC!-(DCg{-B6a)!z7 zFX*(K9J%8h{drD?V-c1ok*!!!f8&etSV7pQQ@`f(HU!OC*vkt7OtmUyYdi(K3p~Cn+z(|K=&ylF z#$)TVzas9Z(k71iZvlTEJpbV0@5Fjj-~N^Q$DAq zV|~5}_a)$G^yTGZ|MbP-lz9dG{s8_hKc5fYisS9a_&LF3Fxd|SHDFSA@=3*{kZJ_T zx=c@v%8-k%(mSJ)rPrt>KQlEQN7J5wq`&be4Z;!jAG9JrMNYUuU&x;ttlaSVDw_>)Buq6O zMgDkVWDT?ARkM%EtJSW*%R%ZR?Q(uznD`c`C$OR+lPFRj7MLJS;2r9Yc1(*=tQ$~% zO~+lrm$!Xt8>7hsQ8$BTQFGPnpru|l{Fq#v7aXB*=cW8k$S1;Tg~0hE0@wwCyVcFd zWOH~#KLr3G&!~Hj$@3g*Vl>stFkI76zbw~If%E+ce6GNM5cn`|jZG!+>{x{({Ln#b zrNHY1&JP>l8wCDF9Jfy+{FDLV6a@Q2j9vWjfUH;B&~4RwOb&Al7@#nUSoxihUkjfx z58(PB<_i45LA)z)eGtDFajpnFRmA@ifR3F3etnk}3leAk2fMBt?ZHN03> zA8nVDQ@;f+q408ZH5{h=>g6~J4I3&}edm2St|ABrJ^SAJU=hRvmk3Jnf#OIO_WDO_ zEifT@9RJrnFh$M#09$DR=oqYXd78pKxHVcU1U@f8!});+e3QVxiqdd>>V6^c|FvrP z9@w&LkJ^Sb(*JN_ISwoKX<7dyjAt9a-cheJt8<$1HO)ALJwDL2^&(Z4FpgJeosi9W zNrIklQaHh%u4q;Ap0Ll<8qP`Fa@75BlWu4^CULD|fe5lLQZwExte+J4`TiPyOyD&F zZxDT&EO7QLWPNoTHGBRl0Pfd3&FDj*jU<6oDdoaJ6Yaw7OtiuQ{tANDS>RTD!H<=C z)IER0V0(pOf66!^2+_92Uey%8&AL^U)j z4J*eSBi8@H(&^j?%x0oIMe?fXtzOVK7Jp5Q5z3N0b(j)S)@8s;# 
z|8vCtR$-I6NX&wZv)FcG+*M-S=>m@s_#eb7<_kO(xJgb>4}2t>9K(gttHQZT7)=s* zKe5qf2z;Kv!^F{|Z@@Xcb9gj{D#B6|YlQtUG09T}yG6s($VYSg(ekhN=&-RxM#_ zgBWpx>Vz7jFLt>&MC3&E)M+^#Up*;Xi>3)oEv8sXy$)3e|1U!}XKcs2)En7nt;ahIuDy z`S8wgPHv6L)imu++YGU)_nfS#jpyZr@Vwa?9HyQ;FBilgR5Z1tpC;MUrdS=+B@YXK ONtDN%>Z~p~<^KQz@oV=0 diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index cb8328a2..97001e3b 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -9,19 +9,26 @@ #include "debug_print.c" -/* declarations of tests */ -static void test_1(void **state); +/* + * ----------------------- + * Declarations of tests + * ----------------------- + */ + +static void test_irange_list_union(void **state); /* Entrypoint */ int main(void) { + /* Array of test functions */ const struct CMUnitTest tests[] = { - cmocka_unit_test(test_1), + cmocka_unit_test(test_irange_list_union), }; + /* Run series of tests */ return cmocka_run_group_tests(tests, NULL, NULL); } @@ -32,17 +39,72 @@ main(void) */ static void -test_1(void **state) +test_irange_list_union(void **state) { - IndexRange a = make_irange(0, 100, true), - b = make_irange(20, 50, false); - + IndexRange a, b; List *union_result; + + /* Subtest #0 */ + a = make_irange(0, 100, true); + b = make_irange(0, 100, true); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]L"); + + /* Subtest #1 */ + a = make_irange(0, 100, true); + b = make_irange(0, 100, false); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #2 */ + a = make_irange(0, 100, true); + b = make_irange(0, 50, false); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-50]C, [51-100]L"); + + /* Subtest #3 */ + a = make_irange(0, 100, true); + b = 
make_irange(50, 100, false); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-49]L, [50-100]C"); + + /* Subtest #4 */ + a = make_irange(0, 100, true); + b = make_irange(50, 99, false); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-49]L, [50-99]C, 100L"); + + /* Subtest #5 */ + a = make_irange(0, 100, true); + b = make_irange(1, 100, false); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "0L, [1-100]C"); + + /* Subtest #6 */ + a = make_irange(0, 100, true); + b = make_irange(20, 50, false); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); assert_string_equal(rangeset_print(union_result), "[0-19]L, [20-50]C, [51-100]L"); } - From f552d2b09927e728f0726a01191247b457786701 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 20:54:18 +0300 Subject: [PATCH 0034/1124] add macros IR_LOSSY & IR_COMPLETE, add more tests for rangeset.c --- src/hooks.c | 2 +- src/nodes_common.c | 2 +- src/pg_pathman.c | 44 +++++++------ src/rangeset.c | 4 +- src/rangeset.h | 4 ++ tests/cmocka/rangeset_tests | Bin 188528 -> 189064 bytes tests/cmocka/rangeset_tests.c | 114 ++++++++++++++++++++++++++++------ 7 files changed, 127 insertions(+), 43 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index d0a215e1..e7d08e84 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -256,7 +256,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rte->inh = true; /* we must restore 'inh' flag! 
*/ children = PrelGetChildrenArray(prel); - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); /* Make wrappers over restrictions and collect final rangeset */ InitWalkerContext(&context, prel, NULL, false); diff --git a/src/nodes_common.c b/src/nodes_common.c index 8058c7de..ebfeed4c 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -556,7 +556,7 @@ rescan_append_common(CustomScanState *node) Assert(prel); /* First we select all available partitions... */ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); InitWalkerContext(&wcxt, prel, econtext, false); foreach (lc, scan_state->custom_exprs) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index ab77bc67..c08d84b6 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -324,7 +324,7 @@ handle_modification_query(Query *parse) return; /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); if (!expr) return; @@ -652,7 +652,7 @@ walk_expr_tree(Expr *expr, WalkerContext *context) result->paramsel = 1.0; result->rangeset = list_make1_irange( - make_irange(0, PrelLastChild(context->prel), true)); + make_irange(0, PrelLastChild(context->prel), IR_LOSSY)); return result; } @@ -1041,14 +1041,18 @@ select_range_partitions(const Datum value, if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || (cmp_min <= 0 && strategy == BTGreaterEqualStrategyNumber)) { - result->rangeset = list_make1_irange(make_irange(startidx, endidx, false)); + result->rangeset = list_make1_irange(make_irange(startidx, + endidx, + IR_COMPLETE)); return; } if (cmp_max >= 0 && (strategy == BTLessEqualStrategyNumber || 
strategy == BTLessStrategyNumber)) { - result->rangeset = list_make1_irange(make_irange(startidx, endidx, false)); + result->rangeset = list_make1_irange(make_irange(startidx, + endidx, + IR_COMPLETE)); return; } } @@ -1109,39 +1113,39 @@ select_range_partitions(const Datum value, case BTLessEqualStrategyNumber: if (lossy) { - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i > 0) - result->rangeset = lcons_irange(make_irange(0, i - 1, false), + result->rangeset = lcons_irange(make_irange(0, i - 1, IR_COMPLETE), result->rangeset); } else { - result->rangeset = list_make1_irange(make_irange(0, i, false)); + result->rangeset = list_make1_irange(make_irange(0, i, IR_COMPLETE)); } break; case BTEqualStrategyNumber: - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); break; case BTGreaterEqualStrategyNumber: case BTGreaterStrategyNumber: if (lossy) { - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i < nranges - 1) result->rangeset = lappend_irange(result->rangeset, make_irange(i + 1, nranges - 1, - false)); + IR_COMPLETE)); } else { result->rangeset = list_make1_irange(make_irange(i, nranges - 1, - false)); + IR_COMPLETE)); } break; @@ -1201,7 +1205,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, PrelChildrenCount(prel)); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - result->rangeset = list_make1_irange(make_irange(idx, idx, true)); + result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); return; /* exit on equal */ } @@ -1227,7 +1231,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, } binary_opexpr_return: - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); + result->rangeset = 
list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); result->paramsel = 1.0; } @@ -1253,7 +1257,7 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); result->paramsel = estimate_paramsel_using_prel(prel, strategy); } @@ -1372,7 +1376,7 @@ handle_const(const Const *c, WalkerContext *context) PrelChildrenCount(prel)); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - result->rangeset = list_make1_irange(make_irange(idx, idx, true)); + result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); } break; @@ -1433,7 +1437,7 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } } - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); result->paramsel = 1.0; return result; } @@ -1506,7 +1510,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) if (expr->boolop == AND_EXPR) result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), - false)); + IR_COMPLETE)); else result->rangeset = NIL; @@ -1533,7 +1537,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) default: result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), - false)); + IR_COMPLETE)); break; } } @@ -1635,7 +1639,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) idx = hash_to_part_index(DatumGetUInt32(value), PrelChildrenCount(prel)); - irange = list_make1_irange(make_irange(idx, idx, true)); + irange = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); } /* No children if Const is NULL */ else irange = NIL; @@ -1704,7 +1708,7 @@ handle_arrexpr(const ScalarArrayOpExpr 
*expr, WalkerContext *context) result->paramsel = DEFAULT_INEQ_SEL; handle_arrexpr_return: - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); result->paramsel = 1.0; return result; } diff --git a/src/rangeset.c b/src/rangeset.c index 39e95436..a26f6d59 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -125,7 +125,7 @@ irange_handle_cover_internal(IndexRange ir_covering, /* Leftmost IndexRange is lossy */ left_range = make_irange(left_range_lower, left_range_upper, - true); + IR_LOSSY); /* Append leftmost IndexRange ('left_range') to 'new_iranges' */ *new_iranges = lappend_irange(*new_iranges, left_range); @@ -139,7 +139,7 @@ irange_handle_cover_internal(IndexRange ir_covering, /* Rightmost IndexRange is also lossy */ right_range = make_irange(right_range_lower, right_range_upper, - true); + IR_LOSSY); /* 'right_range' is indeed rightmost IndexRange */ ret = right_range; diff --git a/src/rangeset.h b/src/rangeset.h index fb55e5be..b2f113cf 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -29,6 +29,10 @@ typedef struct { } IndexRange; +/* Convenience macros for make_irange(...) 
*/ +#define IR_LOSSY true +#define IR_COMPLETE false + #define IRANGE_SPECIAL_BIT ( (uint32) ( ((uint32) 1) << 31) ) #define IRANGE_BONDARY_MASK ( (uint32) (~IRANGE_SPECIAL_BIT) ) diff --git a/tests/cmocka/rangeset_tests b/tests/cmocka/rangeset_tests index a9362bf558726869b857c7a8538848692bd04713..c16316d13bea70893b0a683b09e8eefed239f498 100755 GIT binary patch delta 9913 zcmaJ{30#!b-hSUR49vhFARvMWGa#EN3TT>&4k?CPuBBH^Fu)bb1#>AKR4P%qG9ACh zx>UAk+Vz$-)r41r%C1Xkn){ArI+Zu=mZ)D8-}67`WgM{YzQ3I3od0&7^FQnR(v7-+ z)t?7cC9~D1?VY#y_voXDr$-**V)VL}gVL{-M#rUa~k(8^@@@ z^UR8pTiJ7ofUK2f-Zvn+Uyc-xkiuC2cf=n}O}T4xZs)GG2IoFqw=lM*Kq1fYNNH;k zkTuE7dBAIw>{9I0posP=kY_g|wr9 zti))Z5K_TD?7(+~T9I+VOS6H z<<;R9TUoZSts#S*I{f5T{42LIYae19~5=gy2O0`DP7YhAe_aT9wiYZocBDZ3%n>7ee+wGk+;Xe)_7y5B$XfDM zT6lz!M-X|;(maj{^lrnTc_1C*>WRmDF!>lhNd8{;e4*9s|CL*D(!B``kmVb&zd;uM z;7~J&%qzohw7>4;!u4@VOf7KuwCax)~k2qIP4bEwAc#J}(5rrzOSMYOYh zPuoNVhEE&9VcyLJw4~C_TX+3G;&2p6Rv0TQ}}5w+gI#Z*IT9u4bv^b}3X2VIPg$rfc`6 z>5E(5Lnx*f?I~`jlzZ|P?%n=z>}_6C{|;7+X^Nr8k1Q+Vi&-9 z(tFaQ;%MnGHh0yv-q^tRwO)c{4YW)2ogsH)PpS@}?n%d*8!b}xwzsb21~<2lv;`8E zmnE0qLIjrtqJ*sM`~f2djq>r`h5w!00PE#WAr>9&o9=^B?xE4myQ%BgTD(Rq)Jvyx zAHB~12_Ewyta(%N$d^3U5P(PQpYRxZFArks&S=GB1L~f1jBCkbv6s%~?k$vJ>de70 z={;U2VOX<6a{D}3=V(K!%*rLrzAL7^fUJ671TDMB7bZCGpOzLBTTa8RWRK5& zB)g(1`{C?y!z&Zog_@Ozb1S@fn?e-khTO z(~HQRA4-V`$t!+*!Ga}u1&=>LB0MDRi~5|o#YLq>LQz_@pmc#Ue`?9}q6I~zd7_$D zpiJuCHNE?z1G|{h(}#C4PfE||n$hFYfxI#0DOKV9Q|@=Z)A_t=qZfl7h-)~})N~u= zeZYOdk54u=(arA^&~^}TsSdaVSPu+3+tj4s!uLGT4D5Ig z{=ghyK5#X#4Cn-I(qR`aKaT(qjJbdSz-NGp4;WoU0N`w(6=wx=#^}t_&3WjCWH}D5O`7Qi`jlg`M^D_K_g;(IO$0_t7R`5Oudf|Y&0UQqu#(*vY zb_aeB90d%+8kh_039JH+0qy{91Re%90B-{`uvXjqDavwSZ(vXCa}$8qfQy0aaYStd zp1>liOGaThc9(i!BX&CjR`=^T3zC7oahT=;w*srUB`t~FKE{Wpr7^=XzBDb-_Wbdt zrYM>d*=>TKVFLqwA2#{HT*0e~Qiy+5&?f*r`uw~4a)|#2`bFM)D&rylH}LNo^hG3S zj?ZKO((h3)bm^EYbYHt`Fj}-g2AA<~I`l(Z=yN=JAIvd3^v?r5{yMu#2ISufazD(m zXIuEM^!N`_3l*j0zZeuEz;Wo4y#o{of6+iW^yi@;=&fIVSN{ePJI&W*MA`PB7=ZbG 
z#M_j8>EUu2`d!c;YN6ln(feRs%sSK5MC;7c*T?TR)d~9L(9`9_qh|}7{cXx17@UH^ z>=p*onhlgf=;N{A=%~CK;02Gq9Qs$GZ)~Bz<Jt z2aNBd7>qkBa~|pOnU1@&M$wn5KK;k&m}25b)OJx_;J}oyF*wO6oKhQO6?iLv)Xrns zmm{MluTquZ?u#M!@jtf^<@A`gu=med>HK<9oslKYc5WzoC?cNTdVQ3mXw#r}eLzu6 z(-)~qxdTDl%AktLsbAJn4Sb|bRg7_l5M(ZBCGWP}%#3Ax^z!!OUik|YDTV=17}{DM zN2Rl27%IVSV`ih0Dk|;UZX>fUh5=w?R@xN9AanyxXJdcEM2JGaS%&fe!;e%p&2;Ww z{zb6pgW8?cK1S+;ntp)#V9Z&`Ut2%6gSt{xSci8pj}&7zy33-e?4$S?@_BI8-)wxm zs`%LZM_XZ_C}NTW)#?toC|lkI9UprfXz1)8+2W=uK0Fmye`X&Q_n4<_jelVd%r=k* zvrkY042MC$yl^6fL#{p3lOtg-*#3C zsfd*gRIB%SVs<04%$`eJ)3u24rBmRRA*0NYY%^sof2h)F=Kat{QUc4vt@u?Gv8Mpl z>N^M=pE&~t(MG&L>+Cm{E583k6IAm{hYeHOkWs?SGCs^6Tk)cfp4>?be#4i%rdrMP?&vsJj+KdGgEY8TKqnK-5%5S)?&!| zP-k@Z9k`z)Y~-=ebqMT=Pa#Q<%~_!6qPn)@4?dS<`x%BTdmiMYn_H9(qD7KgC=$zV zPRlD8Ha3tH%svCdM)CybV-R+y;zez4&-cVQLNUaI>Q0l0*(Y1Xl+tSE2^ONyy35Xt zix&-K$ma5u17H21N7`GC!1ZF25cU;|hD<=m&TnERmFaR_W=o}}i15~1Z zEQ$ZTWkiUGI8aDvJ+)2dBj48E(fu~x13F{=W(qo{rSJ@InDcq}M zud|og5}&8)Yhij z;ODl++g2o4p;MH{#4oH&WOYWiyotKzXP|^76p(xp$xGv`!uWO1N$Bm`h1$i7;$w|MN=d+db)*(b=2@gOww zH2YMKJ*4SMqTCr}YDOi?ossNmgYq+wT-zC`*c8Lf+rx9@PAD@zIZ^I}vNj=6ZiLiD ztb7?SLq^Y6x>`YB9ZM_!VhmrpJVY ziMP9aU`s1!Yt$$fAF2yT`^50XDwYx2^1m?FEsQ0FWgp^cCsX-DCj+#9CiX0e($z?3 z{>c~gtTu)RorvZU=i8~1&_WC!em;=xwym0Y-Zk;O`u<<|S1Ikj{m?QcfzQNSce&2PtT-L_WPw2@TuP@YWK9?2Fl0}ehrTT8vV|Y$!5b7#L#7!M*Tft zNTdcY7~Z8mT{nER98hLFM*Zc+-Xb%`97^RsjlWT14;h~q-8I^ZXQA3){GFOu=l=)w z=~e$sYUv~Y1-RJg5Bbl-omGFyzxFY}TLJG=5_SfZQ1bT&OeW#UfVXiU)_)h!m2$Dx zw3rgS!SpqCVynrYa=6EIlURIZ`h${p#B`kUbjmc*MmFb7lW{H7UpD>g3BbQiB?P~k zdegxCVd_G$pAOtZL01QcQcBhaCK7FX;D?mp7X$Mt!LJ9d$8n(F9N2~UZV5a~gZ^FM zZ&Z7$O&7{>a!V#nlK z7K!=)L@oQ!$CbK=WvN4Z;BWJ-ADt4USpQTWtTg{W0Ny@2B}D0+>3Z*Tc0!lk`i8v) zjWZ=sX?>gDtI1oxKK$tTWFL!g+}OvAHtqIi)*5TjTrRef57p#NntZ-y|A;1Uy>Ww& zpw#*RFlzGVgCwK*Bx$QDS|8EfH2K}jhojG47898Rxr(H8?_<@dNEqvQ^#JRp<~fcW zU=fjXg%$1?{qtK`Epez{K)nuXyyV@J@mCz_P*42;>bA%${m&Kj2y5Wyn%vi7C9&s) ze#s*Aop2h3;N>?oG6J`Ev(jFfYt5(do~O4k zREG+PoCi)%ERr?5xsK5Ym%d0-+e4(fQB&6nGHsOBv$8@4A0go-2_HBvAy(vW 
zEtV-r#u6XS5bv-fOLi6 z$Y7L0SbJr$HD73&f5vK_eYHf34}DM;YVu=AR$;mWvy+}>j;V)O#E>cq=e6)q70)Z0 zTa1=T@eLE2&6>PUyZjev_8*9F>V5%{Q}8W?x-r;j75XU6>b%2_MkD{N1u>hYdWxo= z?f3-h5sjL9wU&h0nmSaAIweuMVb>H-2hDzzLCTxwSOVEo(rkTD3RWVOvQ|kOtVL}7 z8a+&tw|;A)?^M*Ue63#zl7LdA$$6z0Ij7k*Sy2*WmUVh z(BERbE=5!gY;lcp1%1s@bRn&8H6^ZI$Jr9*`o~EYuVdfDIlex_()^njP>)oH|5LW0v}uU6A12|U{!_Oq;8NbgLmsP@hjpftMRQOnpHvmMV} zVE3`b|8N|*!1^1nekAo3j*yG2mvL^rtiI|PP1TS>tY|z>JM5^kE~gv^F0%gN8)=uq eQ)LpM!~;e;I@Yt)GScyMo9gnF z`_^)?u~5tPS}183vfREjG^=q-ytY}^-2Zdlmto*`zi)m&=6}xff6nv#&vTY}XIS%5 zP~DNB^{H&`IcMALfnRo(#Ouq>&WndFX-a2JnoTt7u}oI%WqiNSjEtX#h>R{F=#vsM$j^)v&&gR20B7dcTY-cPVZ?c%zDMpuK zEQ#f%rmp4ziZM?y&Wz<}Oi!DSMT_B80~6ub81KStO~zOk>k)d9nM(WsLC)oA8O z{FjJCOLdNL-%n!>(5|qTd~7ex4uPX5`CncyzY&?>`04>-mGrL_4u__~>oECyDiwP* z7PZ=5Qt4^)yVrY&80uXAi3a@xTrG8j{khYEyG-`^K|+!Teje)2g5Q3%c=MZsEA8Sa zCIlCK*XwPnpacKyb4qFXqezlrks%5)EXpA`3xjmmV+pK}RLl&QBO#iyhzDH)(B>8&Uk zqbm9OPGco}W0ZDSh@zqP0EVb#>WG%-r(YXu8||MzsHWEn1t+x4HVADF zpSWpo+)rl*duO(ppi3nIDi!u8+OivIqmmuQnUp*!O|(6SzOQb7X{>GLeXps3Y`ijL2k2_4;49Z%op@3nj-OFez>DCCC%vOfZNc0wqMUFW-0EcTKmwTCfP z$us18_o{?SIpTJ(h21Cxc_d4{?fxx!j?5}<@>wZUSpH33lTySk z-*ET0E|b~V>+Tb+Gi27*!|$giF};V!x2cI3q!U4t1Y|#lv$QeGkG-Z3FMH*o*ZBICCK*>)7O1>D}hVE2@{pjx+PvjN%owlnr{FC%} z?oEG+SGVux{(HtH*+Cz(v~q_3^?E&Mp8)Oy9{SSjH8V*%59|Q^9heK8f642e3%mea z56rmi^_Byl0agQ>)q1^kz?Xq$jU>fi!FXVQU@mYCa4ygdT(6NF(#op{07L%Q2mo9S ztOH8d5I`$QGl3m|#%~Y+sQVTHfZc%Wfun%sz>~mg;19q$VD5JqkLoSrNc-S&E4(K#>>jeRg&2c8Fx#RmEXxEdIRy6*sH0gnSm0|Rk)Ed8 z%y5y9>6Ff*F7W3&SsmLidcAlmz{-c1SF^ss`Vpo8+SYOSn~J}U;8TD;zMz3$3G+7a zPy6%K#*4;p#oq())5uUCpD6)|KQ3XZaHgd=l8OY%p{+{2kaSpTcY4Pl5lm5r5LhYjG+TedYCb_UGp`BxnWxMevLK zdA7he-q8;Z9yrWw5r*3CHgBM*OS304u@o0so*8|Fe(Z3ceJ_lCA^a5>Xp3 z#CJ%?;qV3=EM%$QBx4(9a82cl{CPIpmq3eSeHY{ZTZvnNe*k`5qw%BE@mT+UaLB@a z(GI2d3D8)5n`kQdzPLY@H45;oPryp>Tfx)kuWvjXDfm_}Z3X`o_>i_^kLLcHylwX+T^X9^d0zKe{fB6!-{D2wll0fn ze7}yr23IX^(CoYX0Gh4PEWgVyc7I4W3(fQH#2%Fzx2?xcdMlT?Cp(wE{q^XbuVF<2 zyhd&jlLom=nw1AVBwWgju?hS+h-mlcIoG0N&MucFbBBenYXfI2Ks&Q#82kedFLQr3 
z_K1;PEp)#$ZD?fj7)@KxLl#w0^1 zHf6JRJfqmc{_5a)#Zki-KMJCap$`Z{Sk)8gv^5Mu#~gN{5S@0S6BV|K+|mqvpvWSH z%z{I6%d;@`HjII(`4daf?qj$~?eZD!gT2PSg!`oB4jA-%TkR}nTi;FFj9@RG}7@%WZl3OU-gNsys& z?sFtflC){<@a2X%bCV|f+P36}X2Y#LjbP5vQjnn<0$M%Z9mYt~wNec(j+zLb2iN3t zkHKL=%p@_)Ch0#w8tMdOk^(7&oKk*7K{cZjahqt=D9Z#(61T!3am7M%P*mBH=FKmX zG-D1b8l{vB-eE(mzGnpdmh-#~ZQ}p62;LpHW0uVMSn}S9=ymFZyv#X0c@cMQXxT%= z>p}4}&T`x{DFX}O6hjK0ZEh2}#hrwfT(%04TTh-n4-Ts$_?-={2QGmemVFZTJk?vA zTj5Psp_gT=RZO;{a;y&#%sB&Q7od`8=Xi~ zU}?&PhAZLHmEZ+Z>J-`zLufnH?^BDI07)N($gO9A9>p*6g!s$YCbXd&PKn}aNsy#t z?iMdqgsK~@;jqY=M)k1p@3swdC>yN=x1AKIY&2?%jRuMF1K`_I+Y2n4{u0`8K`T)A zxO>Pe$2n^4I)%2!4C7K_n8wXzB49qJIdY>UXp zJM4%XLJOuTmT79tIW;L(4PU7GD&d!_;d7IZlc*J{djaw!kfD!I{ctjWb;poU5wWj2 z4|~$eWd8Gxw44u;Z6f5w`oMY;m@firoEsg<%l!N6u~DUjN*ra4BqCKO@~JzASVU}X zeT@>QEWqi|)r(7tP2x2>ExF@K(ncysNrIB}FWBt#lFP1OXIi6u4jSirhxHBWZX$n; zbE(5>CLx>2QR7_WX#IWy&)L{ep$B+?DLO zXR!%(vUrErYVx|Mq}oVx3^34or(7cYSh9Z`uMF9u2A0dtU}IZPQhMP)#j@@o|8Ytj z%s@yUuSkByk<_Q95`$ZJM>>|%$XUwz&Ppw(fwPs~jMPl(J*lpJYG3LVDZPT$m(?JB zRqf@Fky=JBi`7|5N$F}82B|)&oRXC&y6Ccz+M1@*kfCJCi03DEr<(?l&qHKb7{}S0 z>83dH$fPrr7RP(RkVPJC=&01i@~2>UlMIPu*bvLhVfcd#(PS7L%Wu4y?wAJy8%EkQ zVwK5@rvoUE%O*ylfM%ZqbIy|5e?i>=ftnZUCp;zc|1RNHc=kQ@1K1q*zHJP$G#{W2%WMT$MmZR|8!K6~2 zdnm#^=X|k_WybJ3mpaO$FuWzVT@IFWRy=aK;*rbp-bY<^Jz2|&{#k4zb6I+_@QRq8 ztV&aHyEhxIt9Yaz%L#DlvS2P9#0pu3aVTr6ugD$FhK5v>Pi7< zH|W>Jcptq6PZ_#tH|FAfHXnue68RE7a&^zkuZ#w~DEA|W4e}E5aLLaU0bZB0@VKGd zCa;(R*e;)$0JtirJPmjtpTSd)&c&)I{QE3o65upjO58Q}EsnO%u~ftH$e@GtF@Rdl zT3ph)`7*cV^(2w#iHLj-la^sE3 zfbGWXq~ZhPIV$X@#yl}yqr;3VSof20JEeFb@OhfkroiuMhWi4&REtjof2RUp34D^m z><*ekMK}-?NyR@I6hg*xL8ImXz6q+q_gLLpla>nnl4$}>VyDSX+;P*ZRGEL8_EYE@ z(=+6G-t?_w9`KS$M{v!QPs?@N^eYwW2U9o|>JQTwwCJmXb1CSW;E!m&YlEw(z|P=@ z9Dvt?wKTOY!2@YUw*~Jbz1xEyBlsrxIXr>tc87dN^Q;Y-NY%aX;y9{Q^52O|NgzqqVhYmwajJD{H(G{q-_a_LZ$X|DHjY>T#H??N#&&ZJ=6Ha20Yw+~e9O)t2Ss=ev$`JQTTdJ7*??dombPO87oD^;TD z=Oq0?L4ryWTrsDZHTqZdWhqoT-&qkTRN~Fxdh`^_hz=J|1YuHLc0oNOFvYd$6pM7E 
ztF)&Hog?i3S;pb8%TlBy*&2r{ggp$Z=pT6W62pOuRQtj>8=0jF@s8g}6uYb2X%-s& zy6V*Q`E8GD(rFece=I-X%K~2@4Q%8rGmC=jT(eyKfMa=s*v_e^ww9rqj9iE-NVT? zVK2wdpjSuNCZhX;4)r@R=!(~HR}Il))X^QRHesHouu`EIEf*^wyE3a;{nno(Z2u+Lc=O~sINY$2;Sae-MhEWf4e#+NKTuzqK6wYrinv94kDH)gq2iF01( zD!jxp%)Wo!E9NrSE?7fT(`}-kny#4lR$RZt4m0+k(6zgkb!F9Wxvti--o`%r6uy_M z%@x+oxcDoj8|!+Ux-U=02EgkX*DiEf(Fe%>2fFZ6fS6REjx%7Vs NymshZH!iT2{{}uNNe2J` diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 97001e3b..4460b2db 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -15,7 +15,9 @@ * ----------------------- */ -static void test_irange_list_union(void **state); +static void test_irange_list_union_merge(void **state); +static void test_irange_list_union_lossy_cov(void **state); +static void test_irange_list_union_complete_cov(void **state); /* Entrypoint */ @@ -25,7 +27,9 @@ main(void) /* Array of test functions */ const struct CMUnitTest tests[] = { - cmocka_unit_test(test_irange_list_union), + cmocka_unit_test(test_irange_list_union_merge), + cmocka_unit_test(test_irange_list_union_lossy_cov), + cmocka_unit_test(test_irange_list_union_complete_cov), }; /* Run series of tests */ @@ -38,16 +42,43 @@ main(void) * ---------------------- */ +/* Test merges of adjoint lists */ static void -test_irange_list_union(void **state) +test_irange_list_union_merge(void **state) { - IndexRange a, b; - List *union_result; + IndexRange a; + List *unmerged, + *union_result; /* Subtest #0 */ - a = make_irange(0, 100, true); - b = make_irange(0, 100, true); + a = make_irange(0, 8, IR_COMPLETE); + + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(9, 10, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(11, 11, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(12, 12, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(13, 13, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(14, 24, IR_COMPLETE)); + unmerged = 
lappend_irange(unmerged, make_irange(15, 20, IR_COMPLETE)); + + union_result = irange_list_union(list_make1_irange(a), unmerged); + + assert_string_equal(rangeset_print(union_result), + "[0-10]C, 11L, [12-24]C"); +} + +/* Lossy IndexRange covers complete IndexRange */ +static void +test_irange_list_union_lossy_cov(void **state) +{ + IndexRange a, b; + List *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 100, IR_LOSSY); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -55,8 +86,8 @@ test_irange_list_union(void **state) "[0-100]L"); /* Subtest #1 */ - a = make_irange(0, 100, true); - b = make_irange(0, 100, false); + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 100, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -64,8 +95,8 @@ test_irange_list_union(void **state) "[0-100]C"); /* Subtest #2 */ - a = make_irange(0, 100, true); - b = make_irange(0, 50, false); + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 50, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -73,8 +104,8 @@ test_irange_list_union(void **state) "[0-50]C, [51-100]L"); /* Subtest #3 */ - a = make_irange(0, 100, true); - b = make_irange(50, 100, false); + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(50, 100, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -82,8 +113,8 @@ test_irange_list_union(void **state) "[0-49]L, [50-100]C"); /* Subtest #4 */ - a = make_irange(0, 100, true); - b = make_irange(50, 99, false); + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(50, 99, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -91,8 +122,8 @@ test_irange_list_union(void **state) "[0-49]L, [50-99]C, 100L"); /* Subtest #5 */ - a = make_irange(0, 100, true); - b = make_irange(1, 100, false); + a = make_irange(0, 100, 
IR_LOSSY); + b = make_irange(1, 100, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); @@ -100,11 +131,56 @@ test_irange_list_union(void **state) "0L, [1-100]C"); /* Subtest #6 */ - a = make_irange(0, 100, true); - b = make_irange(20, 50, false); + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(20, 50, IR_COMPLETE); union_result = irange_list_union(list_make1_irange(a), list_make1_irange(b)); assert_string_equal(rangeset_print(union_result), "[0-19]L, [20-50]C, [51-100]L"); } + +/* Complete IndexRange covers lossy IndexRange */ +static void +test_irange_list_union_complete_cov(void **state) +{ + IndexRange a, b; + List *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(0, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #1 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(20, 50, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #2 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(0, 50, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #3 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(50, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); +} From 07bf75f01bfd423fca41dd8b342a162ad67be31f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 29 Oct 2016 21:26:56 +0300 Subject: [PATCH 0035/1124] add more tests for rangeset.c, remove 'rangeset_tests' from repo --- tests/cmocka/.gitignore | 1 + tests/cmocka/rangeset_tests | Bin 189064 -> 0 bytes 
tests/cmocka/rangeset_tests.c | 97 +++++++++++++++++++++++++++++++++- 3 files changed, 96 insertions(+), 2 deletions(-) create mode 100644 tests/cmocka/.gitignore delete mode 100755 tests/cmocka/rangeset_tests diff --git a/tests/cmocka/.gitignore b/tests/cmocka/.gitignore new file mode 100644 index 00000000..91500ef0 --- /dev/null +++ b/tests/cmocka/.gitignore @@ -0,0 +1 @@ +rangeset_tests diff --git a/tests/cmocka/rangeset_tests b/tests/cmocka/rangeset_tests deleted file mode 100755 index c16316d13bea70893b0a683b09e8eefed239f498..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 189064 zcmeFa31C&l^*?^^eaXvPLf*>;fj4vW*b^WmtY!BhMuUJNf})}!gaFYHFbj(nK?6#R ze1*CcD_Ux8wQ5_sD&m4m#cmdNT-u^V8-!M@sML-B=bUrzTSCy<_WNspzrRoO=FH4F zXU?3NJ9F>nyt%9{pL()onuhzaj0pziru$-CB;SUy{w0g^^c6Ek8Gd~CF?t$aB|b5SlyvwQ$-1dwM7eh_QNMt?IMeKV~Xa+HrzVw}X^ ziIjNhGyRRG`9!ZD!dav-CfxKMr#&}y<<5W{+bLq7`yF;TW;pE~&o2dz{ch?sZ8E{@twlv61(w&ecKvTE8=D8fh^KeC=`gTaHtH>2b=hJWhGvNru17&<{kr)$<@1hHUVohO+fd$_e|DfeY!n!4Vsk+yK2L!R8{Lid9m*ed$~*EG zqCCe48Y?nlpc0=MAajf?WB9;WqL3)e$LhsPsxPXdIap>?RxVt;q`tDTsj8u=veKxm zt*>n|Dutu6p}D?tVZ)Npo%)3xN5n7HobKVmgeJ{u{`6Z%}g=|)^m9^+ZiRMy<1)=wJ#K$ZCHJz*}Vbj{H< z=ziLh${`T{97-w=#7hn1NK$zysl31${|uD5YYA0Hlgg<-?x!fJ++FLat}LlMc|RDH zR8DPSUf z`H_m&UG0y4di3ZoA4{oeZ9Y`dHVc2m@d7bDn9LMqabjwn)!sU7Uq#ERBSupel6Iph z1IZzyIjdsB_>DbrALD`Kfqrex`zqR|wL>elduSqh-ueVsZUGCM+8zWSCR*mms8P{o zP@AJs(*Dy=;#NmnR~>wq)`IpC=t@N^j3Wbg#rS%7xPWRRQbVR6YMXViHB7{=SQ^bk zBH4h}>6p@SnpxrwpuV+BE59R)Ir&_0zp7yp_+J@;SQLWR{Hg=|U zWkM??*^-W$$hzZclA$Aqht^(-UV-%p8eO+@b>a3Z>g0y}i=&i`SjoxwE0DBLV&{^> zVm@5P&4t_N32(XZP7~e>iMLyVH!Qr>!rNVVdvY@VJS6Q0Fgk33F@7U1?WWOAW)o!w zIhn6frf~a0gROfOJ0`4qg_H3kNXvIzEwQwgBWCk(l8d{BoIUzc$X_V(*NObiB0q`b zYm()y=20S#p1SUh>!=eQr;CoWMMqE4(bLf(gUeoOif&&l{Ds2bU--!n?eBbi^k`yy zWmM>j;7h^nIs2q=dy{B+$>yHF#>x1Ppsf9FXkn4&@WaI1o=s+8&K=0)PT(q-pJEs; zN}khvDcs&7X3P*X&J{D7$c#Q>1}s#q9~L-9*Rv_*&al{xRKXx0Qcfv6-};!%~le zv0d6|Z=3aT>vZx53U;-7pv85(yNb0LPl#$~EudC*#rRfYEi| 
z#m##|#j6hOT2}^6YG&8E;h@T>g$?Bxg>|Elt=O=Rb{^zt!>QPC194ZhZQ!Qc)`=lc zQJ)4vuyx&3RJS}x-$qmSwi}435H?wZuf_y9(B}5NVf$ z|2<8njPS@HXCi4o;X~4gg++XcaV&ncb$4s|$1STq4z13(FAB*6OJy9|W?^j#V4U0A zX80T#4LE85Cj#TD2SW*uogl^Z?ugb{67eAp!6l`jbA@tdNn1l?~w)H>^+ ztQCdZpFtZ~=|hS~S#vVJACmTSFf!QqNU4}J6Rd6J2P>*~OGCJN<9&JrG<^L;MeFa{ z?|z@B#}TZVVCMlm@((zItZQ#=-q*5fpRux}t$ZJE>pzdU+xl=;n?7Q26xY5%<2nx| z?Z1ByW|r?0tFYI>FWWKSPpBw^ukdyDQ#Mtlkd zZRI=H?OdUq>W7o7|A^}SmqTCMtetJ;j}O_+j>aw0Fyz+lTfG})C z5vka)YIo}+SYDSylXP`6)EpS!iOA!vPhb8T38hF+Y~2a*@p18UAdcQUox;^X!@xIr z>+&FszV42@iiTk~yBjX~F16LICWK25WJ!-xk^d>`Cmv_D8|ebQJxzNTVB`fo_8wVcRXXfRsK*H`Q+ z-$(*TH#_N8PB)~}{Qy|*1a}mh7|3nlV)aOOI^0h>+}nj4eM~W`zV{Q`Lp0qVCaQg{-019u!Xs25_4JhZ zFg-f9&*%DG?N4z^%K?^RULY)9Aj*&2PaDib^yvLCeYFpDB(@wQvD>-6q>W6Hps#kr zk+|>}iCa@7=rVtpzS^JSd!$|MqmGfNOOb#NjECuIzI}@$;XOuTOo~JvNe~XwexW1r zT>O5_?YYm9ph-cO<&VMf5JcelUF{8A={O1-%Zu$>VF1SP5ZDh9M*t>l#}h4PUH*}- zl#H;tiSWH|$8W07#AE6x%=9P4zw^N_BlZm4#QyYk6|IlC z78}WRJGt%;WZHZ1E_7oXkbBKa@NGX!Or0jC)>8u3Oqklygs-@!LBe<_VRB&y(=JCo z?y!lerqv|Z^-Y*|ZU@sI2jBKeG403s?8R3pfoT{2^;pxkV1Z$;v1vo`?aY5Ek9+M+ zcD^`nSue}St?U#sYujR67V;s}A9z51-zsB8hVnEqz zv=FubM9Sk{%TI3pd8~&?m%-n`8#jA9*Rj|BiuEtqwAo}@i@5Oe^ffv{R2Yjf`&}+QGEzk&k5l*3usV z0bwHL#( zeH71XOpShYxyF4)y!5bJnsFbkcPUNK69a1q0`Bznt1jbTaDN!o$G#ReVqSBe-|%&gPLFiuX!!;r4Qw zrq6zqiFy-4BPUG1%%>HS8E z-U9?*ftiII_+a8py71>>lu*&U5u>y@?m^7fjqIt>k1i`n5(O93VD2K_h|2|kq?WIu zi(tmuouRdL7^x0bm%_1-z~XXyd=^~q&H@_W-TUtpDN?@MQTPxmY*J9R8F;4C7UP*W zVe0R1PBi3-w55uWHkiyJ3rOTdo<+A~WEZxTlLeul?*YMGo!I)g>wY+O4erKUUp}ez zP0&A=TkW0F*j0phvL^!e4y0r*a@Ar zIsKS6vAWMaZ{nOd{1(F;0tC{$6L0|I3=ZtDt9&0kfXPsf+Fk9XxF5zWID-66$`?xh z(Ec~Uw5xnK@$M=ory(Lsq0uTNh{v1@0SFmwhFrYllS7F-wmM7^=HSub3l97O2OXH} za3|^@%XNnioFwghmUvIGfuMM5vlA`28Ze&-o5U({`g|uQT~*WuZ!L zFSIU1;1u;Ov@Q)KYgpM!_;@s3NH&d*ivCm>l?vUw}8k5W1pYrV&Z4s&Z#*%c?yH zsouyO0G!Yo-~YP}Y=zu5sNCxGfI@y*hY4 zHn*TQzHJv_R<$EC!O<}X&}o(@fFlQHrHr=UThfj#(*1IjVT ztvp2Q!*UB+AiCr-6+^zow{*XJEb;q~iDyqdYhvrsiL)looVuZJ*W4(646>nw(laNw 
zzCXG3`KiV9%M4$|BcFKMuX+{UT=$oz=#W?3-;YdfeQRp#`zJxe(Y#kHS{^eiM*qI~ zE&46adFM}@Gx7Y17fh^tY-6ImcOQcXNgw}OM}A*L12%KU#OY(Ap}OOUrcc6 z;@ZYW`b}D0ZDZ5mYUAW8FwURIKUafpqh}FSluW?y&lWZcvYv@&ZnwG zYitU%IXcjGvEv7~J>n>tP+zm6iCR5AAK9JYOX(VG>{(RAE-5B8MMZ4kfZ`M%P-nnZ zRK27gJv(j5{F(v9j!lOH0>_;WT{Qagd(~*48gr(&=cRx-+lhccb;os_HNb zA|I`(ufvR}p&1cv!0!$5GsI|P?Ik?!0RulCj4oLa#qS4~G^`Xy&0E}9N#3bM;k=rL zqT(^p`Xx=#+Qm!jSWC_P=*pTV!x%ic6#q9iRF^hX)i11RtZ5p&sIytKWO+?Pblmu8 z^U|d?4aEkgn=vSA%r^Qq_pLV0>)Tw?xA}ZfO%2U8#)7K4#u_oh_-d5?x^ZolQXofxI_%1VZmoj++%bjXmYgQDjR8Co)Q`1zBhxa|C?F2N-unDM+3Lm=YR z4Ih0ziH#sFs3rD9(df~vxU6idQO3#>OhZI&Xjw@|rPQH+a-2F5BlwiATk&!ue(Bm& z)6iH`-BeUHC_1m0X1iNeHUGjTwe?c&T!6Scg1X%wl(_7_81 zj&v^4^+?ws{Rz^oNZ0-z@Uc-HY@vQaY?L9nsw=(y!u%Du(o9q;rw}1b3}#kj}>g#8#v~2T1lL(yt(7 za0uzyNW-{CS&p;}>3*csk>&w5T#9rA(v3)GAl-&^Gt%8i-$!}`=};h_1;7UZ&{aO^? zfO`5ZQ@oyNd`SLw{2fGn4HdZhEm1)AkK=C$=Hyu^^=EJcRKE{@3sHY+N_}~}o_;O5 z3H3K3jqCSpGKqoY`=C4nbNQ+i`77e`C!xOnGwSD~{(00FCCgW{IO|`F`ro5|QgZ!e ziTc}7Z{XVDadZ2>AW{D~>Vv3%I$7TOP29eHsDA+UPo>oVGG3pCYx2UEj~=CKJl_8$ z3H^Oge;Mj8Nv^k=cZ~^;=NiGr9daVj|i1Q`Fyrdi>@wVc+aT{d1`Q z2I@~vuD2#KLm!kKM*U9IUyxi6KUg#4Zs2*Pu-3m1zH!NX%MVv-HJ+9<&scXv$&P*C z%fcSDEJe@7aXo3+ljhT2m0VBr;=9a3y<>k20^dRGS$9x@JI}4#;{Bb9`bO+wZ7KCv za{Wmtt3!P|>T9U}#H{eO)`?jK|Klmoif%|dF{|jB^op#qE4>w2qgpbiWyKn@Mor8r zo0wH}VpbG<;6E|T$NSY!p(l(OF?IXP&zBzf(gXkZdLZhLiScggbcL28`gBZZOR<4? 
z7JuAxmsPTJU{y4YE2H&fq`jkx%~2=s`WMSG_xHz6y(9jgOD4?iwC(R^ z#JK$DPWptCzTl*9I_djP>h%eKS10Y~q$8d5WGC&|*eR1HjfocF6nJrT~`lpi5F>_6kXp#M&E zf&bonKz~Sd)IZ^T&>s<9sR%ru zhQ>_JOb;ADlq${Q%HN!Bh6d;rg-Wpbt| za43jOmf3`w6@j-t0b7oFDl%&V%i*H5T$5JSD*}s_qbAQ>0BtRSk(g#_1?Dnj)&~xg zf}ZATux$*y_bO_N%}vN`4tz|R0i4+y@cbT`ft0Q)aL^1TyypE`x{D zO4+uZfoe36HpILX$>T;ae+qIp`o8-!cr$Gab<+Rr6)3ueNW*_IdE!>;uFwC6*FoP# zy$u^)UopzOzV9HHNux++cexq_mC{gh={0U?u90;ic7V{K7ocOTN#!B^5a@A2kG}!* zc&5F+tx%HjGq8l8f?4TzQS;%v9U$*!GULx6?_n~#t7&+9kWHC8!1V$CvW<*`Byhuc zm=d89%*=Thot!lpk=__G|GEv(uZe^h&Glg*+4zuQ)eJ_ZorMz2*%6;038yXBn9M)F z2u>)Q1a>Pk7vvBcQ0o*M?K{%&GXMLdVR^vq4i0=QE7V zd7wXZ^B%*u5p9M8&*tLt_dm21dR`@94_zasa1XdG@CH?eZ-!?AZxU%O#@q}Rjs}^z z7_5^)IO=^ToB|Xwv)3gk^3U!ChN8hB4ZjylQf4vvKjt6f1wDX#Sz%<7@YSvY=+26v zat_J+mLI}g>vHf3(Efa^eO>-aq~V`(H|V!tB$lp-i5OiD(Tu0Lb_2?~QAGijpx`&4 zEz;BF9jXhTi5J&&dG}qAUf=KW9eQR0>L!@!=~O~?bUy=^+UV|%t9}_oX(kyz+E|La5`svf!5nlbB4(? zHc%BrW(KP+2&^LoXL3z6kcB0t%S_G`1-z7*#hJ3eO=Q7YEHf%_#x`Wm=EWx#sDe>l zW;<)t;wog$an`7EfJ0r*b=Ihow~;xIGjjt!k3#hvR=p_j%BQT5fJ3y7F_{0T&29@IZf%-rs23kYwhPVKxsK+(am^2s=KuMr%RrjH zSwp!iG33^!xrTm;q%%m`-LA2M^2gjFycxo~$S{756)DZQ7B`p9PXEwYlXv=#w>b0Q z`WZ2_fY7XwT}Z2+yQ$0mohG*cv#6reTY!|kKVA0zG}-hgK`2c&eIlJr-|NfpVHUcZ zKGS|@(=UNj1G4F_0E-cjP5)MqzJP4{M27>i=@VTLkWHWHXh1f7qKg8u=@VTRkWHWH zQ32WXiH-%GO`kFqL1)va%=DnM=~HHQ(Ao5BVBXxIv*}-i%%Y&P>65ifgU+T;nWmt# z>C>LQBIs=T)yS*~I-CB5$XpS0HvNmCttIGe`UDWJ4?3Iv)nMBgbT<7h$ZQTen?7Z> z2AxfxGIs`@O`kH`g3hLYDl+#4olT!?+Zl8={j1Q<<3^@z`hI8Ar%ncB(K6h0X~8cHtxx?7rSglN+bJDWb0hn-EIX=l?X zI>U#>-|+1OnJt?>HJ>e;K9jQPGbx)s+0^9;a9JLD6Oiu%5|B-wN}NqU7n}ZIWCOD4 z6A3Yz-4j5v@gXBkHvR4_mb~ew$)-OC>}j&;R}(3l{x~9K)92}(vgseY(f<p~kl)o?z;=tA4n-`%{&@H-oQP8#|s8~ry?3ttjbwruoN znJpVVk!iBgj{@0c5?CuhIO_e*M!yn80omwFKpFwr=*j1?fNb>SzY3!Z3Aefmpj$S2 zlJ`3s{VzZVWTPk22*^hN0%&)mr|C=ci-72ER8c@BDEKw#G}-8>E?YMGzk$r~y^8O! 
zv(cwvSlMlb7(H2K7e8A;cWDGpghC(eSCMj z4_^iJg@S95gq@B4W}=;qo*D={8$Gogb~bu0cQ$%1cQ$$&p|G>j6P+d-J(-p*8+{J# zxLe2I!HTZjP>;*9fwKP~hCNfghwp&8`xM;&st=}_My6Ul1h*LK&!Dr^eIrdHq?+-9 zgRpvSl4;CWZ%r|c8ucl3El_8an8rf&w^Gwsq&}Hw8nvqGWYf4%{qi)^xJZ3_xM|d> zLAdEytiCnKH0srk38t|`-9ErHma4<}zF0M4uWnHOp{CKOemTlCn$#=M->e=)$ue~X zY+J6TLH`Q18yZ%sc4)ptMTVKiS5*+UU8-i_dzI=j*fcIvi&4H>y?v5ttWjTuC6}um z;J-r60^OqSgUxHz%V>9KS}rrCx;P>s15vw5sW+n#R?t9X)MR7o1=k*Ql?8 z=SgLw7r#^!@zdyEsr0Xy#;;WiIDeyl3;TboJ_P4e>SlQ3cj|9wWw)9-&NQA@Eok={ zbr1UTtQuZn8qcX$QS!VRhqhi&LGE(fGSm383PM}E z+K%zvuU>(+x70hZ`GC3{wFlLrsiyH4{TeJEWa_J+sKnIIk2j6MrhWufrKY|OZIqe% z0+>9+)EA@gLrwh{IER_~1n3@a>Sa(h!qg9fbEK(;$iI&Hh zdIbh(tf_lK%Q#d206NE;`XeM0&^p|7f~lXHY#K3BPr;x}H1(fg>xrh`1@kAN4!u6f z)IH#Wa#QyoU>YZzdM{c(#nh!}ufo*nFk-T)+hEzLrrw0QDW-l0xv8d(m7B&iQ~wll zr|3TDh`QTMy%XZ!HuZ18a=WRY8DSdVG4)+=>vv7<$8g+X>N?nY zr>SRSz`tkea){q$>J5;)8~p?4VN-|U$oEV=8M*giAXI-~>W%2;5mS%Gto^&GpF@v6 zL|35fBU9fDfsai+2f0s7eHP?CHT8$5n#NI6cN>U>$I>-$h-v8w7-`GWBcRM<>1)xy zG)wn_U(zjoCoUJSr6cHjhNT~ZxX;oGt@|zg3fc=;`WLv`gO>gprbecvdtmZ)vGjT9 zLzbnz&=Rur=4n{IEd4H&WkV-Ckz?sGD9p9=e7HQ%(sbw{-_m!Wp{|yG8RFe6-H3(? 
zzzI)uw{#aIJuLkpdK-aGjF__Y-MI3#rEi8KZA&kOvO-I5183CIf5S!66P6+AW$9he z+1t`5VlvFO^d%VLb1eM``f#Qg*C2;vgQZ7;^M5Rz3D;d~=@N8hqos>5 z`8HX41AO*1OFxOi>n!bovIi`EA6kFV(ji#=kfmRQlOKi=$nCWBA7J$(mOd4oe$>*q zppC~Y{Tc@RaZ9fN=P%GPTw_mQ9AM`zOV`4!Pg?pK==`OnSHh%US-K4@zqWKfJop<+ zFM_h)TH1syPg!~%`tUnTTUb1HTl!fv`?RHZqM>IjeIw+awe+j-?sJy@F*^M`#t5_Z z1xsH6@fT4CxjmLHKp>n5J)8 z`WtBd0D6n$prv0x-~WPvgO% zpMlj!E&VgtX?S!Q#7&Rx2WMFx-3kLe9{uhVTq_>E1;(d)^lu>N_2@@188STjUU<;w z(cgi&evke&j1PEpS_!TdkDdbiLJ9+;cu(YxTD5X1-ITJh*#L1(r{&qjMW z9-V=@T#x=1wB&j8{h0Il9(`aCt`(22gw@@k40Qz_{UZo;_vqWudJm7j3$BZJ^d-=# zJi0qZQ$t`dt`(0irEA5b7osaskG=uj>z!g0H{k(SL%S*Lid{lIuNs3A%EFNAHKvZuIDOsQ$V~FF-?^J^JTR zeUnEQK+8AaI&j|X(SO8@+~U!%U=H2_528o6dUPJV`%RDbLVT-7e;4L{%cGxxUvBg0 zufrkVhDk_nhe@YlY4+&z;MVVY^h2=o4)_HQxznS|;jHhW4$AKG=v-WCcYAaR+;fjd z?}bCY@6mU_mLFh-faQlCeJcdE!F90gM;`5imLGfcSQz*dkDiS2`>99AApUcYZbU=( zdURim!+jq82iUS5Ex_gXW1hj>9UfhQ-ag>bgHiXON56sGLy*JVeb{64pu5V@KC=jS znBAX)4QZi+=67&=8GIEWSLm-M-D=`an5sf=n{?_s?HPC|Jn#XKekQ{sUIQKI3&r7a z!+|&%f#Ecl+j&rTHw`Y&_h@tP<{pIp+wVz z)sLu2QV||T&G`DDJkJ;Y$^}UCpTxL?N7EBm{xfh(cnoRFKMF^M#|{B$*gY{D%*(fc z*>*x;S5Y}W<`ya~ybWf90R;fE3fJ^O1+x>7mBFpq8rP56K-^!?jFG63(6N_vuoN?p z4%XNp8s{3(SK(oEju{lm8?heH$J|0sMUa&z4|#mW1&v3FBFU;aREiJHhZqLC7N*9+8kw4%|WVvjTS%Hu!G~+}(=D z<9OhXe%Tlg+>y2Mz#U~e58Ta0O+0W1ggFtoy9?UlfxG*W`S%6xPCz^Hz#Z8Z58Tn? 
zJ_2_$-vw4tFxL4tv@Gr;bl6(=x+=7)zM!N4%` zy@w#dAjbhMfds9jIACzDNkeN%9ME#(fSJ#C2X@8qVCIWV5FUMfXyHr^%4in>h(($aU3wKXcb6_17;PI|0NEXHGq7HI3NkXT0?7)p) zq!{Co#9%UO8(>ktk5M-k5%L>Kr^r}7y@NBx7T{7j;Y{4d=O&Y9OieVM-uduMpo zbXt{Y2(Up(mtA#tr zamY|o95S>{$_btfCB-2_SMdztIAmx&(;k8+v-^-UJnx{R*(E&TUjbQ=U0RIa1Nr-3 zgyJE8Av-u8o;_5S@aL|;;x>#}XemBI8b~aAIFTIG%pOZ#@Ew{7WjVdCAZ2u4nbVgX zVsxc{qaddrO?scdKb)4+pBGO5LZXX^4jUd4&M7X3tmid2IcMNzkX|1tHfX&d3tR1PeOS(O0>_vot_VR5*_vrpyz;IL>J(G zm6&=Pko6r!`SkF}-;$o~_k(j3z53Pgk0X{b9}tUwGVXN4W4q${AngAp=^WP|bb-H~ zc*YYQ^-rSm2}Bq9>*+b;go&Wb{JTjub{6PS{#S^9BE9uC<{wG)NyJ}aXu55uW()Y4 zg*)Q)kxAXi04XIN++P0~1We|vp%knxb`!Y5a7!4)X*`bse{;i15f@}^Sj;YApiylG7S zhDuLk@=3_$ozCRra9W-`w4DZj_pg zYe(k;-^II9P`Lr^1Fel_L#_E9*^VDs_MAk5A4smwup^Y+{cbqM8t4Zi^f9+w9@n#5 z^el?fBaykAp4Nd>;o4G?I=NmR2 z?G5vLS&k%rJ?lswj!S;eku0oU1Wx+FxU~s_m%$t6EiZ>+EHIqE$@D>8T^HQq3O0h# zG2H0&DRsGl+Rt=k`P@13=9cT@vJE0TyKuqcCa{b2^yrD#fvthqtW6a?ZztC_NmuH6 zo=q-kX1Hq8TucD^$-n&y#@9S;Ea5o#Jk8(KQU)Mv8K~2!CC{eKmq}BnQB$5x8!zkK z=Xyt~(UI!OtW5eQ&O>kUd*msfb zgMaJ9Cnm*t7sq)AJ1*$SKeR1#x@q*=NRo;wU3k4HF5SPO2F|wKR*ZWwS<{Rf00#~- z{|k&79jWPf)@NXjVQilQw|Oy@^xsOEq5NDVd6*^wG9{|AoLFqD1Fks8lWaKVN&|8V*xkZGYP zBmWOkaWB$s_#Y;^Hx;Mh6d4lw=SF*6O?4iU3H2F{f($B+d>eT)Kyn>-T64=!!Jw*8 zO$0*t+C1B3raLRIuMy=2{Ua!8#yC_1Yf|H|H5NH>7qiTI*!We{kNYafRaz6r`XiAW@vNZ}9h$4v1< z2_zQaU*-FH9RST?H(${bkP>$D6`u=I)(YPMQcz)}EkOv+SN1+KUOe>}Ui@By)xraG zGv%8@RY6}jlxOzBR~Pz10c~zUA0EVvtRj3l;r=ZAG{^{@wFWD2FJCs%S6mFbuMchp z`}z0|lM!_wL{qjO%tikGz8xsap=gZ1$VVEDoNt~3(PAHdXkz$sdM-g`fDganGQv4F z0v`T>oGHlp9htJWQ`bUTYd`ZR z5A<7yK+8n1@p+}dS|Wo!>SutDCI#Q2mjLm3vL%O(CIt)3Bgm}Jp`%H`o@Q4l*qB2{ zlY+$@$KISnN0WjBII}f}jwS^Ma^}t)I+_$5#F=e5bTlbA*sOqU_vO&hq+luAwljy0 zCIyF>pQ4?|jeI(q6udFON0WkEsFVDYgy1bi%1;u4w^DcICkesZsJHkjgYRyX1%3UY z%uAz4W;-)J=%b1a}LtV!j$bTkPu4yNfR3BmD9ccb%2nRii} z-RL}0=G{!vj}J2MVUm7)(4_|%;>9DSQ4eJWMpiYFZuH}WE)gnmZhiPfQC8X8;NufT zSwmApCM~kcM|1brh7d#&|a(Pc@v+FuXS-Kg`X0i~yf53O_p)Mf~%D@T(L=$Lc^# z-RN{t_zkM;MyHFyZxR{cV?WskHe*1f%eGxj}n|+8@8ydTXD9faZ 
zE>wboTS4=&pX_(2t{WZu$$s}2AcMXi;yag){bWxtU*r;6-LmP#$$iUYJp<_SUr*kGFhmQT=X_RDqIdtqN z`wWwouW$|>`^lccsta=H*bmMmQB5?5j{RiMIdtqNdoE|z=g_gA>?+P|%%NjH+4I=8%{g@JC%c+6TXN{w zPxgFf+nPhiezI%0=FS{C_LIHHoC8;HGrCe-Kj?eDjl+(>f_LWjj$9}S} z;t|TFV?WvJndW0ZIeo~qZglJ?XY3mw6(9Sd>nOL!8F(<)9I@6MvDO^1)*P|c9I@6M zvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1 z)*P|c9I@6MvDO^1)>p(3vDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6M zvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1 z)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c z9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I@6MvDO^1)*P|c9I>_~Vr@&r+Lnm5EfH&5 zBG$G!&6zXI(u0(_pwETJ6#8rCeO8&kti z=XuOBdYDY}(|I0q2n+b>JdZh)XnJBgA1R;bF^5qzS-bJo-DiF!kc$UCI?rQ{rbnqB zbe_i?L)v;A9RhOfK0K-zKA+WfAUORdlva$vOiTog_|*eyo- zj2Yw>e9SFPamAC96;DZ0?9h(lsjlMaV~tQ*I<{+!RsD#(kB_U#o4z4u%u7ZJ{!6dyW zH^y=U<9+)RNxy#yF_6ukO?R#q@iGyQ^{T5jJUp)f*7jcTRd5caM{R4`x$OGq0$vhO65&6fQ7)v+Abv#Wv_$n-;D~t|?@pb6rFg)mxRVL1v*_-tbmDc7O zMK99&Fpbs(7s8IlH6MlM{&lD(LaccOjK($H=_-92!EOfypMQmeg4;0`c3&o0S7;bd z!F&9ssGQ@yDJpHxEJ%I9H$|nlOo9+2ujy-v{MLapYiUf{BY*;uYkWRp}UzcGAW0{4p6Ba4*LkCygP~tliwXhHOcRe3Sybzy!-B` z%xC+ch~FKR`6?YB!(b8h?I9nW%X)95x+Ys ztLOxf^6sdtVj2Z`cU0B@=iN~xOo%fpfNly+Wz8XZd3RLkAieaF-yIeDE0G4jJ1X>c zJ7`}w0+B<9Xm-<#qcyUCD!Nk%3g|_U3?hf#p*rBSK-1rqH%0vw1^n(P91iOWv@*j- zcJOP9?!)1*8xbVrcSqrH7_T<`?kF4%qk)rmN8xZ7W#rvaI2=Y9d3O{Jhfzk}9Tl3+ z8F_aU4u|c;wG!rcM}=mvYI%1Q4u?^VygLfNAft@DI|_%xC?oHV!r?G7N8TNU!(p;g z5r~Y#VX{&Yh>XKwl$jnR5E+NVC^I`qATkbzQAXY!g~MT_THYOn!(mh-?~cOZFv`fg zqi{HkGV<;y91f$5ygLeq!zd%~jtb3V+vMF*I2=ZX%Dba*IE>h~^1GvOIE-rK-BCCk zM(=!zzdMTjo<|@u4reJ0OCEv9IGjZHi{BlE!&yW#h#c-irt!O@!ejg37C-Us zsO%mav%h?I)R*s$`tsdTU%or)%XdfN`TomyM}7J3s4w3gh1<(7-yQYkyQ99?yQA_5 zM9$u4j!A&Xci`qyUJ7N7_zh@&DU>;`kl-7BDU>;c1^A^<=1`*PiD?UX9T1tC@r6*H4-k1W(yjs`)1y>Z0g*{tR{@cSfHZOe zB40=i3y9o``(`5`ATrT$5Sg^uY4nt9U3ficwFyA8p;iKRGF3MMosG0tkg@8UQCqkM zC7x^fHhkcK7^DoNjlT)qv;m?;8Cks6B$|!`9lBfuz=ZQaQN|CiHCtc;ooFGXag#|s z1_st^x}$Nbqfr3F8%*j4y{ZaMAtw}0CnLVf&kJjdW6XSqBVum(cHD@sIz~t{tK4Q7 
zrt@58k{j^*(_Y_mR1m5|A8GXpr`74bPet!NEe`u!G=Cj+=>%5(T>lt}7~A6LYCRy; zK2>hLXVd!CedZ-{YgYHHM&(4umf1b4QF)@1n=Y13a?S3QQzY7FIof+RKz6EQSgdCQ zWT$bC;MHD}9qyOeBJ|@LGJJ-^uyu9s404;B-v~U`C>%^II6woRUC>V&TNdw%4?r<> zMd}H!r>?lUZPFFXbt`wpH0W8~417A>^=Yr~IgP#P+DgF}PnO#?z_L%1 zlef~yck3gDoq~s9`j}f@gJJzP#2IjUX?4F`>dCi=-?M4W>V7v;_BLXqA?SaSqoY7n z7?+@;s0Z=fA(Lym(QCQGLqBGvPthsluOCZ&tbdQbF)m)n-}u*lp`y3}|CYDsSJEmF z?p~KWO!FM3qD<29vM_8VmuL9MC4H-@{5&de>J?+=Z^!-qEFj^2r;~BafU{St`;`#? z9MOZ`a`(=c?rmP(>$i>${4{|-VplD(BOFllKDnw+=6hYyNK&;}a*K-YrFJfMu7#ps zQm#RAbNkozjFfVRB{@XJlHeF zY>9wKmu~pCJgb~)T;l^V!)gw(2%@`+AJoV;W9^(7DxKvc%dU%oFwVcu(pue>`9VQNAdy@gyegU zWKWTtPW^P~m@#NANp>g6mkVPob8=j=6myn5)IF(9hn{Yf{2A6-XVZW^=t%0gWF;i~ zt&KBK+@$0@v}8>q{w)rF&p7|JaeihXeka26Thv_a@DGghQ)xen@mLd`UKL4%g(@hf zk>R6qC&EJHQ9H)c4RIYMhV_cS8J_8wp~%xtr7R&LOf70Jprb27r+ zZaNVoX?4Y7H6}PVI2qTnr;iZ~P5=vbRfk0QCOw2w%v(x`E2E!5i7(gmWCepHYa;9K z5?3?|+yN(>J~@}NLAMzo%ceOwmn6FMgq~7VX5~V39IOou7ab5fp*t0XodUEg1=%#l zIk{${-CRdAWB}uJORC9FScDr2WPH+3vZq_V*}pp(fmhm*VE1@u(y0``bUdNE_@#gM ztIY1?uvo#3C$b3M1?IZU!t_wdypwZNSoLx0K65x6GgF7d^&c6Vo){Y*6by+wHa-~} zf4n<9Htt}+jXW5sW0UFX#*nyUljV+0$Q>JZDB@$2-EnMkXl!zm$L8NQ9LZxGN#l7p zIoF+estDtuFsPadpt<&O+)hJVm^ymV!O%vU`lk+Z&qVKNkb5QAl7{&B!h&ysJgcvzZ*QG2EDTA1DRnZVS7YU6`G<8Ie9l6A%oHTYc7kWe5=o7@s zNdII5j_D^jrNOVu72hwpCP@bZ-NgJ{QboJ#|Q&`h#>b?Qj} z1KyUcD=qmNOkO?RN?z z)#Riu#<|YA6JKWYpeDW}6Lm(^iG&Z7i*u-7;RjD?pmC5ky|p=1@3(9F7vfV>V*79_SZCAC5a8VFg~W~ zw0Nhi)$Za96KVKqai-bqWP7>e>dq3UypJ=?PWCedGg_Y3&Hw7IdNw61nu>B8#j(iNjd zR*oEAvb4ISuC~5;MajbY=F*Xa@vje;*49_oHP5dxnBB>g&Z})|Oerx+7cE&_Q#yZf zZBxU_(xnS4M~`$ROP4NbY+8u-idsup=v!XB zsH#Cow2WV=fQB=;+m&^-i;;zLcNQRrj=8zYnibSzDp}|#?WCw9$KtB`=Bm1mrAx>> zXhOsAK4`M7s_O~!5L289Wc0}sdh2B0Wvl9^J}Zf@yRrLFmGj3P2=(^ zXBgaE$&is?CGXYL)l^ewpe#Ye%Ml1Zib*WBIGK!U{X z%cUil?$i|W$<4-TC|bIt!5PPD>W`R$t9rgO4vkByFXDNQemC>g5GSZ-Zf%^dbhJ1H zRrBZ542xHThMjRS7)Eq0d)BFt3~Klqf~0FrW`acN(&h8TSoE}d$$XyTxP*WtSnfYV z+I`24q3`-iNBS1x?M9}z$=kgAWvgFz{Ik#LSWgFm+cH-Ezq-PHfn~k4p8S`(YW|;G 
zlzCOH{x4mV**BPJbloq+s>mAvWt`11vFhOhPF=lS7D)eF*U8Rz1uXuZ?+REWl6D2; zc;WgNcZGjqt%iaBzO|aRI(Oa1YS8&w4W|F1wYrl98Z!f{TFP3De0+Vzt`Mhb$@@pw zYV=#y>No-E_@W!9KYy)u)@R-TU|3?UPFc87({Zit@{g|7?!Q0dDKWRb_~471zQ5yv zsY4%p@qu@D`0`>8zId3D9Y5st`yQ|FMJAq1BKAlLzO5(mg2gq9llPsJM+CZ;+7dVrCq7UaIj{#3gp@uc!k>`Am6 z{@b1(XrukyJqb+zMSBv~argPb-H0%KuzIKLNyx|dp?`KyLce8CiWBjD2rb6x&)<`r zeTerY7?#+RQZ^{mblj5=mUJH_(A;M{Ke%sK^(b@Qo(HCQ0)uxf^DX@54o_fQ%QwIO zeNW~%>zwXZNv_o||9VfOZ-FN?*poZX7n2Re>-)WL%EDz6wQ<5&19N&a2myAbx} z`KI{teJ#FR-#H%txHoEj`K7);ZS|c?b)~NKOTK)oDAzXvJo)1eEb^V>>v^tk&kkSD z^L%|C-BCLB>P^e6UU{C2?)P+!^2YQvY)O{U{N={f@SlE=0lK!hYANAD|E*6m2@3`8 z_?$f(D+c2Q)^EV7I{TFQ*Up~ap*4P1X za^Ljn37Bu}aNwmofDvGu!JXK#j|*Sq##ykL!GlZje`AA#ZU0}6{(q7F|3@Ai5=alg z*oLb5g*9=oe}2up=7p6@aTiXjJVw~9t^~;3)YxI=$K6sq1V9M48#N%>4F_Q3u3iij zl<;HFys&`;3;NEGD@q9+CK#B=l*bOzOGG$xM}uE2AWNvq1aFaS+()qV3X|_!EVA7Gof8PmV~SDkUO;ygO|4{31^j zP#2F#B-3DaV-V1sl#_@A3gVjrl*MUkLlhH7QbYnw9X1Fs#3K^YDfA*9ksxt5B7xdO zM1omq{3#+Ka`Cv9>mvxe4-DXN$)wUw9wVR)_ueT_6v)S)D4-`!Ly?1WVbt7+1Q^^K zce2WjIiTP2L=h*bXKrnrcAqGm0w*F7uZD5XQv^Rzz_7#A`T9fr)sdT^!t~`1n64e{Q>g;wc0af=#H0u zFeT07nKAgG9nGb_TYT5i1I2?=rc8anH}$23zGcn69lrcAEO08mtp)f>^G>1fvA$Q2 zQ$e+4NZMF@S-#)Uw`VcQo8J4qp04+BBRfYeAH6I|!$y+uHq$rBrF%)hOQk--(R}z< z9x0iUmgf1{>cMNRtIZx(?_5uC@FTuuE!NQ7)c^CW(mbnw-o&ST&Aw&Ztx@?_|E~Ws znRnF}QI}`=uswIjC|Sk^jNF%7wHxaHPX+2fW9M}@TG@87>BYn3?%Ios@E_eWlQ-hy zZ=dl!nwE>C{rYpaRd*x%jD6E(!2*=BXGvf^xu_$@XYX~L@4?v1JKuw8i%IOk$bFH$ zGQRQi?TPbXMZR()`M9S3!F`h+>Hcl|CX{{tzDc(?sgZnG{vX>nzc>;H-~LnkCi>k0 ziK7wwC-%)RhQy%}ci+TT`u{QaZoiS_*_~giM;O#D{Is@q9~COCrjp&PYBG7(OplyQ zCdr&iX6EFjiq#__z#@}XRI^A9FWprdNrtgrKie=2_|-7rr?n0E#lZUVYQTVBJ<#7_ z@4G+WbAG>wUj~y^HL`5)NY%`YjEs!G%HEd!MfSkB?q`f5-p5TK&It22JPr^Tnf6f>icizw#sf=l5@a{NkHO zfB411AFRwf^NSzPIdlEd&+Q_Q4uAaU@9zGK|Hj|^j6YvI`X{=kUjF`%1xx<^$37G@ z;?dvz?~guw@zp=&zyI=!r+@G-`Ofck*B-rn^ymNLU;M|9{>w*ak6!&}{O7;p-+%v` zKYjDlFP{9fFTVPNAN`~MM6druuP^i!XIeaZ+IuwM5J!G=@Sp#ifAM$!=fC;(@ANT0 zB<%9H|HImEfAs!e|LEU&_Wf`F?)%^VFW>)HKYH~@w$ShW_+S0_M_>J`JI;B&^5_r# 
zCExq)-#z-TAHDf+!vq_k6!%iaQz=G{;TI7 z{-^IBJ^t;Z|HuwKI^@q6SW^1)qYwXw9mK@>$G6|CJX+>FKkJ|T_|d<2_uC(R&^Vh3 z^p9^}8M@ls&0mcAt>@Vwyr{i9`VNNH_w{#2f4TVX>|!B>xX}8#u*>Zz3UaaTM$#{Q%XUNCJGmM9w zy}kCZy^}XvV=kDt4|NZ^bmbXz?_0(F>sj;+yWM_soWJZ3cXIx#-JIHV64CbD7^=+}%u18pZuJM@-H*-)(lfli|G;GME`zzrVQ7CcO?1m%r%ryUlqs zqt&&-KRu&nX^m$a^}FHr8yk(6I|##SvqJ)_d* zCF?M6?YCRcN0WoBu~M6KE^b#EpS&k-iBa$Ndwc$$L9;Wg?{^lv{e`UCU(EJ9wdM_U z*4q(u!f72o5T^M77K-KIqZ-7d2>H2U-BT$(Sf8y{rKYO z-E<)<4m7elyMSeqcbnVoZax^ccROr`#mvvFAz^kplLH9|HuY+;w%YkH@8s>gH|chB zgc7Tz*B|p>-Tuq_dqHa@-O1Vu@&5MvgZ^MVD(*MDueA?%+Ap%@O-{@QEaedo$h$Yw z+gohg{NnQRs{Wqwd+$PFY`b`Q32`!DGt*oIY1L|u51Pa0Z0*ghr8UN3^vemm&YtG{ z^5RVfFj3sds5KrGccC}6ywlIWKQgu!Lxy}lj&EzW7m*%0_s*_i``M9vk9FM|OgczJ zer`q}F@7tn96Y-**e^e?A1|J3F3xTYqToRNcwsTiJ0m+`F{_{3D!e)RetLphxcQDv zmXA9JZG`;6pf}1^)-*cJot^xkeXxyy-tP=_Fi08wL9#Fu?9I+_r+0+z(G}7Ie7ckG_J=GdNtRLG?{!~gHLlg_a;RB9 zvh`d)yU3+HSi~IB8fN^L{C^ACH{WDSsYcsrA0p3&!wCadLD}RzUqX8N2cZUFIt7>|)*SD*H0ekFswv?a+Cja-pMz zpFaHE*B}4xYre}5JpK8N|J+LBS4)4PhtBiYm4iR|N%hR-d1rF){+W$=XL9iVnJe?o zT=|QiW`ARg>l8NF+3AP;^zOoBFSn#oyk-i|e>egoKD&T7Kh>Sy0{6YXyPf7o`pVhy z+0i*lwkvhiMNhQZfurYHeL4Ht&p5q2&TPxh7-h>V#b(xW} z9TXo*eTa^2q70W;mY2{k=*rIEP@0lId}}Zs?sRr{3st@$U7q}}TWgQ|{ex!j6%vj+ zSwcZvnzRz|omER6Ru*XtwO@^cXu&UT4%vO^%>1K&v#|)|5#0tV@ywhSK zE^F&xk?iH8Hbzuu{0dRo>mr=G?9ZL-$%`l2e68LgOZr8p)y~#dG!8r4C{(2Hur8s! 
zcy)bwd%2M5b!M*_ODwG6SoYQ0hRdJPtC0x^d!YEUSQt&aF*6jOoIK&`vV5=%Mpg4C z8M4y}gH{-xqsi`Wr`3_YIT&_cbh_=mdnScOMW#VC3zp5~!6aO6Y~}}u375_Fr|ZkB zjZy|Qhr{Npd^Fh}wT2z6QGWFPsO!4=CrO~cZLZxO?mq8yyR3-@h|Ptxz3hr*vn%S~ zAD!Q!>rk-4<_JezAh|d?J(WB$Ng^ZACM6hp^+b#zz^3*Eo!>^$ZFiq%%WFaAZ%;bi zF;6$?Vbigkw!19TV(50-0}Rrcal=aXN9_+stP&o9k?D-a)s?2_+z!v|5)f^_C;K)Z z4`0<6vcJiAJAHpVy^`&bliG0n9pXs}qk8zK8OZtNwUR7Mf6kmca}ddp{qPp!DXSx> z^IYptgH!6(rA>cwnXp}UO1{s{_pyc3Z4`Z#yiEhQ94??@Du`cxArj zmb;PppJGHuxDeQ7TGlzv#6JGZaMDB9&+#GBWuu?;C5hRMaW8+&rXJ5{R^BJ>&wigX zW!miaCp~HPvzw#yi@SHJ`VI$JYSQ1PwOJ%)i6P{&Eoswsch_V=*w{OrJ-ywu)ilDd zX!i&F3a>Cb@}R%X{LIncC|*1l96&ynt%i*?l-{+w-yZgxJMvce{ZjJ=cSiep!DnlL zl-AhhuP<+w>Q}edvOSET$hR(E|0UTgxopo&pDoTRX?Hsrr-^x-vH`mNz3QB%gDscG z_UOz0M50W-Z@anI?DRCdBZjoa@)@@?_U?=35GM$`CtuyJBgx&0`2OhHRZhPAI{T7i z`efnh&#}nfk~cHGHp5~GBUvWJ`z0+_*3C{I5!LGUC6nv~GDeHQm#x4+%&W`Y&}>hfY*E6zr+bumP+o$~x*iiwsN z=HY}JVb65>Sg`Vpaq=&(>v;F6o6n)lj`4lTpAgH*(Wuq_z)s5xE(c zGCLD9Q*+|{Xi9Il+v7oL`TN^sbeFgD_U|yhabTByU`a(f{!Y8Ho}XSV@X$CF z*tL#dv7hXINg#%Vy7=wKYk#asd4C_0;TyeLUFc zpG&D1Qs8BC*pnFO91OZBrg8pld&nK|V!+VwNp*Gl_AhP8O*9C$xa=PGZC2Rfr4_D^ z7HXzFa2Q)73wOF3tDHA2%J|{nlKY)r%a!G>B{KJ$Bc2IELJD72K_Yd}ZJfAMj&HbazO5 z*bBv&MVTzoX>(BSgM|-Sw<$KBu zEcfLUYyi}%g*A~aZ`Dk8+6}OLWR|*nUDwuf-{xXAM%{R!JiwpTQA0aGnY*fl2TjQdIzmT_qDN&C)i;UW5=SHOl&!23Myu({A#ytajZZFQKkjz1(*~ms!>q;qYn0*(NJZtF2Vi!7;{9)&xr{;Jdl6{5oeyviicp zuh4mbCG23FpR8U8&6~G4HWHl+nSEIvL#x@@mCK;Nwfy$-?&^vy%ifpb)J2NRhtKBx z2=-qGQ0|wtgXz1`^tO3!GP;gAb$z?Y_ErvK)C;7^5kUI-TVmOS?cXNA*VhB(C)mL0)Nyg;# z{AQYwRzT>p?k59sXsF_aX6#MmcktyAR&^2cKdl2Ulh;D$YZ`A z`06pBzP)oFq>;OK&mqPD!k*K}BmEh_1_GflRCb5WJsFZ&v|Qo={I~XViFvTh=zBH~ zwmW;1{zM|RJ2|))CWx9Ug6+~&?al~i7sPF;=CP6?cHsnB%wk@g-W(rYO-bTE{_f`d z=;p2V|1tl?jC}IvPo89d{nzP%KYyZaQ@{5m2GO8XRMn`ZmvMDTdnEydibpMpE@QOw zZ3chLU)7?bGoI6mjUbZGlgE&#|HBPJ={u{I|k1 z%@kG|LzT0lMH}f)IwJ5`2-H; z+XO%?{sS0|w$!&uIA-~C%s02ty7}$9D-fHEb(nMCcyJzU%Sy4s2f+$WUCMiiux!!j zTLw_3RH;~VNVtkljr$_isZt=WKX#Zxv&Tw%jV;2lxE6J4VY1mo=0f1AUW(5q(~K02 
z%RYi{hSE)1FQjuj{4Lhj?#@eAhC7vb$P$}|O;w&?pXKx-SyDTam_@XMi+bEcll;8> zN`MR@PVBq<3-7&rz@@&P(}j+O^NId+rl3~FhR&i%{{ES zDzq6tFd-0keo!Q=XV{KZgt%gTqUP@?g za(@8{&@beycbU%o%55PrI1lSv&lG%6xv4U(xghizFdtbDK_q;+Q2&WuH-O0eLs^`@ zhniyv4gF$7McDG}wEE2vG-U$D#9DZHdi3t>`~xFf9M1mukNx1>*%fku2Vh~M0l`%C zW_6ubXL(D4gS)(%PQN=Qs3{pRJ#nYg9%Z|&XTk@4`g5J9A{sf6MfP*dsLElOvqCb= zD&kU%2VU$CewySxvDoa-?7x4SeOVfh(c-CIPM`@P##WPXc|f7CoW~dwU>!BAY|M^V zql0E^2*P3R%=QRDYhjou%T3^+(O6Zo;Rys9q=FH>uEpIPNX|CF7g^QC8`syvu6Y$f z4)dXqKrLG$>5S_#PLDulh3Y*t!zH5@6#(4>%j&d7rpO zvJ--MB6soo={bScS!=7Ywx+el0`BFv_1>bSq1)zoZe|Me5sRtEKi1cb;iCD@-%(zI zO;EQl+PbxYjY22Yvw97BJ+r;dc22jqt85(6mRx{iV+$0)HQ@(ou#NXzZXmk_&=D(0 z5dx_klUp(}`>-(v<5$_r7UIPNV%vF-h}^_aSiUYIdLN|B? z*g6l3*+I~sWLoS@Co?QJ+sthOO7h zcVyJ*re11}@P9#i?^xJDuMnL;@k302ViERlSo7stt4QjI{6$@}&K?$2WavXgtU9|= z^7208FNpynP6Kq7F);DS86i~xp^L|mf~*T+xOPpHCEnE=O8QhScIg2{PE#2 zorso#an2!M_+!layX|I2VO((Z>?wR105e0lqpwlGXm+%K@@8(#y*%^=E2Rir@5PsT z&^SaQ{LE8R7&>i9Avh`7{VjMtJvud%!Uhas4v|MVJcc0(kzw~34~U>-ngX-3ty5UK z2nc&T1<=;f+v2UytXZDGiXLcARtzf^D@C{;Yv_JdA+>q5Y)pVL7^CN%!OIbVaKx}$ zm+#=eo^(L6{WsOV!WV>;hA`B4DmDg@LGbE@S+blOz{Iv^JX-<3o}GojpgDD zkzw;?mb@;WG_1J?C|Plzr*~FLJ2phNzP5&y4Xxlgc2k~`#-1%b_Q?&JQ~HrbPWW>4 zEvv*V8*B_?!Gw2a?m_&RC~7K&7+)8U(E&j&og@!FSwwIn0VzWbI6qkW+WtV3AsMz-{3m z*801nvy1py#=wqu2-SCbicEOATnLe$p!v;@0BR$pK~!lEBwU+k z0cK$Q{1pSZF2q7ADnFPI(3Jxi2Ggk`GQKx4O$6b=YYDs#(FO(E`nH8PaP z%Mb%%^M$ZmJjp)*?%<;_goWATEP~>AaV_&7Sm~^B;~3+jVbE+8SnP@ykL0x< zVuv+>C?o>JO&Y)^?KYraH8y5YI$0)9ArCBA1k{y*v-xD78D1SFLbl9x^W71V`4$6O zxY^j_{8TrenEj_)lqpuOvAVB5rqo|#kDoryI3wk}>KRLH?VXYP(GKZB2lC@bm{~Af z4=@Qc5v~X|%LNe9Ar@Eu!_iG{q%N0-1oMUyxtSUb5Eqax4>s4a5Tl`kxyduNJW3Kb zLG+>ZIE*e<$4Gj2UOXpC?m^HiLdk5wfMrWsvlnLHO0*N{1pGNG20X!tuA6 zDbtmaV8_jFoSIOVH$nVf^+bdJePOv2N1aRZw4HQ|wCx zHwP2J$ENjU84sR|(*>9_N^IgdBHKI#43Xb$k;;<7Du!a+@0p{eF#l3R3y}2y*CyW{ z>}G3#Yn>4($aVfnYuTTlWt&<_PDist3nHd%+RBpB9gKTSQJ;h5gdgpI|bOiN!LnDa#WF7BJ4hDsiE!8c)o_{V(NGB`YCP&u^ zR7gZ6AKTVAK+ZHhz!-2WHgw*C4YF2~_eId!Ld_Z%kjUx6vy-FSBd#gr%f!;y;`P{r 
z_HK0>e^^Wl&L=1pDHy9&3dZr}J76Iw&?nEZm#>e0km06VwT;0dq%`f>1+-FG1JM?E zIjmX8S0omNeSx@;GTX3+F|fz`*-C0DJ3MI6<_Nkg>LT(_KDMJY!21la)C%lhcDoVl zw}+h_1gw6|Ud?*@h`y3nTRg+Hwy0llHhf9NeVVoQphAnXFsm*`S6Vo&Wl7di{Z0 zHqlb-6+=NR?gFt1^mGhobFj|<+=EYxPq1taXrJ_qA{25hvei}X@g0D$u*b`Hz?6Vp zpm4($3np5d(blXN!2x)lHp_)Jv;~ ztuQGpA|2NT@dHm~3AyL;Rq@gQmW)QkL`5ofUDoBXjW8Q>2AP?Cu79?V;=r7BMbL8a47d2?$aNFtv;urY& zLLp)y0^Q9Q-SWi6ONhGKH%3$fdG^?fq!Zp*USHZ=!NILsW==@lVa zstAFHl$`r9*ce<+*UJu}*S!^qEqH#aT%9kkTgt>eqI_Yj7p#d4E*tUr{Jp3E_djN8 z-v@3&$ssf^=?EcsCB*SP%zrMz_v5b@pD(Zb32;(H4Zx0eveRP-sy}4^d~-4TlOL8I zV*x(?=K8Ttg*L|<4%v0}`uO-Uzt1PmV~=u)Y-nw6(`12ZCOwuYvWK{Yz9emxB0zBj zAzrh_20_Zk`0&@#-L5tjkuCUt0>rSO#+}_)`1~*0T>{u9^A!^)899itG$Cy@5S}^* za0y)W0~WQeu9`Q2Y<6}sy%5=6`}mK`H$riH{oP;g^yE&l4@LCYw;B~UQQP2Fh<#eB z)SZc#a-em2fBP5E&>qqP3ZzFZ5Y%5Eo-Gu8*`eq?((Q`3GKx?X4sn1Z18nayfE=|1 zLCAn1-{k?0ohsmPx&oyqXV)-1cu{Vn0G}EGSKL@e`2O10=4@6rsq%0(Lw*?s&)qu- zBO~ka!VefoH-u{CAYBP9AX3-Z0m`4E<_pRJA ziLwzhD^bSZU`l)&A`Qi=>I`6i$%>GQYuBgmudW5GXxBx&n`5wgd2)RSHCZ3m+TF%} z7xPeS&y2P6&zLZ!Aqj`+%d$3^7L> z1M>T`OIQk8y|dfaX;h!Ja}iJwTUbVz_I0tZ%+5!@{52{EdT?PX6K?|@(B{oz25jAc zW`d~vjUSepA(~Io?zG$|c-U}~wkL9#^Idr7^VD~0@@p&YF|D#lpSQ3`H^Z*a+nBgO z{^R}r4sIt&lg-1qm?XzL2k5>KnlDgEMGSVE2OW~Io)U$Cxx6d>M3J+RSkNAQ{4A!Y zVmI*`CB@*=B14sV3kl>p+^>JX?=6#}A^h!s<+z8s-Pj z0&@}a+yRX%#%_JJg6MMKq47&fV6nI@&w*imxm! 
zyjzh($b|wFWwc^xph_+isBzhnFULmT+J6jGT(u zwD*PJ*8#Ac{{QHt`YB{P0;Bwv7^fd2@AjePeZX zX=7z$34Z7D`tsVOYZAVadBPZ)pIRA1h7fHK{r~Lo_2UqC)Z?!+HyT@O1Ry!h z^ID8P$vtxG$g`l z@mu_?P?&R#Rk#l)V-{lvp>y_gqZ$BV0kHpO&b@KNKdFADSY5LC@RRyvvAt4wOEFQ4 z#SmDAG%{pH=cqm{xYRzC`p~-AZjSMeidVIejL6I@4f5_8o|~s9Zq|0ea={^xLQ{de;Q%O}Lqm8-ZyJ86$s_TY=FglBa z$7lj;2PpOFOhf@;ffPq82AmQ-<*$mtbKzuG0v7-V9g-K?8Ff%aMq!($b<>EloAuvG zf2FD@Vj**s(LFq5RmIo@g@oLcLMyE_QnkbzQ%lBZm(e)rs(|{-`(C_Ouq^3iFkW-ef;(tS~pShI(zzNC3|{$b#}~uFD{?r2%a6^KDCqqO0jY9 z2e_o)^H&?@=0^R2j?a`T`wbr21#NpyY~es_uw}zvZb_3Jb7QdNg1vAG7JhX6R`1`; z<_II}Is*`So(fA7MiyGB2hW2n81gM;Ff;!NZM^+-eFutoXJ>ejt%}{p4=RGssxut+ z{>qXPRluvUPKFNVgsDVCy!}PrEm{gfFbLLH8G1V)|2CfD?0(s*-J(9Q=zrbcSKpTP6ysriNA`qJ2QXu!{ z+4R?UXWxs`ECmRV1n!$|HK_`Yuty0NjgiJn4H$pF~NHa4`^ zNYd<$J&Ebekzinc3Z>D=E^gvNZ<2{D#hUV*ii?$Z8&&bZcDM1T zK0lL1HVPxq+ii)fBNzieX^@~82%qAhk@Xb}WnL1lAZ05cZ>jnQMj+7}S#nH2GKx6K zc6-v1@g81EGiyPYDf%PvL9yLltYRYZ%d%6KNUG!+^1+_@O?V*Lfg-N)Pqtwl zeBevV@d^HHIetphxs?ehO)I@A>{gm{{WjYyl~((ilt`tbxzK%H6{=AdKRq zOmF}lsOp|Hi0d^m^mvT4?pz`b$ybn(qwVQ9A(!Sn@%sqF->;V=jTTAp?wN-0nF5SX z4RUXBWO>|*m@A|X9vZUONOCh5*;Cx2x9llxbEdY+QMRcLBtY)dxeu}vOyPfAdbtsB z^zH;zMn=ND%oVRM6r@Zb-;#FBLImwE?0`A5;=%NfO@fB1iA=C%U#{q~`l_DkU!LnjipGn3)D$^8+mWXFn1(9g^eeb|v zEh+x>#R)j9Rd_0l&$P6QU8}h<3A+jgFsf^tJb%W$io#V z)7)fzSRJiJDC0Tu_A>%4;gLAzM?G zu2{U{a_5|}q(Wpx(uIr>xH~GqU}@Mzvb%sQwv%~8MOv1gZR^?Ar~cQb_Q~o_Sy?4+ z+?`k0y)a%8Shx?`t|MNH=Q1DgPWNkzxi!yYY<5+)E>5$z^pzwpmJ@PtKI7Ve&21Ri zt$jE0K97j4vTsRNyHcd)q_vl4Wd5m_uUX8@IW1*ly8Rgia{aWf$?=(+ZdP62B2^&F zL-D^1?74WuJ<8LzEV0J(;G>Nvcr2qDs%A+Z&^-R;j1ri zZIK#YjHv}>rM2b?^B7o1!-%!%wocUIyu=AN1EVSAAQ)qu3C+%+OLa2Slyh$~)sz|qB@TI(r@8u`&W5Rx zlq8r9Nk@Dbj%)Kx7dr48dReM#9IHiwD{5gBb%T9t4-NNRd;b^|h?1yE{86x*OIgA;Ajz^mh_j5<`%t0BdNg(j0jOrv|`Yvxm* zg$2sWY|B2vML**Hc1KnY7azp~tda9PDml->wB*G2cAnYE^1|Z&;9|-t6a?p`Aq8s+ zR#62`3N}$sVLj}x!ys=XM?ht3A`8Kv1a!8NY=(nW~4t*&1Vs+v|8$7m;P~#0HQ#OM&c{WDph^;kFReaU8#r9WZM3G8hPP zNM#g8-velujW2eg*;@eE^U8gswxPC}Vvc7->R;bno~t_O6>QeJl0S`ZoYk|0OjRD& 
zglDGjVt$q@j--5O{D7s7h$?U&O@ipqD2!N7l7d6p52~JCj%V?RR>=aDidFcnku4t* zAX+neqWDifbE0b`zV9m_FIg89P)d106+Zy36?e8nAx#xkhbUv^%c^F0YT;@TYp9W6 z+2hAG zM*`C++*-xVEdm1{y9m?^U8GXFe#L6mv*-X5yI3YCq?nyyFnPF1?WhP6yvnTtAp>If zKI(atjQbCpD=36dZ8hgC&oY)thH340x@gF<65F?2`RdK(=_%nn`>&V(?j5RVYyI6@ zfTQDJ&gpM{@&1-$SaA9zvyTW0!Qxyq{Y89_Qm-rQwxjc_w-lx>D@ca}DUMT^8D>4{ z_mNJBr1NSf5DgV=tfWg#J$Jpk(yxO|eL+ zOH0O)2%1{1;8)LN)h;c!pKh%Yyv}j6!D!Tv!8n6vI&suX^yj-h^dWDxqF7wf0J~ow zB+6qR9B^sJ!l|)dgH=yf?d_KxxMC9`k3fFp&F>R|?A|Y^A@P+~agfyD4$vK@`0kqu zR+E0W12P|%rCS_chu&5rwKlXsfzxiUFVER#s05ZihqWBxzC4 zUPd`yv)|Q;B84OG4U|(E6ssMT$Og@v#EVS>HKj1mq$s0xHZSmE&mT{GATc^l0HhmB zX9pi_Qzfa;s1<}H70nuOrA5U{5OZonjTZp(X%1-wv>kL;9G)kcXQ#)j1MrB&OEBfVhDhfaI_9wK_g|`lrKNOm$(K}f z7)xkOMoB(Z@lw2Qtr$x^Af40&I3a@Wglex#Fy;PgDqo>F;?_42u=gsSgafI7W#GUm zpn&gTg?8H{8o5UFs>l?GE|Cb0i7Ha9%f-aZwvmTp9!er=SW;_KP!D&7|39GAjQB9K zh1KooLhUVta6E~3!r8i5$~L?nzDTRdS-j#?1?0p>kdud8VXP$JxacHg^_FF_iUa}n zU6L;i<#0%yTrzDfV|wPZ$cE7;s-(vM5F?nQ)qH9dy4O6#^mL~B?d|33>57FJV-~?m z!+Sm;B3y|V#9b9ss~qAPI$C~=AcQy^8s13`GTx#b2=G&gOePphl^w#4#_+?R9kr05UI8**}oeP$3Hgn^E-sGyExmtv1*gusy5U z?a-N&TpHwwW?+{+R}?K}N+`y!btW$*;nhU($F)e`33a%lSx7mZAo^YS^HpSeSfc!B zxi*!p9ICM+i8Lm2lr*>oxt?dozG2!h@qX${KkpT^G zd=EMJs4^1<7LLiP$ySy^bzj*J_saUJG%H1Jzqikn9_I%Ha_MZP#eH^NUq_*xHb6~W zzW`d$fLo7ZN%y--G41*PX(ixF;o@+EKP@X8|G!!)*oOoC_h7AHYXj$DdNw?Y-MlVb z0aLJ{(2nrAD0j&>7Q439Z_`iiV_(!ogkA@0W;h`kO$*f`Hfpuu#X=m9k=5qDt&|T>0^hon zeI-UL30d}vfOTORWkq{~zY<)>w+#Rd*d%YQOo7S}psi`MhE4b}5n~v)S*Nh7T(Q15 zx;{Ii8fWLGe=0tQ*E7bL((hSe?|>7An#ZH6QZCck6ll7^2Ym;`c%}lMR|LLv@RF)z zvd`>Fi&pSEyiBeh#VxVo>?;fW{YVC z?qB@X5OEBsuK)((Os;xAVO1Eaj< z5K8QD*(T*-E6)uii-}+k$yP_1shpq5#yJK3%HyUxLXJpdeZ_Z=bgnyOA_!NF{?_OP zhZIf=b}*fIFe-y;_*hvf*+VJLW#dlsM%kzUvjSn`HPhlaq6a&ilMg5Gj~+}hjf+XY zO;zsa4Z?^{=$GI<=MrC337*DHSJuX7A z%4KwlXGGGZBL6p6=Wqam<8w2+du=0@)k_*1vW^SAqDXphe_(id-N^7UhS15G&~j_% z)w_H5dU|aTuKH58mD0wA+~+HomYAg76{R$bQfyA33ZkkE4u~yV8G2Lwsl9C;+FPet 
zNb{S%k_}KS9)(4mQOSmQpbDY}I}J38O%o^WCApbUm@UEHcSU9C#W9T+h~M41_Am^S9IvJG(eOzdM=M*-kR1lS8OM9SnxU{(%aNs8AN}j+sfYJxUTnKsgF` z2i!KEH_KQ6rY%8{SXQAtIUf1b;_IP}lihHH{tY|_H3#`$f&Vzm3#|s#OVN+BScJ6A z9Fn+*j8aZhP|G@+7!3gfDao_J=7R}jJ$#M8RU2PkjqYBv(aTP=jCl2sRR^&c73wFU zTN}fuj9lTx7BK)t+tl!fq%LSHJ#`;u)J?R#9MalG{0f|V)^9XJF+JObO{zD{EpZ*8 zKcGB9$@nkYQgqpiQX!FxNmAOp&VVra(} zfv~bcMa^|)QdKt!?uuFEQur@L+F)T9urua!IAqSD<&oK%RJpXkPIHXYE3-t5lLRQG zQbd}F6{3o<{HV4X!l-Sjl_Fv|^u+wJ_>_Tq+>2!fVIYywaiq#Ou^Lebd3m5X&plKe zbYdW+84C-Et%6wAr`OU1TUP(h5)WfHHGVd{oPp?L^+3bt?-c-YKt9Cce#cI^pgEPs zwn~BblKV5Ccz4@*pLjRhd7pUqTEYMq=d4Yz6?LukG|M3fG7l^7qkmAyiNs0!tZp$f zf}`$^hGWV^hYiEDS%{!2aAXnf1YskA<5}d238^KFZNF0zBEKWYBBiBB5!iw#a>Z5} z8|xdE97f$+GoI8fgJP)o&|cq{=OpX7Hj|`<#GZNY+yeN{b;7e~kh0S_!pEvASEH@? zUL;2AY{ozyCil!QMfqUy9JUdaRrcm(1`1CMy;hYY7&Y4f3TN^fA!MdsQCczBetdgm zZL;I&I~E#EQLHaQ5uy_lEbv4Kw}3?|o;MSzGv5ReYh!(DWqD4~>!d&D6MrhYcV(_&_F3MH@qfK!$dlVA z!91hqng>|j)kc5x`h@gV*cR)qdyk^ZaeYpbiv8ynN7 zD~(m2K{X%R!Y2H09Q{z05GC&{(n$G1`Vt71axZiRotB%2N=mND#^&BwlnxDKJPxbZ zBkeV&^}QmGM#i$J&&sparN$;*&ew4v*BYCfYg_b!V{|vglMqAsWO{t=EJ>ric&F*i z092tZj-}oMi084J%@!1d!m1c{TZ32JFLKxmM_UtyLB2Dj^O@IJPp!hFigeuZ+O&8~ zpWEiZcl+WKCEiGuMlE89Q5K#`D6l^iuN1|z+8+>-^?b%rb{`h6j>fVW6kaBPs+`8@ zCr5l~b7@P(G)f-0mIexzNwEH7>Tp`D+Qf6&=@aUd+ML@~Sa%=2k7y{_aGE<7_yHOb z?+L?iR>G1+Kr%-tW2XjvIoR9jl5;4QMOYgvLv%0u+%^b1@> z3`bcic8G@Zmio!@G*%%{S9zSI=s&=~gV&}{GKn(N`x339>Z%$&)WFhviH;3J2xAvV z@18pp&+rdET?;#2s1zUK8yb^SkO!G^mK9NzA<=0c1O<}nA=%eRJr_QoG^PhE!q(mO zHA#x}B)q)-(7LoN=6hh5<*^FxVy!u7&fTexiFL4*UlqzQGX652iddu#Wm&0bvQXRY zva*!s>j|x1+Ca{^5utft^~mbY%LcO@VIBO~1)pwm+9OlWaQtk;tem8o7n-VkSL_09 zjXi6!Zib~du6z(>lL~i~K0+wqKnGT@fvm926|6Z#=BV8;e-33#N!h`Rmh29Ho@wY#{C&VMb? 
zbaQk2T?D~k+U~Illhh+Fzy$E$&vacm6B(>jw@M#KTGUQ&@2)S%hN%Qy<0odgfl3zz z9j#ZL`L<^Nd6E7Yh{8ewz!po!CcynT_C}f1n~Z%yEeWGxU#`l*Lw2D4#igHOX{8;e z#bb1@3Yez7TQ$KQB~n$r(Rf=~wFFhT;e(_14mH$8H2Zm02X*)}x!GbNevk#MwW5Vm z$dXX$O?6&u^%??eNo@z6L$Pn9VjnC^>40hZWJ>RX^&?V?suQSPor2v>*xQYDFSn@n z5(!MhdWQjqNTn`}@CD4TSbCOqd4Ex66pZ(@-=|l{PiYK!_x|adi#wRagv``1G<*Fa zdvi?b2Za4XrM%Q45c*Rpr$3$z+n4;iIg$IB4>g2g%B8?U7>OZON2AKTRRJMszmvOn z?>?|x%^Wm4);t(mxm(hX4BGSnD=_9=SlR`0L)jNpgP_^fP0o{0vr@k8;3HhyTKLlv#j`tpir;>GQ zM`2=k%N$Hv6FY;KJHdXoS&UB@h2q4|dTHfP!-=z~YYBF<_`2YrD`oM8NQ=+#JLZ|R z1n>}5ZYAUY-#2=zCM_}A;yau@FX>RbPUIQYoiv{^zDd@rh5EcdWw9L0g)t6yMjdlb zmX|nN@6@Q<8X3c`X8PEs;E9ErN^b z?GKmN-&rjJgj9_30-%emDHIqXbLve5m3-x)f9+41^3fmiv|QzDy+1j7qa@xkg(MzH z4KR7s%0EH*D8QfsbLvv6bfX!LZ_9|NW-u7QIP+nox$2YOg#g?@qhK_VA^3&_M*X*# zF-?1DYG4Xpir9PRd_vI(8L}SJ^0`I(F^f?4pni(%WyNi*@)eMmpf7;=Lxp2R1zcWV z$Yhf@tg!Kj58T_t_jGr2US^(ZrE0xI3|MZdcOGZLMBf$EB5F-Ai52%LkZJd>;Ub%H z%y8f=N8inYp{naul6_)WCuw%%I}c7ki6kup(=0c(Hdog{uU9vhHu+L%6FU85d@ z7U4LtYVj4hPh*FI=odrNqQ1Wmj92>Cuq0UE+0DJ>6%M4U3zY}VAP9;i{zxH+&yXfB za|cP!TC2=BDl0QKYKcy|eEpY{yzbCEJ%a1?dU=ednVk!afU^_6!t8~*s1U|P&z-4^ zs=^saKRL>DdSP``kh5)EUq}{sWwf_OT`DPyQV#Q{&pwj*hw7TCkZC=knUJ+GwHNd`*=AWvtQ-^6Oi225-JTen$m> zqr$8h_b0IPmziH%!Sm{T7hbTA7?Rhq&q z7cn6qC-6heeM~!6)}KdtW0JB}0rT@ zKxn0#li?N}aFMlLd@_>$HaV&4VznZXP$tx(n@4)OSad#qp4IJyES$jrS`N+p0kn?{ z^yu_h6mqUwgNc$NpsjHC7)v||yQc>}N3j9pRByMT@m7l!uu((HH;ZWMq2LBV3;99n z@mTm%p++M7J5%5t1TgZ@6$`2l8v7)vqG%JPIKFPF)S|$S9yJTV^z=6WnhBXirLc1?UsC2 zU&%9q4nMr8G@8PVW-XOgGb6=b7TRFemc2H3FZ7$3i5q*lX#X*T)T!9CvZU&~Z8qny z{h}|G>ohglMj`o)pDg0s=z7^V7TAx{jyw3wDk;mMN(zCLqNeHc%JR}iqqa@+ndm*K zs2NppqoT5x$XkyLtGHQl0!Kyd4YZzJd2^Z_o(Si7dz%C?feOZak*}?V#d8(mMHO1| zGTU*r##J=z1L1zOd#2WU^bYmf&1mSDO4*|;_;-dZ#jT$lr-Hq2SWz9Kw^nlWaG~dy zBAd62Q&xwN0wkJP+&nkkhX^(ZNRcT;H{xu$Y)aS~lFmmDriphhtn3e6RJ$=gohMP4 zn2YsZSlFtDA`Em?GI#diQACx+Ys(!JU07D0mZ(a&i+%a{9FUNBl*(^blGT@$A^@ck z#Lk^${OzT!P+3K*PAQj?#qNbZu{EQtfAA*>j#1tCpm)+s@Fu)I?BbTzJVd z1qu_Mms37To`GDV${ioG!DgTlDJEREv?~SrT9fJ}H#1&pnNpRE8f&VnNy#+#kUbSt 
z^{Exd+Lw;uNfqyjW;OO1iyix|ctAfQK9r>g1iM3`;Qiw3*Lfkzb&Pvj<0~sDYgW z8^Jm#YKSNT$`hB67NPn%r9{5T{?@FBqL@e;+A4}9EoAr1J{Xd{Ap)N`uicDl;t)`f z=U6_br4tknisfc?$a!o|G2@uOGF5{uqFDnCQI9Puhq?cW4)fJ^YOf0QSR$wDq%EyN z0I9mwwIkC=h_?D*Zvs4S?e16vwkYhcRsx;w;4hd+66;(2rYFO$ieoSx+^vM0#O`PUbblOI5vIJE5RVti+~0VPIx3brvk;?(73Riyp`mRxCT*(rRC-E`(K3 zHMB%2z|JL-tVt~6<(l$cZ!h1G|Ew+YsG&Nh`|! z!L}`Xc6_(JOB{(-npB1BHPuuz4?Ui|*ih6^V^+y*`hb!Mls!C~JYc#2WR~x#oW0T0 zwQG@OTDFNoQsUG@%F2?eP+0NT1uv8pS22&d6^!Kpjdm(pqzW?~ux4$CVf7x6pKtrC zTsJ3E^5=*(oQBfds=BikVlm{9-E5S`oR%T@1>CF689=3 zLmhoXQKZqWUD=4dp~x_&A;R?Hc$(u?!vnvmS%m=Z+ltyjkBqa$LK-0a&|b)vJ{4Fh za}?R@ZofnuM)B@Yn0C--y<<(7mlTqYS)i?^+Vx-}bF;CRSfQH?BSIbp)g*uK+xD=} z?o7Nr9ns#jLsA{X!(URhVV7+0lDDC|hGk4`n&F-`489lfPX(i-QA{kdc&#NC(Xc=T z5UNcK{8-67H8qcoQY!kQf|y18`RF4m@$PYL_x$pxWP|M<-P{TUnmx*qT3ISRGoLX| z6`qZXw_&R)u_@u%Rp-owZq+)oofRA`b~i`}Z@Nm0@GuOk;H6Ebkxz>SqU{_7e?$!@ z2iVQkU#;$DFY2KjWPS~^N#@ z@kMN9%Pp4z6R!H;v?KtlTJP$DiRnP^c*4c|B)j3xo8C!89P>0I-n9ID>V(G}i+w+_ z)f_0@2cdZz!(E0tD)GALpLZEjkeLWtS-c1(66ev@Kilr%TsO_U2x-wkm*L0W4#mbt zneFuWuBL1THT7a@y{oB_m(_PPv^SmKuapTzw^^@mpGwjyaI)>FO)c-OKe!YxmWmZK zMWOAoEHvw^5ZTOS0|T4^vzRxl3Ajp3Bf@VGgKZZo1hk*`H6>gZHEZpCFq`}p&yljG zY%|ZPBsqK5{8HL+J*$8f3G}mXWz<>JN>a>ll#g3>7INc;%gzcl_+_K%?HHI;dM{KD zU<{3L{IRTcJAuB)SVhvN!c=hB)uoPr(EWqJ^hO5PulQE;Qh@?VeKqKn)(GGrGo|V{ zQU(^(wYPtr@jm9YmPhn8Ki^IW+9NpLn=ooiZdK^C?T@GR9+hhjR zLgr;`-4j8@t4q{^ngz!qOF|aDq$Nc>&eoDe=0tG?^9@NWLi_g4m3$YwN9IFVb4Dj( z*?>wlA+E@ELwC)P5+Oyng;-b?GZtcTR2E|B)4t1E!Zl7hk&PxTsrucCl6Qp@%&YEN zn(~ofWh$7+Q9x!ki{)W#jks3yMz6z$P#dDM25H_7IsGhb1y7X}Zc zb$OK*nhf5MA$g|%1awA=OOfR{OCa*ZS;-hAoslhxf<0Z(TpkCCA-`lQ9eWCkSwml} z50vc7UeU8|rt4j(CgOBJR9)zD7Y`R-3hDPUk=(R>pweM5d936il-Qjez7SPO=@J%6 zn!&En*d^UP;S3{Z3_~lnCt^khol)z+Nz^>+bL>P}d7eNR;5paAOBx_emZXzY>EWW& zhOjfnZI`&fM!^=OZX(W>h*z1hd-W(5o99_%+^1y5a5c5}J3M5K%BGGLfiY{g<#)L4 z6YIN4I%@=UrFgYyC&j;$nOa!pBLZvExgod;?Y+`6nzVdkJD17{PpH3NljY274ttk5$88?(kNg*)K91FMv|q#w*{kA`;Pwa-PA z;~?=Gu62;@@W!6`?Tq&TG50pvW>D} 
zfX#fXKO9a52&C#yR++rA;tL2p<{J&EcSb0hm<^b0Yu_;G)!L{9i3JM*nAo}wA)%AE zT!mei(tyBsg&sPHSjv; zdVJx*OTi#A=aL04MXY+1!G0Gx4|_|k3Vq6C4AY!-9BY}1vm~2a2wE1BSXC?T=;Y*@ zN?mk;;2`aAshth(9t;dTRBd;MjHr689B;oo{s>{zVD>nzHj46?VPJ6|YK5`_i7`f+ z4R$3!v~;z_Vk~@A<`+p@`MakI&{zuy;urCye@9zp$DXBthDJfjN-)keu)xv_{VU>3 za09!Yy$PfwO_n)W#Q0is%eV_vlrSnIF8XbYf8Hw{;QmJo@W|odvZLnl`56eZOsZnJ zG$-SIm7odKM7I-*lx5N05uaRvnbM!)M_*IPhw|~a8ck7AZM@V^1zXns0J(&ZV$Kx= ze)-TvQES3fCuaccq}W+83SA~8x!Pr_-<+n+tC2Bpdf-Xi#~I3qJ-~C=AQ^l=p5AM8a+Y!_5g;;h7^-m(aIu z&t8AGHL|YBmaGw-L*Yv?kb3gTNU>BOu_`z*Dd;pOWDr*vjVePzG^eXnDGrekF$T~p;VzMWoQHobLUeO|hqfAdct_~u8x z!ZR>7f#1l65JCwi$YxWsC$6Au0;`Ao%BSond_3Gt)O`p!MbdSxiF}#IA<1>mXLzzpH>f7DtElliEoCS7P%J1}> z8y#>Bb}Vav{-3c}G~W*ay$W0~bAjLr#7K@}t7Pq)PUv=r1R*y;kXpA2uFfp(FJlg}Hd}+3YYO_0zOVJ&ZzYk^|O5~PeHw2_mOya5)J0Zf~ zNsxr0#Y~u6i$K$`&A(_0I8TS56n#i&LxKq9KVN4#ZLfDEFSK#g^w?TBpXWCc`8HuG zbNdDDlHh+~dU1h&i)%()5HCO19nO@SSfRlP^fE6QVQ_KN#l)n8~I%Pqm>qe)%;-O*nzzB@AtS@<9qhOKDPSIo7YiYGSUl^@W{|OmyoqI_^FmA;>5kBK}hR3vy7%rTwgYX&p}-*ZqEa4H@(7_JhIw1+Zc6 zblJE{*giI!Gpz~vg6zUlt5hK1w}4E_@>$9ZzuL- zBKpR<7JRg<+U(}h+KlF}0c~1oY#oCu`OC*zMZVCE@559g?+n5lKd!7F`z(D^%51zJgmfC%X3Z0~l1XWR#(c~Uigsi1NX==u?;3hils4^ZmQB%py6AzQPDv;2QmlKXl_*Wat}jM67>9M zrOphwv(N>8KGtvj-;I%0X>S)9;VRMUT$o&~6O-mM7%&Dd6y^)Es&aOvEMS=lhx4UG?kRHQU%n|uu~F~M=8pD8WXKp%UU5J0s6DEtA3dP5V6QSVeuC* zJ|O4mGJ6Uc-M1)ZwxreN1y}jrE_8(UFFV}oJI*q-iG}0%-S+a^dT$Z>ZF|8g`cm&; z=Df_#BGTRCpIC}XqXh3Y@=gX3FYOvA#wmp^>(le;_tW#mXV0E3_!Q}As75Od#Ewsz zNH1oPuZ6n=Vs8s??WdW2<@D_0?B;E}9zP!lp>&?6WH{AD%M^+tqJ0%k_ry8B6|9QQ?IK;4Si zMS3?Z(3rYFlP*tPFwSGhjY^|o)li6KNQF>Huv|hzDokQ6+L!=>Y#~LY>vjGi^e3K0 z1WI1wr4A_ys|-KWVanZ(p&9krK&p$iW7H``kNFp&G=&6DB6(I{SLDv>>kqfnXV`3^ z=LduWSRGDB^)!SMgPZZD5E-#=Ptj*@o-!z%TkrR#fW|COlb)QBr`g{5<)lwgRXoZn z8zN+6RqS@24jP0L9*W40y|_CE&){*^upU!l#1eGt^vrnPDlC}U^JdSC=QU76)$`Ib z<9VC&PAgBe(~O`o=W4n?zsq>q{9pCw(3$bPc~4P3?dfSek5OSG6sw8-=dtjB4`N@5 z1ZhQ&*#mTVfIq5&wJf*7{GIg7^bM6G+v8tQX(n|cqsu4RQC%AH7qv9jgPkD!Ix6${ zy-+9^6Yr zXQyY!XGd~f(+^ynrynzSq#{uAN!CCdH<=u!RvrpF#_$m 
z1FOA&I8tkXfi;>YU>Cx1rezYfVUTS9pgoh;^qfWA74iuI#6pzl)2l*DE3OJlOo5rI zv6rZga((CX$#DT&!9s zA~!17_W0_}&9Bd&zP@{N^Y6z@#9yC3yS#q$I0&#JP=*~RDiD3rv=^{Hx7(8r+>edw z0;eO`_CI7tu<4xD9V%^qdUZ#c=aEoPumtM+NVih!HVy$7E&G8~+xC1|X2T$b47f~zXB%TBbsj)!hP&na~Tl19-ZzETSLvdaB~ zW>*yRS|SqjqJ6)fuCG>LC|XftJK3TavQE)f+uV4!46FQ!@XD=(!(F^ZV3JOYB0+1u zhk}>N-Ia*FAl9oROmw)1+8;}l+6nz680O+NUrzd|khh%xehXfEGgcZ5^){7)AJa5Q$@j(% z>eiH>5c=W2z{MC6H}ALsvWG6uk8W|Zc~gIUe17uN47~5Bbc(SgJY9#I?~e!qKK|zT zvDC@Q62z{@-|YAs;&*&>#2Y)j#0w8sDkmgYf*+84JfgBykWX&DThJwzIrk^}nb=vJ zB$BHIYnsg^-&u6(ap~^q7qq4Vh!TR%AC*xKpqLG3sy8*{ESzglEoz3M_H9}yusLm^ zVbqdF^*8vm6tMn{W}W({)2q;E8D2ImvAq1l>|wlMu~vyXSanKy(B-)^2en$t-iYD~ zs{@?-F?g|O7i{urtg@=qM0?aP#}__ zVX)qFiVU!#1G<(F6R}qCqQEs!-OuEbmqhz4#1lurBx>%U;ML z1Oz;Q@WLbxH-@G~%Ocr&{bsM9;cZ7FAUXg6ulams=y{EzO3UR4$)R>Tm{LH8=AEGL z$FJn=wBa1KhbnDKnl+W(4WoKd>F*@xStxtb+Q~63KivSZ66yH=rKJ;dVS0kWwn?B8 zMoBeQGy!R=3<`r`u8+W$Jk8At+!BYk0=Iw^R*h*#uZ*k9;&&-)EC(QzxQc<9z+rlu8=uDiSjY--q`jJIy5$Wk=}UD&Yjt~d zbRx~oADJQiQMM9)w4M2*T`|*O2)DU~ML_NZ7rKXNcq3TZUKI$#aEtpMzas+n;T&*5 zpN5o=Ifw$h2!+zPsB~i4DB#%E^`h5umLV8ksqUa#Yp;K$TpvCZ9sZ|fVcUb3_a}|Ea=}FfK%i+J10VGMP8o8-Q3G$NIq5_)vMk^ z8{O{^RSk)eWx71H4)X5iAm;kLG+3s%5v7dERAXz7d0J@i##?;6dK}PZk$&{Hz%Gfbp7NNOd8b!AX;Cc33!v#|ND^cbRzLW1-p_sY zl@_;shsX37){8#i#Q-hb-fhDD%$6l3Pb75H?3}_5Iy(R1=!2R;&6-2Ahi7J9Pi|gP zVinOkYlzvgx)D@i=#_`L-h>N-KOqx9BQ@@@IN{}}Dm5!wWX!;ozNzIP26R)t?zOHy z)K6}ZRM*Fz9cC7`MkxC-%lSl4%^x=P2!32T^ALS!c8_HIV3S`FWv`&lAJu_X#ic>%sj9!wb zbV>`!_+?0D4A>KX zuSm5A?>c?T3q0_QVoz&dX!&VR1m3uY9eO6S(FTqNKCjJ?Ns&rNJg=oRrUB7#02y60 z)W9UXytp6&RlkBb@s1*>_r-ZQ11lq(BNOC)NMckLdXO+DQZ&RigfWFgXGFcbZ;2|0 z;C*#B`wsRe)@x@OmL17LKQeycN@hK`Ykz&m4-aFU*aSd$Q|<{-id@@JdFT!3NUHVX zoLXeJH)2xM0?t-Eb;Ryha)@hi5a!1n-Joba?UEt008Y2xgouTFEo5vda7D!#>{Hof zEG~|}DsYwppJUEG7BW^{9F7O%Nyp_^wb{d#wNi?zM(t{tfoM^exa ze2VXJWY+i$&2kC#`}gbj6}o!4aN@#h?G<&l3gx-4t15W>vWQGY{ew&i;+#FxjaCkm z!GYrIEiZ-QW~*BUj-Q{OoKXm^KwmH&k+T3jR+3BpMhtpr@j>lZfjgl;sVOgnk3-n? 
z^NUowr+9RJ^%hEJSy;JhE87XdR!%qc1v$F1n}Q%5dbH#++g*{n|g%k6eNUf9VIzFkg}>8O{S(4X$~} z#nZ}30CGhimsdAdH&@nIt%+MCl&GXn`w+THnF7QF&5-nQEaKTSg#M=-t#IgYEI3$P zz^_?;J(Tw?zi1qP+>v_r0 zj`tE35>M>LrU7n z3jbD>w0O$?fPJd=BH7lOrE~PbXZXz_e;JNZ5q~b52FzmGYq;d=4cwhbkj$_L_a~Vwu!%PEo>#%5eOC~O89OiE%#x%o0~0>vYr{AoyJ(J z6n`4FRvIm|0VMqBM}~Svm@GILUW$*^?QP;wZ&IxN|?6v|2tit6W}uFScS4FkhG zP@kDSDw&3aiOjH!uPLXEmC8NdR-y9AL{+aWx$i_FU7^R0se4>v*}8BF;4i1cKF z%v}CH^}}p17FNDbw!ModwF8sJ9UB!i)YvY=&pbBis~8&8gk>2O5uu8o9PL#t6ZuTC zR|~RPw42ww4s6V7XG%U&2sY*nzz}AsYRU;4O8`0jOpmC^2}w-7erwcG8yai_10E`D z4jn7GO8=ntBt})$p!`Xe*oMbO#{*l#k|+;Pz0hDV8znlmrDMoqtI=3lfpM_1zPYx# zv9Y$fv;osBZs_RBGALG$ed_i#2ks2Q2DwNrDdPxuUO|Higb^>r*F$!)L;L% zPy^Lu{4;rD{!$Crfxt*+5B40X^5E`!d0K?lWcmA7ZPf32oDWbvWPBIkqR+2_f;$We@s+NNsG2tacUhES++L1m6nmcA)I z8IE3*6ezuMK~Pv0#;!&XgeYsM6ftkYmE;n-Jyct~Gl5ETAj;EGuA3u!ygEX)ro{@Q|JkO20ov0Lt2a=*Ci1n#jd4^WIXW8ZJvrL#YH{t8AdCepP8y z(mO>+3kVoos7Ll2VA8$s%Se!XC}jlddwT@l9y(yS3U%U(r8+b6)GKXnw^bW4mVZ%l z7Goz@f}+X#9qK(xTMhOsX1xZHDs&eT?=7|#dQ0O0x(*ziQT=t1u6Pz+{n`Bnc^<%T zUcV7QaB?Qb2e~xYM_2VfN{&EY**-eqth;wq26!wxWQO6RZ9+Kp;`SOBd0xpd`>2#$ zBP#Y-S})JlYFlP)b@(-G+x$z#YhhFE0~29a$!g1#Vo2SQNK838$29f_i4H@bN^;17 zDDi|w2Nc>OffDqel`QoObOqQ^1e^l_^nRIEl;2C)cleZr=PZv|!-gn9pS3hf^v{jU z*|T{|wh;E~q^nT1rTG2{>prIVvgm^wDRvbtI;aDMNFR2mgy3l$_S> z=`TN9&?uRf2Wgb5s*DG#u_^eTp^uNP2BNlQ(Y9DnAQWUWZhdQ&KQA_+=-x z(^DeUo!o=ePgmsJV=XmXxFBt0qr_d_Mi^#m%G{=)%edX40;lmvS!~Af+9urU<&I5v z@19-X+!nQcn1tfBnFc|BxPSc7Y66M;s4{O9li7t5IyVhet22r)#Y_}$l0wo}hyp3l zov|Hw%BmdHvQGU?uaZ`*+S$GS= zJ@{L%-1E_vESiFtkmA$8t@N5NsH+WKgN8{Yc=i$PvNvctP-3~U8P3|$x{NF$aK^69 zUuKVgcpQ7Y`xA$T@3QllEaORfqXHl$($}+fMNUHXbbe<<-EuC;s`S1^7IZ+0l~xu< zJnO8;3h@fc?%YAdDkjTlwV@oK0-6@y!G!o!p&1RuE;ASMZ?C7{Uw$|BRNb=BK#{PoH7`>z$H zqj8fG=3}BE&a_kXEysZF4{IV6s(5{Va`_HcAwPak_s`UOe286ej9=q$2J|=dfER%n_P{Qt+%P#md}cM6Cc{&aW8mjm!iHnW)$v9uPK_p z#|(rh-d%Yrr8@+%Rhdr6)t``f398j0l}c*VDYcstrjGbG(tcPsM$kDuKYDWmhfYqi zvb~p>_=`HsTTO2ogqDNbup|Wrw&I z(ZL&oe5^=IVk1;0lX-GWJ=qls{X?2K3!*lj~zrqV4xTiRT0 
zEH#!l=-9ZnMl#(BO&rOf+v;j3OU83^?0U@C_pu$8o*HttjD?UeDx%)?h4JgG5{9$i z@;uVx1u4t85>AA%%GSnLog@Jsi~1CYEgYl34?hGIz}jwvi7wG;tysk^WIGQipR4F< z2#I{@hR$Z`*@ns0WY8WN%xowB#RLMJ;#9_K2uZ=LC?LY3hISFxD~A{xpok|2aVFWO z@VceHT1aXJwX3!F)jeROt~gY~h}r`oDbNC{oN6vqAYT~}`8KIhSRP3nrMj&ovs2Bi zZ6*6f**nUmdMbH}iLY#O^419go@gZ%nSALTsFxJYWt61;_xyEn1xkWBOYY_M^2O`X>Qmr;+Xh*z1f%iB8UZsH&$b_9~V1yELC&7GxlCUS=YI z3$O$=QxTvu&X1!Rc;PRSDJ!5wpx+7XEqHUtFV+bUSP{8e8~W$EJVpu3XAar83_rt@WK<7ahi?3YHB*Rk3~}R7t{W37Uv? zS5TkL4Iathy*xirJ{U9>t8QuybVr#Ao%&1e1nTCk%1)o0ot{EXc#Axfo+INXR2V1f z$vBwv9=134P&O;Ne;TC=5&3E@?@#(Jn|Gz7N!+CFJ9>h z40@5W=613tFTouYJcg$zOe0>mu;U3lQUy&Vt<6j+X@zcEIIl3S46wK^bt#|_e$-VI z*b+@ixjhNkoeq%$-pPjr5}paLW(mPVf%?jNr2bK!t=9s^l@(}L0v1UgvUqqv3wKc# z61_?dLiX!CI~4kpajefFqgZ@gxB{TRWERZ|;y4x&50hUehj=vGvL-C4XDXR_t3l?c zhctd9ZetcKG=jLyd1@_nD@3bKNE1-WSYKXU+3Cu#mLRfrv+9}ch@v~4A<4!GkXR1>Sv9Qze*;o~g$h|mQ|Wjj_f(Y| zLi6vOSfM4Gy4TVsan(mz8as8F{WYLOCLLHGP)@0KmoRb{6*1>(P~nckE6_ZepU~dk zW!GkFl&S%jH9-7hgfcrJptA8+73|Yq7lr>p;9NI1%g|n^y*;A{((+OKaesX%qK z_(x{`my4Q)G!NV4QLdEa)WI!Lt8h=Mu`U{ zkYwo_?&GC+Xeop~vjyVD;6y_*EX9hAsv8(jlxCqMC*0j$ zuz0T^H{GTa6AqY@5$3}tSFl$wd&>Grii+}M*@oES=I({=UB0VUo~^I05m#GVS>D`i zKrmYeL0FkSUENw+?n=H5p6?ORxVJVbvey(<+KB%Z&8W4#!@p>13cVA`*zIC}f$1*mvIso|SpGls$)p89+^%N9)Y_xr0b6yQD*4u3 z`WJ-wjEAnhixq11bMWX9d@_UB;&iKlF-DU^RwG>asD6$vKJbm`S%fkI`Wfq3b zj1=|X=s(r}&;EvQ-d&u%XEgtB=H3IoilXZuzq5Ncm)?851Ofp<@12kY2%Ur?Qep@f z2&9oBpn^zK0jYuligXZ_jv%OjfS@1%ZJj!tV?>#frT*90gbGqY-bk3-*WW$s>C^p}lI|L^=$Zk;Ud;865=u$8m+4 zKZBl@pJ1XcQv*f}B>HnVJ*2U@Lq<-621ZO6|NRU>OJ4M`z}*2~*z$`iq4FuA+WX4g zcwa;vuM+d^Fu9#Q)y_RlH2>2)>wP5nP(frLcfb`ZfIWag-$sP9g_!YJdPwAQcTJTV zj19u0vPXOG49#u$y&**IejH4XM7@*Z^a??Id$R%NFwn)|MC_bxN=o{DyUdS{GM7~B zlV@Bbd*+ZHz5WzB0>Nqa&N~1jrYU-N&pq!;Hutf+{ed0Zk(I%H@KtpgHkBh_dpwF= z2>M0dU5T*gd4J|ncUm93g>>;>0g1wXL`)mtT-%^Kk0t!8XB#dxf2-Dun{rsh@JOt8 z=O|zGINg2Qg?m=VXAYSrxs`CpFAG5|Y!3nR6!Oe~bP@E0BSwBKM;uYi@1~>xMT=kX12@?AsAuW5dh(M{n1Iv~ z7(vN-e|63RtnEE#5e~2`4S%cpkp 
z2z)9%5iCe!hj1@WKKf651_qb=zp!Cb?c=ofXDe6Sp7N&P@(TwLTp(}n;AX80vkI~}P5R9TdD@tp|ufjmf`yB+rG`u~d1!D>gGCYf2DR)32%4$O#P2hEF zcRe6AizEYLdp{w@EsdBZmT^$!DkawTeLihmBqDv^e*C3bNi(|Ci=4NFy0RB^TVXvH zv*1|QY1)`(!ValM#fR;@($BKvPv9f92#kO|t{|n{JBMEpbD+R^GB@JJz{RuYHTdO1 z{;@#6uTZ+B!|IozKQf3I9r!WFtleYcdST0%Y%+#y_-qbt_ma#L2e zI$`cz+m)5#hhzUoVRkuS9oVDH{gslB;HqiuXU|7a2V#Ge{QNch#a5~irmdPbY1Xi1r)ufj8juD2I!xo@`gMP}>MW_eF!=cZM`4S%bkwYANbBzCmZ1hN+C-M) zMz+)Bba}wt-VoY|`-&vDGZTH(qj4{8M`e&J%yR^URCXs0=RAn-xDz*u*?R)nDBqDo zd-beQa~vCYcv6!wF@S(mqtny*p<;UnBs{)G4VyP@4kL>7oMsK1_3+MnU^kRp_`s+$ z&K=qyJ2fMe4r1@#*rNg445CSCRcjhh2>j%&uNf*|x-QCOs7Tp#)KxrX@QnNt1YATg z1ZN-J^`yJN0Brp18GcFg4i(~^CAPl@>L034EnKUJeXvvTN=A7C0&d7veC7uQ+tADF zL9sS;d{l3!jT~>B4XWNcfx|+%R;>01;1#;ex^>a`9)0AY*@OgmSd8%ONVwlV9;;Sj zv@H6D8D57z*xLKMtW?kTN(KZ9tQFwkHEM<+0noNmB};QWcvt)i~ zFu9|d7hWmog?L%E5HMBSt);`aql!K2-N@x#ztW=#y@7e;NPHne^)_e8Q7yTJ(f(8` z){Xlhm=Jaa;S8vr*7pB~V67yEq0}o~_Qq4hN_WdbGm>$T5AD-5)Ydt!HzN(;TsB_C zeGNQv)w?r3d5z-`JtAQ=f+-tptnZ=64FSQVDyPP6USo*w{?VKp*YA_QI92CeHUsP8 zwQZ$Mwe1zw@ad6tf`ipHmg7X&ZMenwAgE+!G~K)18oh_cdizh#x%ck{<&B8Wji7-b zBAhd0xA%zBU87uVj*;vyQE&@|J!A;pL7{Vfb1j&9^X|b9oRXs5Wx$(O%}5PQAi?lA z!a!XU22&9n-LPZ%ag|=Z2L@A|^BMk|dN^i=a`Aorms1#cseRVjw@t7G1c>?IXCE_Q zzX{Huao;4?1MCxaawmV!80=t+cKfBl>9BCjaHu&PZcb^*C141y{q;AxFu0&q$5)DL zGVS*zEVv7(XP=muxF{Lip#|ACDAuJkCzvFQ>SE8W;SH@CHh0L4uJJg&857QPzwU6E zA?=-g5*RkkKZ}bu>2NvJnRxDs9X%qGGgV5SFcA}lz2rFNXNy6v-kiLJSB3=*oP6tr z)m4P*!Y80Gj|>eCjo2V9D{FjmcGBqja5P4SIv^qVQc+Y&BBJo)A}%p)l&3SNkg<{8 zkwRw#xl!pU2s-JBnUIl*t6uP0`XpwLcJK>92)8rV(=>cSqlgKOBPKM7n1Fq=@@LF) zd!J|$j@xeBcjJNDED|~rCEkzi`28}IM==5)ayzGIdT|odxzAo;ysxNhonmCf(qh`sj5XdSCfSg72s=&{SrDQ zXQw7+$Wm2_(H^;R0QFHn#^d91Jel&1vZa`K%}mc2FWO7`gF%n>*#<4zgLy~N=uY1nV2Av=&qc5t6u)eF`)JiG^TY0$Wn zkOliwBc7i_L|z}p{vc^oCk;qU$?;^7p85&RMDj`0Cex9IFIn_U92HKC9R?iHa04S= z)(bh1G<^d*zvW(XzB@5B15{(r>-x+6QYlGzh_jRRxgj0r^m7op&dWclU*lz$dr6+u zBXPJVMP0J)U^Dzi_FcdDXnczky^)AsCokG`OF-OVAMNa`JJ1h>;j8lqs;+(wGwb(j 
zd3DEM_w;0r@+gM0*|azK*`|2pD15tRB-vi)^iGR{2&bUlN*FpPds0T|Myp??BqwH} zk=#^8Gwy&OqtEkb7Fi)XF*${-fFE>C%o^Pdu^${HGOl}~yi5eO*DsGuh~!MAe)EX& zarjCgwuq6S&==@cq#zs^;*7>`{g{&_<6fT>AFoKQgP5IskCbjSRx%b|o|EcG^Ng2O zOGxshq~IA@d?rI+;-X7tdQL`i+9>iHE+KrqGpLanxRjGs!De|6e zqx2&YI26VY*>KH3@=>%WDJ3z}gO5{U?@wimV*x6^SRbOSVU@A25EsNTd5Q{@dz#{BwtFqVFmju9I_I$i6{*ff6NNc~oG zyn4Ovd;N3Z%7L9a&w%7aoEKMuYxlO-cJxTbFie%%EhlYk=VY2N z(B8Il%ItaVL1S*nvTcGg1v&Z|R=_nX=6glz^={ zk=-wOgi@OQVxw)+L*b_9WM+?+g?sIXedVvk>~x?dp$4%_iA)=z8mq)XlZD-))3R&R zK*sw$ih=QaT3r9W^jd8FE1%85(>9g+qyKs)W>6n!UQ1B@FMqE4QaT@fdcfJyo$Ylh zbm>z0MdJidmoD-ALPSD*_t-x8h6%qNUDj5<`6O-l#0z~EKb@@hlGNWzV;{Rly%=9tv)1Vbq zUn#bf)ajkkIWaXEo}ex9R37MQuk$MDX-lB&bU&KW;7#z-%)W|nE1GE&0yXWx9#nhj ziZiO=W6%cbGU=?vS&uJz_fdlV1yx&&e!CO%BSE8!dzKaAG?a?w8tT zP6G;NO-}M;fn2iPF*=m3YPj9WF=;ueGQ;Zv;dv-fD1B^^m3mLp9}YZ@{@HBUAMu>B z5E@sN)t8Pgen_vLp^ub@Ae|NMG}=NRZiz`U8&S zj$RNP=8mUNkdsR$bdiz#{v0(VbVHBiw6RFV=zDA{zbxq?hshS%ZlcZVL9U`#;Zab0 zipI8vxBZSJ_PK`P zl2iS>FYCgg-It%nhreI z#RG3AImt&8$N>&b^@A+nZk+rko%a<`&9-9)&i6JjAdDg#oSB{~8HR2ueF6PS+zW6J z?PC4CC>3GhK{*^k=r6d=@E6;-?IZ3z6O%I2aS}_04Z|{!^1Ij*3?QbSW*0Dv9$LUh z#rZSuQ}*~uhhFeza}^BLwCSx#-3KcpP$u%q-!35NrPlkmwG4jG_>`M(h#!S71bX}( zS)b~*iNl6~_fRG8s6ZKo{%OhRZrq5+G`Q{{>Ss8leo~SXnFQmP)h9FgepHz%*pry1WYX^|1G;QEj><;w(^C73vRys< z0J?HD0?i|P86zajLCLa4rsFfk*`vcb%xn}vHozHkq#MgLPKe_HXg_%yk$-H3Yfqo> zSp*}I_wZowy~dC|{X2w-lxH+~@JE&|WrJD1XYhOJnAxuiDA${BTN)*-VE5ESMG!^A zUg`J{otn&0{$CzOJum-{R@%=)i1TFUWTt_06bE<7ge=9OQ@kDEujh3COQ%XnfaIq5 zi73-{Sd@bHE5jBnjNG(Iu-oPLgdWQ2@+C7Wg{&k@`WuS7dirErRsvm@z)zCOIRr`f z2z>f9n*{dnS9TrF2&o&YLxMX<_OG`eq`~Wl@6^IK@pJ{@(laOeM$#rRFkQ7gk3!KfzbPLKqXs;ei0z{8u8o#9m(KDVKf&+nDait>4FHH6Wb zrl%SK>Pvfe!e6Rn9prE~b_65_U-MS|kAWSgUNJWW8-s_W1I0-cMoztuB)!Q^k#hgN&V!n z79{7W*Yix5XlOJfO$b zEmMf{gIKtE$dSL);CFtLJ%P4SHOwZa?GZ{Qphrx7ZcjAQ0NBeLdXOEo#G$kOJ#VU5 z-{id14a(zDFC91KyxlBDN1*v$chtj2z$ z8%bY#sYjBR@)KoG65CTCQp~&~2{JX_rx+*?YZ$~pe7+L}DmE=!iYCT$a#W@T&EG3? 
zz{F&a&h&U}L!aA!kiJ9{nUA0|{{vq_`2#2<*w1M4ANd*b%rPw=N>JX&zjZwVs^B$+ zO=N%A;tJsd;k;%B>1do9O3c(sTWS^L`c`vw2>($sRb#S!dj{e;zbfX3r^Q+EZ&RfKHqe&i+_c z$h7@`O=7Gx+4D7Atx=gu0whKCdaSnwB)!}j5-8Kn>?ZnFhy7@pOS07rM=(ucsc6jM zI-uUZA&0;AREiPn7#uVDTTs6-C~as<(#KxS_G$&2UUFaoq0}C#-$IFMzi++G`%^Am z>v>GIu<9GcQ7q`W(c4kx(tcH>euvbG0rTFPo-q?JnDeejYkMv$;^!V(Ns zXwUpD5?9t*(5HFXpN_P*?${| zl^Q`7keBLFc~sxlqy%sr0zDU2fg*CY^yp`OZ%^)*s|}PZM8e!w`}ZN zWbqE^eT(Qmy&9XP_WIqe>v>=HyDhHqs5HWz<@oJy(S0N-6dCOJ(ZTa)_aKK)In?qE z(dmkcwtt}~a`luOD5VG&V9$IM#sjhrJrG&T+reHf>79?VnZHxAy&r^=**2xpGXf`k zq2jP%z@fjx#t(GhzbDa==^2$Ezc?$euwltw@W2omK6kvF#BDgCiP|HH&-BQT=SsJJ z0#=o?JlPnU+uL}eMkl9?z*rz0=bvl4b3Z8a5xp@B+nPA1t_pE@(y+uh3QXs%Jw6_c zZ-7t@Wn)&=Zldq@fJn@KdPaCgV23>poQ=pr{1)sV<(^3#puz!^*j_l*&%M%E^s0ls zzxWKLV+5>CHjZYiRUGVg>xDg&(NSId_v+pi+sd)6k-xz9Lwi@Znze>1NE-oPFlsbb zz_G@U-5BUy?l$(7=dG|@{4~AX;Pb!+gK8-Hdv)m06$L0gRJo4{qEPT8~6l2Ux!|it&aC`k(^m~ zU}KLbAw!}hc4C&JAG~rnm05|>Yt&UiS@IMZ!)7Uj+fpo*l%9i)JyKk{ z=74`8E3sLSbBqJYJ*1B zJ*3EwcjwB+o^}2rBKI9FB6-|WE#(Kf5CEZv3@+o<3IAyWWy1rTrGKhKkmT_u{imAc z&|cp`{Q}otx%)3KmdZ^N-9&e|ugjqDSezrcAG}>v4@}I@ton+xRSL=IM_@ZMH+ci~ zj~{W>4{Rnv@Phnb59;K2>brfr5r6w!HzQ9X!8-W3V{UACE2D zh~gBBfLVQH$SGvA$M8GeMlLNKHVi7qHz66yY?MbX%}3a2$d+5(#Vi?p9)u1cOTAE$qwi5XF@e<9#L_rgN*deHi3VZFe<%LTyN>aP5z5q7%qr}&&- zc7kVIPGX7*nt;{L+_&!h&O{L6&QA=-6S>dHsrIHyA}V#BAW7htmG=+ji4Yu(!1-VZ z1_r^=Zih#PMJ20X2_}4`8URIr^%LRJcrqjGJNRTZ%}Bu0pfIe1?>BbzIQb*D`juR9 zkDwA8#Vx0HJ+nRVV$#Q_h3f=A5gTeh5bCb3b0gfo6yjyU2gR8QZ+ISgFvmXcfg5n2 z7)WW7%?oUWbbmPIO{?Z{j89HU!E!$rVs>i74WJer}8WXa6KG&J4Jt zc91jD1vaFR5d(8>$9Sr((wWJtnevAGQW)pp;Fobb0o^DZkPrB6buXW}tNW;w2eZ>N zV0c*c2VsgGb1FDXf8Z={!BLe*97J2c>0kSc2|VxAoYV*? 
zeqwK5>)+g|r&HB$-Rj4((n0SG{=JWE`q%o$l6id;*A)o=7ev=Z`YL1>>VRg28ij@o zA4#FDj*pDv-|eK60t=b+Dn~^RGTRuj*D`(%)B>7%Z8&pr_A%_tT$$cD8$s1> zryhQ!eV8+9o)D6))w}aPG2srfb5f7WY6SL zJipbM<6-3f#EB7ku!jBfaEQ5f3+}3}{Y%u_?fmj;yBp|S+oswtuRXZoP#*{RSz=$M zZE1HW_}}!aSMH9wn_{pFl&V%S7qB+tAr}>;E;a_2>YD4xIRok00cwZVAv{kv#@R4vI%K4hQxld#zrA_d;1e+s(L> z4YWnx>nlpa-9;#a#Zms|-ze!jt4LX#PvUavE^0|X^~FnfXabybrNR`tU86B7?B55L zA18~jROI#EBtvKYPdU0CJ^`zoBgK@`tlXZUL7ql1Y5fcNHx$;z=5mt!D);Y(Rji5q zi{QriEBjnSD|-Z;ki!+}aNnrgH*U7`>dOak03(lKHiyxHjtt{j|1bJEchraezD_P< ziundPgrhy>P|nYu+LC8Oc@He7h;Mr(URLvS)Rn*D4tVfR^j5AD2eI}+P@A0gWCk1= zb$G%rSDgu!3p{FmBFo_$e7m(4=1jtE6rrENxx2Bs?Gv{%Gd1rsVGvE<+xp{#{&pc-%|VShn*W>SRwRDsW=_}{&=y2?y~uHp;?Li)+{ogcT~ z10N9Y%wtd)pgwHHs!ggNpV%r=vk660=G=)>BaDg#`slerjK->}VvZ{TDz z;O*v|a@`Gd&;Rq$J!XQve1Ulde@$ZN2P=?(X}I?8hLY9osU4~EwPQ?!>6`w++$3g} z!tD$uBmC~Ev~6B=zFj2vMo~4WJpZEH`ZxFxqL*X*^8EZy|6I7A5$dPPvgy0(-CC{W zt)0F4%YU*g*vu$X;ODO0ZGfmcl8aK9rm=soU|#KjIX63$pHCkv>GJdDa|u?SlWw+8#;Ib& z=fF2b5HZg!M~R%8z@#sX1<#8|^+XUReDfAz8kOUjhMQ^FxRYx%d3Q&X%I9{&No7T3 zb9}AyF@$QT|6{ot>U-Musig*y<5Vu8gu`@#_XDo;)@oWQX7B(eym-Pq=D0bXF3mAl zI-ye$aZXQh<8vFiC&01wG%b51Hj1Ps!U+tko-zU7$HMU-EH3&6!783H*hTV@E&o9y zVjna5+)FP%qKQF|V|o})s9TnY9`N3k`N;5lUNf!wiN5qIdad521DfDJ*2Zg9-vm74 z?+4~JwB!dkY4H*eT_zm=BkI?$ufpBQL3n~RLE)hY?bAE1wL8>}XWXGf(n7&W)e{hN zR(=5oAuZd0dl5eZc#?ZO7K*Ub0bzAim6T@+3fz{p)cD!oOyxc3_xB(rQ>tM<-I}|L zfMR3KVR^5j=(~5~zq@i6Vjm=9Ap?w-k`wA+M4q1bNSRz1Qfp07Jv%2)z(l?;(6WfCZmQ!UtoQJwvttP};W5ouS4J9})?A?H~28l<0Z z15@W4N@0yqFGu2JJc17*R4!^RwX<5$r^418kt_cWeeG3nYW`5FKU@3x>pGe9-|I#{ z8jr|HO`V8sA7hgmf6n~h@PT}et29b^IIcnC7{AV~?Y13P z$+>i2wC+`I^=+q4)u=E?t=PH$MFZ7Zoc(*K1f_7a8ww`1`(5#a|9Yv+Qg-t8YcVo1 zUV@&O@$s6LA)BN0)orbDF828^4Kt_A?X`C@}r1}ZKKpk>nU$-?#o+f2IU zI1?Wt2#y|$Sx)&&cY?&eRhYaU4R)FX7PI^(6_|!ga|bU~8b+_H@AsfT59F@He>yp> z*dWj!ur;gpSNw*odPYjGWd(T!@^^RgKK1W?Gg@#C{57_Arj;6?NZ-W>`X2dTF@(y( zk{RFtc)cR&*Ks^e8+SOczb`|XPa>j0N;|{pR)8Y18@Y_aq zpkcc#mTxy_?O$L3;ttDlrzc_ePNrvsboXvMM{>s(H>5x4!QvO* 
zsXJOW(tW4u;#Yy*XJGX}7gW!7LIVm#Rcy$m0;T46u?e`p<9sy#XQ;WXu0;-KIc$8T>ncqj@*aP>(kA z9*fHN`246~i+{J-F~7N43fZG;H2gs(P@%(izg*^4F)oM9ZTaxmisw_^mxfT9aL5qS5^jflC)Du>NB#N@@GmPfsev?L;6OOQnDLCXSE{`)@itb5;IUil zZDbN6r*o?c&QKxJreA&Sg~EUwIX;qg3HXtspZ7JKU{zG(nvdUGS-VcEc0O9M#AL?C z^rxR(h2$NZ9_k&ELO5lC&o-&%C5*;#mlPh80eR$HW~SXS92waAd~rSuXYKHH$iDK` z*w7ju?{GQ{2mW01oeqcT5N6e)u3-fn4yP$BtHR{|$gEh*H3d254@Pm0POI|rdyuGF##OHva8bh*1?;qn{W%4frOLXJc?lo-(e~88&xPF({r7Pp^T%(>M?pZ@!RB+KkyW~I zU6yC|Gh9dbOS6ADS6me)1~#@zZePpPy-m#g`K+?hkK@M`nz~lV+g{$<$xEy7uM_#` zux3`pS3Bal>djq~+`#2pn4+aAT3JTgx43k+b}d4-)0JEe5S+WeI&esh(vA7+``frq z;o7WZxISaPS-GvNR1G9tDH6KFaPxG&c2&~k-nejfHASpzM?Dtqx+;;=72g1tW>_~@4im1!XrDRF ztmSk~Z^|3)?yfJ8Fzcw7I?XygoUWhcy%IfLx0>M|`bkp3RaCNDRYIrPu&`CM-ErJ5 zUdSq2{3H^U3tFWLF;PMBh|{d%a=J3*uciW;U3$5GlUKdk@#T$*xHd82Izi}!K5|%& zpYjtnC?Eir)gWB6n>o$46OOIPoJlf&Uu~w3ql%Ve^;E*)4GHgmh;UgE-mUx-(`O8( z56IuHS;U-v5E0Aa31fc3NT!>EeJn==6qMyy3EgKodi+iJQbj&9{cEP@mt*<`5ZiL( zTTNIPOk_Ehb|VabpRkjB`_-XLcNK)gpdOGf@2;)I5sndyhl65gmlI0I5(Il4?BY$j{f zRKEB7Z00nFHnki@WX`40OmCIc-u(*GRU-+n%eylcFg;9CBOZciInI_~J-0Vz`q~b{ z=Vh?PA55Z;u(iXG4Nr?PdYXEELJ7U7eU<|B79{Vaq4y&29j56bjUwV2cRFT$r~ z`410c`s>dK_sAzdm3_3MGjo3HLAXfPt+J%^{7uZMdYJHsD};~B()v8k^lo|gC^Qdx z!NK>=Ex>dq*@}xW5SAk>ig4f&!j&@#%S#@e705NDQog0Fr0ut|mj{$)3Ga{L-TYE^p3Y|a9m#JWRp(vLH_Ryy zO=&rdv4l^SBy14EyH!dN4%x_@31ygm)k(NsmRvZ2`Lk9K9+mZftRU~2l7`z8nZHfu zhs!b#ZeY$1*{&yL?;Mdb{*2`9j*@$Smp$_LeSGpa$-^J^X8H>$>4%E3{Jye{z5bBp z?;_kM>$71Q)A@cUY;O=AZcXTs?^+@yrHy>Pf|S`uqzq1x^v{yKkt2C&mhAmEV))zn z>j=x|$kOE9ozF3SM)E`GT&8zUBCHWhcu>-|({-jZ`VhuTPF}u}>2ZG&HkC3}c|X&u zBMD9U%)$dqFa4RYxNOnhN16Ua>VdJcG)-4AVI#MN@&H=3XC+S$f{D*)o{w7bf{&j?YqQV-7BP8*e>VU()0S|^G zy-7G?H|iVWsn!W_r1>4-sI#a`$msoj0h33k0*)D25pZn32LMwhF+X+A4}fW{p9V}{ zJQy(JN($h(vmU_Ali4vb8{RiAv_L7Bj9V14FO2qZZ>_6FG}Yx-#@FoIYsMLLoZU71 z_c|O#C_Vt>2&rz)!ZURyJ`Y%TV-3I-U)=-PQj`L0b%-4t*K!eHe7oI%_mp@LaL|X9 zQRI-7+mIeQimdOsMU^-5;^oq4av_T(O+r-#IKzoE)0%))Jl6vzsG06$^x_E|dtjkdIkEY(DUAyw0EbALskJ>~!k%qwO{ 
zCeS|fV~YB#W~CR^j|!Y8Q45};T`P2?6JX&>?D!%pS$NSR_W~Ac_7Gt4#jOELyvmL$ zIfgYV^;JWpOJ6$*SSCL^zwC>RRJm2@mRjzXXVmpRv3(^nUKRgPKfWdw^QW(iE>8mO z7k`oF2Sl|4KnKO3*MQy-V@ZQUVieI~@n=<_BVr!A>`if#S4YJOu&Q-Th%rEKi3Ke4 zZE^D_pfjRQG)VuwxOhL%0`rgc$XsHsIg6`j%>58;NwYzt=>*=pb0qE#Zg~W7$kMWa zL(9zv9EOPrhm|tJ0!)o3Dbo7>0GNI(12E$_NipuiX~4{DG!R)+h9jL5Us9D`ZuT#@ z4t?SQ!0LPWT+K23Wv${H0mDDq0$95+VV#pCVqK#WV7+H4Wc3G=!<#&}0E-m*X-8G3l5qpu`)@==SMV}#Lh4=}h00&0u zcGlQ17}sGl9tUh$17b**}!Bi|F!r0g5GZn}${)20EHZTpnfNcTL+QhSBY0PJ06I$$5cbl-tc6jodz zmJ%PA1K97I7XXKBCLB7KZyxqJHCjRq%s^Q8MUzJo|EYtV;n9ZxlP0szN7Q8Rd9q>H ztdS+jDWj?k1jLuL0Fz(81UP0k+c);JzW`HyCeDoz#cYU65}*nI?Oy6E8nnjwa70GcVzKLPZpSo{jm zW8(3yK#z-6RG_oOwg-V`i?8{vIbtLm|Ad&(1n6f`zcA2zBbue`H>ykkI$#vxVC|rh z!D`Jm2e;-U<~fS+BC`Qi!c%54?dm48BweN#%(9`l+Jyls&>=IGI`FW$zZlRFbNE7_ zH_aBMfR37fMgtu)1vp-EN}u|Y=EgZ>Cl6o`J`hbAdN7O?d#EUT?GaJp2GCp)ULI(k z=t?rq7jww_OGVXIs;IIzDk7uY3y%VpFGYr`^z&FSNsWB3A|1A6n|f}uIJOHJTSQ?n zxwTa|t^%!cPU!}8+<6^@lr^hyqy#c*z0eV`$tGwrtLfkQ0h_g*57<29EMSZMy#QOr z!~(Y3OFnOX<#D9j++?q}&E&AQ-A8kgZh!L;zzz#)BBx_^0i+|hlOmmN{0$g&m1RbM z2I9zfTeqIYbv`H%7>&3Dz#`$4=29t8vsT%|?9dAL(R8}2kbbpGvJDL$_ye$USZBbt z*VvqxQ!@d(_Rj$9@d0^!Kod&vkg$1xqvw+=$K4=zjW0_6eQ-YO^zf~NfYZe9C2`|X zQJMN`w%G9)&^*z18qgw9k{$Mxc={utXT|em$@QZ1GeCR9Z+y`{(e@_LK@l?%=!jU$ zp};XQ;7eQ`7jJC@Iw@WvfliAYzV)o=Muz!NjGYa1PW(ll`c$N~1iC1$lF(m>OzOCA zMKQAZbuoc#{*x#h0rZH`;Z2}NjbF)}uR72YBZ+TVYP@R!J!e$?0cf=` zgMz-!cz-m|Mq~3>AJ~hNCgiSH`!L)NhOp ztjmwaUbg5LBjh)rn?@^E=?|k1-#*ivTm|GxRzb)b*T*UkW4FgubzKQ~wH2fA#Y{T}G5xqlYWHFNV?pzlmqEub6b zw}*lLGPi>Zt*OqlBt~(I$ipk0buR<0aTb{hwBC7~BbMi# z$&~3Aon1+#EzWMOfwns}_WyI^gd* zhZn%rd(K~40DbKI>m8s^oNuuW7o9)SMttEcF$U;MXIsAEC+D{$>u=5t^6k&T6acNZepwB)(aJX;XtPzF_1tFF z91OJ6x-kl9w{@W;&|d3`Y0sUz0rE2)aN{a@% zX@zbF`olWX0O)TkW)9H8kYAPqEe?sg2k7aLi}wO84@nCFS`kwD640uU5u1QEg>eFyu4*8O<^)y!zs+-4LRl1-SP9)1d^b25E zyZpFYe@qc~Q8jiFS)!Q<3?J=t}eShZsn0GtIcnx@<6lK0jkRN2tHi}k6{v6|(qCig=JJW&Y8l_l?dB#z;eZCR(2halJ-fw^w8Uu(H 
z8J7kEJ!vf23AEUFr4!H+qf#%Rr;MGvdfM2^!j>AxQFO?9;~2UzHZaKL888UnU#O7?G6k8II8gQC}FH-uV}tlmAGSE)arL#+l^9>l$d z3F`qH-Kd3}#!IPln(mlx;%xQ56@WJJq0UvHM8SoLYkNh)L zgz_!ZL@1gfE8c7adL^XO3^qTe>}sUrhF8HO1Gba+!J4& zR9U4iS4D0+u02}q_fz9`D1lqD)>p)MlI2^GpJMTY`1N6+U&QB+sQcw>+(1T!{j~rq zp1uND>Cvfxl|u}`DmPyStlEp>;T}lMQSIPjz|PBA=-3m~XsOHD(+5RO%EM7Hu&gS% zMK8Td8S=oYoXB9aDtEx4RRcrc2g?J%;>CkNPl)D!0L>MRiROtC9C*wZP5FZb;^l65?J7}qJdhfL-$BE`%{Y!e zveuh#V2CTLShXr8+PxJGRD-Jq9nc)BVHHYoqYU~lO|#J;t9d42^6L8lQ<_jeWp<@$ z&)F2FnpEr2Rr6=i--(y1S71jA(os=vh&O z)_H}P&JI~Aa>xg(#I{d>R*U=#f!2umEM>jOqO@%gV`l(u6xX%`Jug-!0KFvs;JwYF z;1r-OB8w_%tN5WS(01|9BA^}O>TaN&Vjm0JC0?fP+9MYA1A0Xir_k*cTW1386Vq9< z{i1GNpo8LKEYKU`eHS@Dw_=mYUQJL^L+ z`$wQpM8hFKpNmmcg_p!0_VpLyW7h0jQJO`6FSbw;eiR4kGXEyxZsO{9(ToI{Vw`|{ z!PzZ75om@H%l=$oyh`?6XmtD#Xo;~T6zD0VNC%*0#>>>L%Z(+~fSxfPK~GyNjJ>dq z)>>m1@2xWy^WJ)6A$w}0@e^fai?OpY&{pFGj9A!_A*E}Aq#{ds zSnQvCfW_-w0xWU7AYjRh?2u9m$%v&JTYzPL?*v%33CHK16_>Ilm8Pr& zto$QH!m9GzFMw5Nw+3`SUlg!f^nHM#waMt!d$8U$rc-Z({Y~vss}{{~?fP_s>wLp! z>Q1ELkEn;bTJ_?oKk8>te>UjD#hHdb)>4l zdZG(p^oumlG2xWq&QDWSbXgk@*mW1RY3xHs0K5HA7_j@9#{qk6d=ju{lOF(kjU^ZN zj$@1ZthfN!w+`DFcOU}k__Soee&;g*`=3|_I3SrEHgFQDch7Gt00+$_Ti<(-GBtQA z)zFYztk2Nm+W?1klP<~YRCV_);|MabHQl7)T`2`g;k3acz9hSQ5=gO;>E)3g_0TQA z(F0Eajv3E7kKF?~uu@L(Eva8Vg>>4GtAOb!2p3 zDy&$Z9MimNeZ^&Ecg(_dMQs`WwIY{=mfOriD*VEiS8OvK=}O@wK;`%ifK|Fs1ga*o z)7&PzrP|dDz|iHaX7x(ETjL-}7}ls6V9o9eRpqLSHoO-;fcK(a;=T8J0M;5)3oyLV z3xKtE-vq34i)>$a9vMC&gJM>1DTTcL5_V#P?n{tfA*ygz^*M2t(`PHi6w1gdagO4? 
zT9g|Kv_|Z#4764>gNJ3U6FFUEfWv`O@5wcZjjc#ovT zgy?n1m^8N@;N&dU{Nd&-Wr`@utl1*g0(wqFv;H~pT`o}6^n$Ttb-ePaM?{jo8i8_+y+ z0*O7}oJq~U)LcXETxLEV2K1bHmilpzS(bhCy_wSx=!W?K+4y(!cvYZjP8TcjxO31} zpn1+B{{Ss?4x@BF>D)moZE{{GgKTxq2b}X*=0WGfM}gjORy+lC$XSM^ z9Cn7a1$xtYt_09gXM6U_Th1=qflfHbKM3@Wvnkp0r1Q~AK&PCesT1CHwxk#Hk+UDQ z<|od?c0k`ahnEBT$@$_Opr4(di$H%me@y}U%Q=Rn{OxR89KZX=*@|@8X06Eq+HY0v z4RqKVL$*0;m1QY!TZjJuddGT?n(IC5g`PlPS%0w&*Q~` zpcNr&_`9_s8>l5-4*7!W`$)*Rl0c_Jwx$C86mq6C&^*_hF9I!g75ffolWP+hdZ+6a zHOekmadzr%*Q!5pZ;xyIUqG+8;!XqYb%jR&?Q>;NzaDnI5dw7Hb(~#%(KV+o&>ya* z9|Ha9s?6^A%k@utpub%UshSq$yG#*ZlJCfQpf&jxegpJozV=0c-pN-FI$jDw**zzb zQ8%Fh;5|)P-=yjf0FHQwF5AfQWVSWp%?rpnA})~!-V|Fo!FW^@VV#eOH=Y7|OH`+p zep^(%3UpljNd`V4o+=LXjxd)3ofPLc1Dz6O*fpoc(Km7RwP>&f=$cr~I)5uV+`!d$ zVj(Ls!+7mepvR1J(}1=af4vQ~-MGqz>@Zr=&h0cpsjqe!`^W0jj+)`hmE%j106AX-UsxiF{2mIQR6xr zcFZV7z5bTb_c5TijZW;~*x%d z;DcX9My=~b)pe=TgK%9z4-KqJA_v!12h{kn8}aoaq?>f& z+nYa$ipi2|52n$n^BD(9b)TPtbi}kPfc5T&V`kN__dC)JHq8ZWxVAN5qi^U8G_J;h zOOvxdBd6&-;egF1utv?Fhz4wd0$}=gQYE(^fj%4uz;wu{D5EZ7TvsKi3m5hTA%=&eQ7wcIis>&~epR4ol`U34Le<@2lwsOzinE;P6R&d(x3d0Y|hXB|T}Y07pjt z063}ye>?h1zA?FFEx0q9+kvl{4( z_>?a@EB4F>`amR41^P&IB-5W0YdZmbBDx#_x*%3iwl0eEoL{*lUc7+J%i;iu`K72! zf_){T+XGz_V><$UD-QE_KZ-`=gr&w~{{TH}8!Lu*PS6GHmM}z?##p z1BUO}3|RMl_C~}U{;FPMs?Y|XlCc{KjsqJlAhR`@LN(QF=`y67=cA2i(SGw`ykaxgBOk>x8Xf(eWUdhmBz0eM7qg-vPQF8bbXpvT?g3Wk9L49Q@R1R zs`oKq>p^UBo9*)e+ZLl5X!qdLfbAPB0ql^;wsx%fH(;lo@qke$69A(tux2qODPEno zvGrYEXMc9x@c`1X#YY2nD^A7VW1a`FXC11UK0VpPeKR)##$93`#@FS$`bGB#>_2D? z;DA@yGXrZ=0pIf^MPiVP8sXkADJ+9O{seGH8*=8*H#!3jlkZA=W){-J_pmocl;p2G z^GPnZ&qI&vGsQfa}UNL|ox=+mRi>p^f3V-*SIQkCI>*6}S z!u{e+s_O${DhYE?gpdc`5Z|$z4~dxLKyQkPL`TJ!CZ0Sd3jBbpx5N_8I=n3gQFxDw z4ZkDvgm~|5pm#(Hd-tU1R{-dgX!;A#yP`W8?zFf;?l>dP@qO=!_Wgi95UEa-@}WrV zgRApm_(Y&j#AI^N1##jYTzw|~`3mTgxUdQ63vu)&&}FfX0&qo4VO_oyb2#d}Dhgde z<~7mOfvays?NnTSFDlW=ye`hOwLgju@Ghltt2?0rZr6J{5wJo07QjY*Nb)Anvx}P5 zpiyb@#T39+7i$Byanpvhn@#oAA(Oos*^|C@R46MHbBOHTzN8Tle$T7C*! 
zM!x)jnX&BY_2Lss^9B)_1hi3n#SVC0gi&NSi9%g~UKEK`Brl1!$v~UMch!Klh`H|p zfx-B`?cxeu;~nA;)^Mk&w+xxP#KmDiyTt)kZ;yzkBH1ghk}>y*Riwb{;wo4D4~Tw6 zaP@|mN-j7e-Xaqn6K%=RC&U|6aqoyY+J=)Ni}W}pmKOy2R6N`f=rfVVj=LzHT8yi! zV!;5Quf+D{K-a{f7l6JIqC3zB#<6%_8PQFZI$Ul(`vY>;m^qVyUNCo(*S4A-PW

l@pu=W&7tqh<>e@iJ%-$56Y0fh=YmYiDwsE%e3>C&a=a=YY$sS=>vXD{R z>p0gd5RL2lv;F{V5W$}|JWpHIsCikW8?U6CweH>-F!B{>Y^&3Bc0}|$)XJUrk}tdU zB~iQIdknD06>5dPcr`ls5cS*#&eg2JhtAdwfIf2e=QP>J&Jrx~obyHc(&wF%LxHY3 z8^40gYtA=Vi*K9@$t&MF<4NrAoO7v~zjyX;i>vF-8B~BjI7{Y3=8w+m?7*L#iDadp zok>H0reJuEdyiQ0XK^*%T1&o}VWm>GXIg!zV;_aBz|~{ctWZUpvR}?azTEOBY1Llt z2Yc|xRqB4Zd6c~FTUr42{EV+j>&CWcpW(Yc72AvA;(}O|0d&;pI2PzVV=E>3lJN)0 zw#s~BEzry64yaQ3il_pl$9*~N)hkN1z7G$S>CJkU&2El#xhP6w`BsD#inFjvBi+ca z@-PWrZEj`2uoLKBD|}>0z`6&>t4;T90c>_+D`1=MjRD)XL^G`RoBsif3SR>l{dJ~# zs})MQ&$odJDSz=kfCai02P}9`55Pj#_@=^Rsd}Br{e0nffji(usOEXayKv=Kx^cKMw;&tR#Cj zxK;?TArv0;*34%Bn@?n$TU4V<*gA`jNxKq+9aq&vPUJ1hQm0Y{0i&Ld28>=!?Gf`8 zMWS;Nc2bwJUjlZ$PIiio8Ufht$DaYa@1mslh@cwnId>`2ypUj@B2PaqUYd=oSt8%FK(ocI^+0pPg(E=oM0;=^Mo0^2(H4th)YVHwCW~Gw zequK+6LYy0;#u+R!?;>0lIeV`5)~i8)oM{R1!#>Zei3M`c}!XsL*mKrfewqyq~{TlJrw9ov8e*kQE_<&(A(k{a`SQVAv@}X z*j)hV9r1TM&`EKc(r`+23k$iL^sn*2bYhqKN8BQ@ZizW`^rjf@*KqZS@e?aC)fl!HXoj)5 z9MDYT=1QPPjkA3FV@63b-)v(*B+wkABf0JgWAmw{d~ay9^MHg@j>+F~4e0BEZb zlMS@Z=z9ieyHV{5&<-P{GSE)rSQnsO#;{nRmyOF+fOZ?-P%Z5-zMKQ}iV-#wXs@w_ z486}d%eTL3Y&r(?n(^41K(8C$UIf~2tX~6kz)0N!bjX-MwmEEEr+z$Q4B3sVH;vuw zhoeRcRmR)K&$EDz8?UkQCyZh)hi%(N7ofB~a4Q_yldiJ|)`~T(;Zd>cEud$Nuns`$ zj3XZby=cT10@`hi90)YqTsR77ju~|o=m|5MwV!J~3VD)tvvTXD$mrOM#E&$S0Xr2X zwFb`uPsk@GmUtD{4@K+){7zIZ3Upn(z(Ri%`^Z#3i}6cQMzFcdjH*Y0o-tN51lndi^fSJBbmhg$LRVC&=fO+%s86U9Zf5QRnrXhe4rqZH{teJF^A>gHD)ZQr zKpV~QO+Z`BeN%v5HjB~4dClC*tHWmPjX=lE<1{d5%$FA7>SOb5UR^Y=KaZ=c<}4HF zdvoVmpkK|$dG(h$^dF#U&Y#u-&2l#R1ZaUXl=q%?{y=_x&bezh(0b=Ix_>V@KcP0* z>0HqjnXfzd6$g6TIf_3&?JPqJe%|>kTYJTs{RPm^&iSm_6l)0kcCIy#SEsQy4|K*l zx*q6b>+7u!a-lKYayWbu1xl!h0>w(^I)GnXAHXk_=K=iE`T%|zeE`3#I)LBT8=at> zj!sZsM<=Kd6rG@=S)kHgqZ8;K+0h9q-#I!#mAs=9RLvtgfjh701l4j!Cx{EpJEA~! 
z9Z{f0U_^m19Z{g>T_Ot9QV|8h10xF5))56%!*xV~I(bJFsH-CiM3`kXsq%~{P%|*1 zK)s-d0`&tU3N+9W1$3Fg5d|9FIif%#r!S&F<6IF1n&c5tz^x++H1$RlXyzADpt+7H zpeqy@QJ{s6D9}=!4U=8=M_<)gN`WBQAZSr%oS0fla445 zr6USN-!7s+Ox_U%ItNA+=n@!FfKdc=M1gLm=zg1s0zCpF3eF-&gO@Pi?<(dM$Z#8TUXN&KODbAN9_hJEKHZwhTdKnPF zSmqS*3m=*xKH;62B7(Ob6%UgXkBN?TfF2jKS>|l(mEPM@79 zt?fh^Z6^xqKrLmpohX;5ohYyELZhZ71sGZ71q$JJG-_qe-RgME-!tGqnTlM8hCE(J0VPApDCRc}ABRY$uxB*-kX| zvlGp7*@@pq=P$iXPfd^fX1U z+t`WTfp#LoYbW~TvJ-v1b|UVMcA}_`JX1tRo++#&&!{eeoyZ>;c_u#4P89c#JY$w9 z;cC+zdf6;l$|{i)4}(#vm{s&=SOc?oN!xN*iF=Umbam%*wz()VXaF?Y!#RDGN)I2? z8`pIn!E;tb-+qAgpNRvEit7V-&ureEE1J^A%o8eK6H zm(}XCoK~OZgVbjQvp~hWs!#nRTYXl#v-+%@xB9G-hx)9Vm-=)Es!s+1i>sEm`V7_T zvwEQVtfAFs*j?0TO{G3-1**?*tv*%5wfd}`xB9H3)n{F^j3!l{ZgyCp`iuxtpY;OO zXML?cb(z8Hv%#I!XG1^r*(jI#Y@CPstg6*#6R-Mg>gQ%R)9O=KC{TSi*Xpx{QlE_4 z1y5V6&o*~dpKXKHXS=-AXM3$aJ81RUQL9gH;I2rmK09gk8Ff4L8J)NKj0seqodeZp z7gKcA>ND09-EN~ky9cVzaIgC8kxPB{^s3KZcT}Hu2;8N*B)6O0J5YTF1nx48qOQ0; zUbR>{|3GM}O4pzqIUjJatXEMv&8lU!qAKUAiFqccX;>~s8bpXbM0S&5#LAb~>ZXEL zHx+}_O(nBHWvy<^${||$tH)S-!$&HM~Gn+N*f2QUbKqVlSQ?gSw`#0 zs(I>3x7L%@w4Myrda{sf+8~F+tY)cTan%dxu!p+dw#KWTr^YL&!yeX98c!9Y!ybkO zXuPF(lhd_jFy6Ob)C|;dg{k9eDIKRv3A2L}hHD*HJ5a~f(K@bfu#Qs&=-`AAO2^d; z)N%E-j#Ew5I<7(9I9~Y-1JzsOAobQHP`zO&Z+nNjxM20xOsO~B zo}jpd&HdC{3#H!Fqlin`(x=`mL?UeERcNiv{BS<(NQ7;)Leo_URA_CrLTjfKT6E8$15iET!8ZE?709}#t>?T{H9sZaGFI**_zBOR@(Lmm8+NgOq&cCV*TeQ z+tOuFis18Y|BnvtVQa^BPJRFX6SL?4(g|oxya%68EWSP!_NjZ?LxA^umImmFgiWwg zchhb?E?SKSnk8Pn2WYNX&IcBXvl&27iFY%BR)`n*+jZizB%t-8S_04|v2;ApU!n`q zQ$~9}^0JYX2=tEe3o|bpb@}Eg=I%j2Z<~MJ3v|)E^#IWC=7}jl^PC>OXrpuWeL!zG ze;o|;sWW*9(67#%2|)9$nI53^R#DdMb*t?Np!cjJ$w1#&r5^@*BxL0%pk*PS=KyUD zDaIGQ6*6ZS(8Z8<*|0xDZjJ?7efZjsT~Cr8*IYM-0{!JW$4A!X zyPp-@oo^&t`*yyXeBXt9+t{M(`Swf(nv(x4Te~QKOG%gfF(l-^{C|?HC-X0nt<9e@ z23I%o|IIhdDewmhXZXYxRWHe(1!!I2 z2v&D};q&b9D~12$)utj5r0&Hc(@5)|iyR=$rWGy2x6dy+njQCT(eV@2xKV~$w)f!t zf9+j;Y#dh+ANw39ZoX{i!zl^L*>OIdTRn~iVYx~HZ@3>nh zj#QyUq-vs46jTHeq7(!L{wM+}LJ?Bb{8IsfN-63eAyO5qe?Xxig%3p$3WS;WX7+aX 
z-Z>yZ2#F`%zMbEjnfG?y?%SR5dMEJNt^dIHj{g5`qyPDD8=a`7R|WpXuNtZE_Rjh5 zym!_57G0NZ&(gZlK10{xx~FJ?*icW`-KH04#oBz6F1L<1X#Lpo0R&ET{Vnp; zze1k+{owLH=dSQ4((B|C>2u7!!fKu;;K|JckWY&!H~-S@VtTIcy4h4x6iZ4jpFCVT*VUTPr+= zaBz7;*nLPu+=p#p_o4Ga?n4*54{}fu_hEa$eb~Y715XON58Y+=p-w()-YU_NoxU@% zD-c|MWOiqwN1TaA*_oifK*nmG5NBfdtj@$9b|&iNv*vqE&V(h-M6Wm#ed0{`!R3B& zCI-Zr7>qa*(#QHCb|x4QXJTL2nb;q8CMY5z&IARTt2h$}!_LHJ-Kqe34`z&b12oo~_#c9T5uXp!b4~mU;F1wv2uJ3b z_!mKUhY4Q{mJx4+3tcAuCg|B=!kgi}-J=jKF<1h(jrdZyYs8lU>@n$C4l{dAIEK?K z6TSipM*LwoYs5bg9lD}pu(AristS&8;~7c}RvS=s(gAPLh7yB#1&a4{ys7`;ezaj; z6?_TeHBf2iIO4Ui5Y9%<6MT_b;nyPgPR60Z!o70Azx<&7vOQ^hR6d5k8pFJwL!A1J zNAx_e_$$w!R}eQHFTROo#DB*6@3DRtX)5?`nCjPg$RZLj^r_0bxTW#D#`pooZ!=yq z-%`lu`M~P2HjQUx6?|J2+(MlCeZ5)hkE>n)4lsU;@gn0CI41rlmuUVD#__>X;ZHM; zfg4ruQBvVIgdb@t=#VKqw#-uWIkg42$oS*SHU2&3{|4i~Yti^oUYvi-_?}f7KZmc> zQF$M6lYRW-U2Q48xY`2TVLi7ZxY*%7<54?&VvW`#cEE#D$Mq$~TUmbt<5wA%E3=t# zuTAUu8jgd?HpW}pEk*A`no70nYY^LOpb0u3vlO3fjw!tIx}$cJeQecFbHzGYPZx-Sgdh;fnJsC zjK9gp)zA1X#{bl4DdfwH-%&Udfod7rnd}g~Z?+*$=gA`0gYTlN@`#$3fCAnfLcFPV z1x#C`p~hpZ=W#xN-s09Y<8N=U6!KlZ9_eHDCVO5Y{ZM{xK~j~MSkG#g`Resh$0}cX*6_&L*KWik@ zFl3!V!NnYpx+*LANUyq_%K&rqWhOB#XaHsdDzn=;Cll@0ra{_zR-&EsPdG)Kf$kvFgGhv6HaLY66rIOs882kQMX{O(h?qAh_JD(=w=-f;JrfDgM^ymgopD63A$Zi zTV7o>33q~T5EE&X6-rC!JB5~z{g0=R4YfPDObWZsqe9#yR4-a1P9Y?4mCWJI2wC0a z6sEb?3#E{BH;{cNnJl`egvw$LS=mQ>$!^X`(RU#JJ%u7P^hkt{RjNpQu!n(Fj7!Cj zPeQ_4+?$I(kw0E*Aq5yuR29T^00*qs-3hwd&*CR>0BELd6t9VEZ_eEafob*r8 zRDP=~e;c(QJ=fk3j9=oR^t?u8v#vnKFa1b;65Ib4ke9gW3=4F!e02N_1NkE~RM98# z)kYG)GOD)tXF{%{$A2DilJDboiNh9HejXn%$+zGyD*qY8h=tkxcl$ZE)&kikg?*aW^ zL$*cuqicYQ#8uwgt!bHWO_k?g@SkJDV*H7`#Jg|*gYq{}mc}pg5*NSw0C^Nq_(--?{n zf5w)eyu=?0mKKq5iUN@n`Y*Ucm>j?OnP)A{5#?ud=tkU9K%KBoAJID^-yOk4UTRt& zWIxFIeM?jENMnSAmMZdZ_iFjC(*hYNQTer1mO>liyLN_ z-_7z-`xR!i+*dyp?olYI_p^Ms7xcx`dPRB03Ac!xpyad5+zUqamK$0g|4}4VBBZj1 q<_xjHWeS91xwJszYq2_vKRKV|{z>bjssD|?(DHS1;b}D$_WwI;G}&DM diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 4460b2db..b613d2cd 100644 --- 
a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -18,6 +18,7 @@ static void test_irange_list_union_merge(void **state); static void test_irange_list_union_lossy_cov(void **state); static void test_irange_list_union_complete_cov(void **state); +static void test_irange_list_union_intersection(void **state); /* Entrypoint */ @@ -30,6 +31,7 @@ main(void) cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), + cmocka_unit_test(test_irange_list_union_intersection), }; /* Run series of tests */ @@ -46,14 +48,13 @@ main(void) static void test_irange_list_union_merge(void **state) { - IndexRange a; + IndexRange a, b; List *unmerged, *union_result; /* Subtest #0 */ a = make_irange(0, 8, IR_COMPLETE); - unmerged = NIL; unmerged = lappend_irange(unmerged, make_irange(9, 10, IR_COMPLETE)); unmerged = lappend_irange(unmerged, make_irange(11, 11, IR_LOSSY)); @@ -66,6 +67,31 @@ test_irange_list_union_merge(void **state) assert_string_equal(rangeset_print(union_result), "[0-10]C, 11L, [12-24]C"); + + union_result = irange_list_union(unmerged, unmerged); + + assert_string_equal(rangeset_print(union_result), + "[9-10]C, 11L, [12-24]C"); + + + /* Subtest #1 */ + a = make_irange(0, 10, IR_COMPLETE); + b = make_irange(12, 20, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-10]C, [12-20]C"); + + /* Subtest #2 */ + a = make_irange(0, 10, IR_LOSSY); + b = make_irange(11, 20, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-20]L"); + } /* Lossy IndexRange covers complete IndexRange */ @@ -184,3 +210,70 @@ test_irange_list_union_complete_cov(void **state) assert_string_equal(rangeset_print(union_result), "[0-100]C"); } + +static void 
+test_irange_list_union_intersection(void **state) +{ + IndexRange a, b; + List *unmerged, + *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 55, IR_COMPLETE); + b = make_irange(55, 100, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #1 */ + a = make_irange(0, 55, IR_COMPLETE); + b = make_irange(55, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-55]C, [56-100]L"); + + /* Subtest #2 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(100, 100, IR_LOSSY)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-39]L, [40-65]C, 100L"); + + /* Subtest #3 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, IR_LOSSY)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-39]L, [40-65]C, [66-100]L"); + + /* Subtest #4 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, IR_COMPLETE)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #5 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, IR_COMPLETE)); + b = make_irange(40, 65, IR_LOSSY); + union_result = irange_list_union(unmerged, 
list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-45]C, [46-63]L, [64-100]C"); +} From 350cf51a7e5317e3b6343f0f0620411f3724648f Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 31 Oct 2016 15:06:12 +0300 Subject: [PATCH 0036/1124] Add primary tests for parallel queries under partitioning --- tests/partitioning_test.py | 78 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py index c71c9b9b..a866efd7 100644 --- a/tests/partitioning_test.py +++ b/tests/partitioning_test.py @@ -406,6 +406,84 @@ def test_foreign_table(self): # Testing drop partitions (including foreign partitions) master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + def test_parallel_nodes(self): + """Test parallel queries under partitions""" + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + version = node.execute("postgres", "show server_version_num") + if version < 90600: + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') + node.psql('postgres', 'alter table range_partitioned alter column i set not null') + node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') + node.psql('postgres', 'vacuum analyze range_partitioned') + + node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') + node.psql('postgres', 'alter table hash_partitioned alter column i set not null') + node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') + 
node.psql('postgres', 'vacuum analyze hash_partitioned') + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + plan = con.execute('explain (costs off) select count(*) from range_partitioned where i < 1500') + expected = [('Finalize Aggregate',), + (' -> Gather',), + (' Workers Planned: 2',), + (' -> Partial Aggregate',), + (' -> Append',), + (' -> Parallel Seq Scan on range_partitioned_1',), + (' -> Parallel Seq Scan on range_partitioned_2',), + (' Filter: (i < 1500)',)] + self.assertEqual(plan, expected) + + # Check count of returned tuples + count = con.execute('select count(*) from range_partitioned where i < 1500') + self.assertEqual(count[0][0], 1499) + + # Check simple parallel seq scan plan with limit + plan = con.execute('explain (costs off) select * from range_partitioned where i < 1500 limit 5') + expected = [('Limit',), + (' -> Gather',), + (' Workers Planned: 2',), + (' -> Append',), + (' -> Parallel Seq Scan on range_partitioned_1',), + (' -> Parallel Seq Scan on range_partitioned_2',), + (' Filter: (i < 1500)',)] + self.assertEqual(plan, expected) + + # Check tuples returned by query above + res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') + expected = [(1,), (2,), (3,), (4,), (5,)] + self.assertEqual(res_tuples, expected) + # import ipdb; ipdb.set_trace() + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + if __name__ == "__main__": unittest.main() From 949840f83adf5c21549d1b899105e2274b8dc4ca Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: 
Mon, 31 Oct 2016 17:40:15 +0300 Subject: [PATCH 0037/1124] improve irange_list_intersection(), make irange_cmp_lossiness() inline static, more tests --- src/rangeset.c | 65 +++++++++++++++---------------- src/rangeset.h | 33 +++++++++++----- tests/cmocka/rangeset_tests.c | 72 +++++++++++++++++++++++++++++++++-- 3 files changed, 123 insertions(+), 47 deletions(-) diff --git a/src/rangeset.c b/src/rangeset.c index a26f6d59..6715ee0e 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -44,22 +44,6 @@ irange_eq_bounds(IndexRange a, IndexRange b) (irange_upper(a) == irange_upper(b)); } -/* Comapre lossiness factor of two ranges */ -ir_cmp_lossiness -irange_cmp_lossiness(IndexRange a, IndexRange b) -{ - if (is_irange_lossy(a) == is_irange_lossy(b)) - return IR_EQ_LOSSINESS; - - if (is_irange_lossy(a)) - return IR_A_LOSSY; - - if (is_irange_lossy(b)) - return IR_B_LOSSY; - - return IR_EQ_LOSSINESS; -} - /* Make union of two conjuncted ranges */ IndexRange @@ -161,6 +145,10 @@ irange_union_internal(IndexRange first, IndexRange second, List **new_iranges) { + /* Assert that both IndexRanges are valid */ + Assert(is_irange_valid(first)); + Assert(is_irange_valid(second)); + /* Swap 'first' and 'second' if order is incorrect */ if (irange_lower(first) > irange_lower(second)) { @@ -332,39 +320,48 @@ irange_list_intersection(List *a, List *b) IndexRange ra = lfirst_irange(ca), rb = lfirst_irange(cb); + /* Assert that both IndexRanges are valid */ + Assert(is_irange_valid(ra)); + Assert(is_irange_valid(rb)); + /* Only care about intersecting ranges */ if (iranges_intersect(ra, rb)) { - IndexRange intersect, last; + IndexRange ir_intersection; + bool glued_to_last = false; /* * Get intersection and try to "glue" it to - * previous range, put it separately otherwise. + * last irange, put it separately otherwise. 
*/ - intersect = irange_intersection_simple(ra, rb); + ir_intersection = irange_intersection_simple(ra, rb); if (result != NIL) { - last = llast_irange(result); - if (iranges_adjoin(last, intersect) && - is_irange_lossy(last) == is_irange_lossy(intersect)) - { - llast(result) = alloc_irange(irange_union_simple(last, intersect)); - } - else + IndexRange last = llast_irange(result); + + /* Test if we can glue 'last' and 'ir_intersection' */ + if (irange_cmp_lossiness(last, ir_intersection) == IR_EQ_LOSSINESS && + iranges_adjoin(last, ir_intersection)) { - result = lappend_irange(result, intersect); + IndexRange ir_union = irange_union_simple(last, ir_intersection); + + /* We allocate a new IndexRange for safety */ + llast(result) = alloc_irange(ir_union); + + /* Successfully glued them */ + glued_to_last = true; } } - else - { - result = lappend_irange(result, intersect); - } + + /* Append IndexRange if we couldn't glue it */ + if (!glued_to_last) + result = lappend_irange(result, ir_intersection); } /* - * Fetch next ranges. We use upper bound of current range to determine - * which lists to fetch, since lower bound of next range is greater (or - * equal) to upper bound of current. + * Fetch next iranges. We use upper bound of current irange to + * determine which lists to fetch, since lower bound of next + * irange is greater (or equal) to upper bound of current. 
*/ if (irange_upper(ra) <= irange_upper(rb)) ca = lnext(ca); diff --git a/src/rangeset.h b/src/rangeset.h index b2f113cf..5f3cd4e4 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -44,6 +44,14 @@ typedef struct { #define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BONDARY_MASK) ) +#define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) +#define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) +#define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) +#define list_make1_irange(irange) ( lcons(alloc_irange(irange), NIL) ) +#define llast_irange(list) ( lfirst_irange(list_tail(list)) ) +#define linitial_irange(list) ( lfirst_irange(list_head(list)) ) + + inline static IndexRange make_irange(uint32 lower, uint32 upper, bool lossy) { @@ -93,14 +101,6 @@ irb_succ(uint32 boundary) } -#define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) -#define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) -#define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) -#define list_make1_irange(irange) ( lcons(alloc_irange(irange), NIL) ) -#define llast_irange(list) ( lfirst_irange(list_tail(list)) ) -#define linitial_irange(list) ( lfirst_irange(list_head(list)) ) - - /* Result of function irange_cmp_lossiness() */ typedef enum { @@ -109,12 +109,27 @@ typedef enum IR_B_LOSSY /* IndexRange 'b' is lossy ('a' is not) */ } ir_cmp_lossiness; +/* Comapre lossiness factor of two IndexRanges */ +inline static ir_cmp_lossiness +irange_cmp_lossiness(IndexRange a, IndexRange b) +{ + if (is_irange_lossy(a) == is_irange_lossy(b)) + return IR_EQ_LOSSINESS; + + if (is_irange_lossy(a)) + return IR_A_LOSSY; + + if (is_irange_lossy(b)) + return IR_B_LOSSY; + + return IR_EQ_LOSSINESS; +} + /* Various traits */ bool iranges_intersect(IndexRange a, IndexRange b); bool iranges_adjoin(IndexRange a, IndexRange b); bool irange_eq_bounds(IndexRange a, IndexRange b); -ir_cmp_lossiness irange_cmp_lossiness(IndexRange a, 
IndexRange b); /* Basic operations on IndexRanges */ IndexRange irange_union_simple(IndexRange a, IndexRange b); diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index b613d2cd..8f8d873f 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -18,7 +18,9 @@ static void test_irange_list_union_merge(void **state); static void test_irange_list_union_lossy_cov(void **state); static void test_irange_list_union_complete_cov(void **state); -static void test_irange_list_union_intersection(void **state); +static void test_irange_list_union_intersecting(void **state); + +static void test_irange_list_intersection(void **state); /* Entrypoint */ @@ -31,7 +33,8 @@ main(void) cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), - cmocka_unit_test(test_irange_list_union_intersection), + cmocka_unit_test(test_irange_list_union_intersecting), + cmocka_unit_test(test_irange_list_intersection), }; /* Run series of tests */ @@ -44,7 +47,7 @@ main(void) * ---------------------- */ -/* Test merges of adjoint lists */ +/* Test merges of adjoint IndexRanges */ static void test_irange_list_union_merge(void **state) { @@ -211,8 +214,9 @@ test_irange_list_union_complete_cov(void **state) "[0-100]C"); } +/* Several IndexRanges intersect, unite them */ static void -test_irange_list_union_intersection(void **state) +test_irange_list_union_intersecting(void **state) { IndexRange a, b; List *unmerged, @@ -277,3 +281,63 @@ test_irange_list_union_intersection(void **state) assert_string_equal(rangeset_print(union_result), "[0-45]C, [46-63]L, [64-100]C"); } + + +/* Test intersection of IndexRanges */ +static void +test_irange_list_intersection(void **state) +{ + IndexRange a, b; + List *intersection_result; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = 
irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #1 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(10, 20, IR_COMPLETE); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #2 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #3 */ + a = make_irange(15, 25, IR_COMPLETE); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[15-20]L"); + + /* Subtest #4 */ + a = make_irange(15, 25, IR_COMPLETE); + b = make_irange(10, 20, IR_COMPLETE); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[15-20]C"); +} From 0675d1165e22084ae41cb82b5891734678a19031 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 31 Oct 2016 20:45:16 +0300 Subject: [PATCH 0038/1124] Transfer some internal functions to compat.c and fix check of postgres version in tests --- src/pg_compat.c | 213 +++++++++++++++++++++++++++++++++++ src/pg_compat.h | 13 +++ src/pg_pathman.c | 221 +------------------------------------ tests/partitioning_test.py | 2 +- 4 files changed, 229 insertions(+), 220 deletions(-) diff --git a/src/pg_compat.c b/src/pg_compat.c index 7474d689..83c77486 100644 --- a/src/pg_compat.c +++ b/src/pg_compat.c @@ -10,9 +10,13 @@ #include "pg_compat.h" +#include "catalog/pg_proc.h" +#include "foreign/fdwapi.h" +#include "optimizer/clauses.h" #include 
"optimizer/pathnode.h" #include "port.h" #include "utils.h" +#include "utils/lsyscache.h" #include @@ -111,4 +115,213 @@ make_result(List *tlist, return node; } + +/* + * If this relation could possibly be scanned from within a worker, then set + * its consider_parallel flag. + */ +void +set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, + RangeTblEntry *rte) +{ + /* + * The flag has previously been initialized to false, so we can just + * return if it becomes clear that we can't safely set it. + */ + Assert(!rel->consider_parallel); + + /* Don't call this if parallelism is disallowed for the entire query. */ + Assert(root->glob->parallelModeOK); + + /* This should only be called for baserels and appendrel children. */ + Assert(rel->reloptkind == RELOPT_BASEREL || + rel->reloptkind == RELOPT_OTHER_MEMBER_REL); + + /* Assorted checks based on rtekind. */ + switch (rte->rtekind) + { + case RTE_RELATION: + + /* + * Currently, parallel workers can't access the leader's temporary + * tables. We could possibly relax this if the wrote all of its + * local buffers at the start of the query and made no changes + * thereafter (maybe we could allow hint bit changes), and if we + * taught the workers to read them. Writing a large number of + * temporary buffers could be expensive, though, and we don't have + * the rest of the necessary infrastructure right now anyway. So + * for now, bail out if we see a temporary table. + */ + if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) + return; + + /* + * Table sampling can be pushed down to workers if the sample + * function and its arguments are safe. + */ + if (rte->tablesample != NULL) + { + Oid proparallel = func_parallel(rte->tablesample->tsmhandler); + + if (proparallel != PROPARALLEL_SAFE) + return; + if (has_parallel_hazard((Node *) rte->tablesample->args, + false)) + return; + } + + /* + * Ask FDWs whether they can support performing a ForeignScan + * within a worker. Most often, the answer will be no. 
For + * example, if the nature of the FDW is such that it opens a TCP + * connection with a remote server, each parallel worker would end + * up with a separate connection, and these connections might not + * be appropriately coordinated between workers and the leader. + */ + if (rte->relkind == RELKIND_FOREIGN_TABLE) + { + Assert(rel->fdwroutine); + if (!rel->fdwroutine->IsForeignScanParallelSafe) + return; + if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) + return; + } + + /* + * There are additional considerations for appendrels, which we'll + * deal with in set_append_rel_size and set_append_rel_pathlist. + * For now, just set consider_parallel based on the rel's own + * quals and targetlist. + */ + break; + + case RTE_SUBQUERY: + + /* + * There's no intrinsic problem with scanning a subquery-in-FROM + * (as distinct from a SubPlan or InitPlan) in a parallel worker. + * If the subquery doesn't happen to have any parallel-safe paths, + * then flagging it as consider_parallel won't change anything, + * but that's true for plain tables, too. We must set + * consider_parallel based on the rel's own quals and targetlist, + * so that if a subquery path is parallel-safe but the quals and + * projection we're sticking onto it are not, we correctly mark + * the SubqueryScanPath as not parallel-safe. (Note that + * set_subquery_pathlist() might push some of these quals down + * into the subquery itself, but that doesn't change anything.) + */ + break; + + case RTE_JOIN: + /* Shouldn't happen; we're only considering baserels here. */ + Assert(false); + return; + + case RTE_FUNCTION: + /* Check for parallel-restricted functions. */ + if (has_parallel_hazard((Node *) rte->functions, false)) + return; + break; + + case RTE_VALUES: + /* Check for parallel-restricted functions. 
*/ + if (has_parallel_hazard((Node *) rte->values_lists, false)) + return; + break; + + case RTE_CTE: + + /* + * CTE tuplestores aren't shared among parallel workers, so we + * force all CTE scans to happen in the leader. Also, populating + * the CTE would require executing a subplan that's not available + * in the worker, might be parallel-restricted, and must get + * executed only once. + */ + return; + } + + /* + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider + * instead postponing application of the restricted quals until we're + * above all the parallelism in the plan tree, but it's not clear that + * that would be a win in very many cases, and it might be tricky to make + * outer join clauses work correctly. It would likely break equivalence + * classes, too. + */ + if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) + return; + + /* + * Likewise, if the relation's outputs are not parallel-safe, give up. + * (Usually, they're just Vars, but sometimes they're not.) + */ + if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) + return; + + /* We have a winner. */ + rel->consider_parallel = true; +} + +/* + * create_plain_partial_paths + * Build partial access paths for parallel scan of a plain relation + */ +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + /* + * If the user has set the parallel_workers reloption, use that; otherwise + * select a default number of workers. + */ + if (rel->rel_parallel_workers != -1) + parallel_workers = rel->rel_parallel_workers; + else + { + int parallel_threshold; + + /* + * If this relation is too small to be worth a parallel scan, just + * return without doing anything ... unless it's an inheritance child. + * In that case, we want to generate a parallel path here anyway. 
It + * might not be worthwhile just for this relation, but when combined + * with all of its inheritance siblings it may well pay off. + */ + if (rel->pages < (BlockNumber) min_parallel_relation_size && + rel->reloptkind == RELOPT_BASEREL) + return; + + /* + * Select the number of workers based on the log of the size of the + * relation. This probably needs to be a good deal more + * sophisticated, but we need something here for now. Note that the + * upper limit of the min_parallel_relation_size GUC is chosen to + * prevent overflow here. + */ + parallel_workers = 1; + parallel_threshold = Max(min_parallel_relation_size, 1); + while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) + { + parallel_workers++; + parallel_threshold *= 3; + if (parallel_threshold > INT_MAX / 3) + break; /* avoid overflow */ + } + } + + /* + * In no case use more than max_parallel_workers_per_gather workers. + */ + parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. 
*/ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} #endif diff --git a/src/pg_compat.h b/src/pg_compat.h index 7bef6778..17f93c36 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -41,6 +41,15 @@ extern void copy_targetlist_compat(RelOptInfo *dest, RelOptInfo *rel); #define pull_var_clause_compat(node, aggbehavior, phbehavior) \ pull_var_clause(node, aggbehavior | phbehavior) +extern void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, + RangeTblEntry *rte); +#define set_rel_consider_parallel_compat(root, rel, rte) \ + set_rel_consider_parallel(root, rel, rte) + +extern void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); +#define create_plain_partial_paths_compat(root, rel) \ + create_plain_partial_paths(root, rel) + extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); #define make_result_compat(root, tlist, resconstantqual, subplan) \ make_result(tlist, resconstantqual, subplan) @@ -68,6 +77,10 @@ extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); #define make_result_compat(root, tlist, resconstantqual, subplan) \ make_result(root, tlist, resconstantqual, subplan) +#define set_rel_consider_parallel_compat(root, rel, rte) ((void) true) + +#define create_plain_partial_paths_compat(root, rel) ((void) true) + #endif diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4ecd2005..9061f505 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -26,7 +26,6 @@ #include "access/transam.h" #include "access/xact.h" #include "catalog/pg_cast.h" -#include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/spi.h" #include "foreign/fdwapi.h" @@ -99,11 +98,6 @@ static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, PathKey *pathkeyAsc, PathKey *pathkeyDesc); static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); -#if PG_VERSION_NUM >= 90600 -static void 
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); -static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); -#endif /* @@ -1765,7 +1759,7 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) #if PG_VERSION_NUM >= 90600 /* If appropriate, consider parallel sequential scan */ if (rel->consider_parallel && required_outer == NULL) - create_plain_partial_paths(root, rel); + create_plain_partial_paths_compat(root, rel); #endif /* Consider index scans */ @@ -1856,7 +1850,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, * For consistency, do this before calling set_rel_size() for the child. */ if (root->glob->parallelModeOK && rel->consider_parallel) - set_rel_consider_parallel(root, childrel, childRTE); + set_rel_consider_parallel_compat(root, childrel, childRTE); #endif /* @@ -2075,217 +2069,6 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, } } -#if PG_VERSION_NUM >= 90600 -/* - * create_plain_partial_paths - * Build partial access paths for parallel scan of a plain relation - */ -static void -create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) -{ - int parallel_workers; - - /* - * If the user has set the parallel_workers reloption, use that; otherwise - * select a default number of workers. - */ - if (rel->rel_parallel_workers != -1) - parallel_workers = rel->rel_parallel_workers; - else - { - int parallel_threshold; - - /* - * If this relation is too small to be worth a parallel scan, just - * return without doing anything ... unless it's an inheritance child. - * In that case, we want to generate a parallel path here anyway. It - * might not be worthwhile just for this relation, but when combined - * with all of its inheritance siblings it may well pay off. 
- */ - if (rel->pages < (BlockNumber) min_parallel_relation_size && - rel->reloptkind == RELOPT_BASEREL) - return; - - /* - * Select the number of workers based on the log of the size of the - * relation. This probably needs to be a good deal more - * sophisticated, but we need something here for now. Note that the - * upper limit of the min_parallel_relation_size GUC is chosen to - * prevent overflow here. - */ - parallel_workers = 1; - parallel_threshold = Max(min_parallel_relation_size, 1); - while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) - { - parallel_workers++; - parallel_threshold *= 3; - if (parallel_threshold > INT_MAX / 3) - break; /* avoid overflow */ - } - } - - /* - * In no case use more than max_parallel_workers_per_gather workers. - */ - parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); - - /* If any limit was set to zero, the user doesn't want a parallel scan. */ - if (parallel_workers <= 0) - return; - - /* Add an unordered partial path based on a parallel sequential scan. */ - add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); -} - -/* - * If this relation could possibly be scanned from within a worker, then set - * its consider_parallel flag. - */ -static void -set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte) -{ - /* - * The flag has previously been initialized to false, so we can just - * return if it becomes clear that we can't safely set it. - */ - Assert(!rel->consider_parallel); - - /* Don't call this if parallelism is disallowed for the entire query. */ - Assert(root->glob->parallelModeOK); - - /* This should only be called for baserels and appendrel children. */ - Assert(rel->reloptkind == RELOPT_BASEREL || - rel->reloptkind == RELOPT_OTHER_MEMBER_REL); - - /* Assorted checks based on rtekind. */ - switch (rte->rtekind) - { - case RTE_RELATION: - - /* - * Currently, parallel workers can't access the leader's temporary - * tables. 
We could possibly relax this if the wrote all of its - * local buffers at the start of the query and made no changes - * thereafter (maybe we could allow hint bit changes), and if we - * taught the workers to read them. Writing a large number of - * temporary buffers could be expensive, though, and we don't have - * the rest of the necessary infrastructure right now anyway. So - * for now, bail out if we see a temporary table. - */ - if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) - return; - - /* - * Table sampling can be pushed down to workers if the sample - * function and its arguments are safe. - */ - if (rte->tablesample != NULL) - { - Oid proparallel = func_parallel(rte->tablesample->tsmhandler); - - if (proparallel != PROPARALLEL_SAFE) - return; - if (has_parallel_hazard((Node *) rte->tablesample->args, - false)) - return; - } - - /* - * Ask FDWs whether they can support performing a ForeignScan - * within a worker. Most often, the answer will be no. For - * example, if the nature of the FDW is such that it opens a TCP - * connection with a remote server, each parallel worker would end - * up with a separate connection, and these connections might not - * be appropriately coordinated between workers and the leader. - */ - if (rte->relkind == RELKIND_FOREIGN_TABLE) - { - Assert(rel->fdwroutine); - if (!rel->fdwroutine->IsForeignScanParallelSafe) - return; - if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) - return; - } - - /* - * There are additional considerations for appendrels, which we'll - * deal with in set_append_rel_size and set_append_rel_pathlist. - * For now, just set consider_parallel based on the rel's own - * quals and targetlist. - */ - break; - - case RTE_SUBQUERY: - - /* - * There's no intrinsic problem with scanning a subquery-in-FROM - * (as distinct from a SubPlan or InitPlan) in a parallel worker. 
- * If the subquery doesn't happen to have any parallel-safe paths, - * then flagging it as consider_parallel won't change anything, - * but that's true for plain tables, too. We must set - * consider_parallel based on the rel's own quals and targetlist, - * so that if a subquery path is parallel-safe but the quals and - * projection we're sticking onto it are not, we correctly mark - * the SubqueryScanPath as not parallel-safe. (Note that - * set_subquery_pathlist() might push some of these quals down - * into the subquery itself, but that doesn't change anything.) - */ - break; - - case RTE_JOIN: - /* Shouldn't happen; we're only considering baserels here. */ - Assert(false); - return; - - case RTE_FUNCTION: - /* Check for parallel-restricted functions. */ - if (has_parallel_hazard((Node *) rte->functions, false)) - return; - break; - - case RTE_VALUES: - /* Check for parallel-restricted functions. */ - if (has_parallel_hazard((Node *) rte->values_lists, false)) - return; - break; - - case RTE_CTE: - - /* - * CTE tuplestores aren't shared among parallel workers, so we - * force all CTE scans to happen in the leader. Also, populating - * the CTE would require executing a subplan that's not available - * in the worker, might be parallel-restricted, and must get - * executed only once. - */ - return; - } - - /* - * If there's anything in baserestrictinfo that's parallel-restricted, we - * give up on parallelizing access to this relation. We could consider - * instead postponing application of the restricted quals until we're - * above all the parallelism in the plan tree, but it's not clear that - * that would be a win in very many cases, and it might be tricky to make - * outer join clauses work correctly. It would likely break equivalence - * classes, too. - */ - if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) - return; - - /* - * Likewise, if the relation's outputs are not parallel-safe, give up. 
- * (Usually, they're just Vars, but sometimes they're not.) - */ - if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) - return; - - /* We have a winner. */ - rel->consider_parallel = true; -} -#endif - static List * accumulate_append_subpath(List *subpaths, Path *path) { diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py index a866efd7..b3b8f571 100644 --- a/tests/partitioning_test.py +++ b/tests/partitioning_test.py @@ -420,7 +420,7 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries version = node.execute("postgres", "show server_version_num") - if version < 90600: + if int(version[0][0]) < 90600: return # Prepare test database From effb898bcb7d2748c0d48ae9c6d8f0ea1e5beb8b Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 1 Nov 2016 12:07:26 +0300 Subject: [PATCH 0039/1124] Fix getting version number in tests --- tests/partitioning_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py index b3b8f571..27c1cec1 100644 --- a/tests/partitioning_test.py +++ b/tests/partitioning_test.py @@ -419,8 +419,8 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries - version = node.execute("postgres", "show server_version_num") - if int(version[0][0]) < 90600: + version = node.psql("postgres", "show server_version_num") + if int(version[1]) < 90600: return # Prepare test database From fccccedd87ce595632465e18515595626ab0e18e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 1 Nov 2016 15:07:14 +0300 Subject: [PATCH 0040/1124] fix irb_succ(), more tests, add rule 'check' to Makefile --- src/rangeset.h | 6 +- tests/cmocka/Makefile | 3 + tests/cmocka/rangeset_tests.c | 126 +++++++++++++++++++++++++++++++++- 3 files changed, 131 insertions(+), 4 deletions(-) diff --git a/src/rangeset.h b/src/rangeset.h index 
5f3cd4e4..dd65ef1c 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -47,7 +47,7 @@ typedef struct { #define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) #define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) #define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) -#define list_make1_irange(irange) ( lcons(alloc_irange(irange), NIL) ) +#define list_make1_irange(irange) ( lcons_irange(irange, NIL) ) #define llast_irange(list) ( lfirst_irange(list_tail(list)) ) #define linitial_irange(list) ( lfirst_irange(list_head(list)) ) @@ -90,12 +90,12 @@ irb_pred(uint32 boundary) return 0; } -/* Return predecessor or IRANGE_BONDARY_MASK */ +/* Return successor or IRANGE_BONDARY_MASK */ inline static uint32 irb_succ(uint32 boundary) { if (boundary >= IRANGE_BONDARY_MASK) - return boundary; + return IRANGE_BONDARY_MASK; return boundary + 1; } diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index a7c7343c..678a1ca0 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -26,3 +26,6 @@ build_extension: clean: rm -f $(OBJ) $(TEST_BIN) + +check: all + ./$(TEST_BIN) diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 8f8d873f..ea06e648 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -15,6 +15,8 @@ * ----------------------- */ +static void test_irange_basic(void **state); + static void test_irange_list_union_merge(void **state); static void test_irange_list_union_lossy_cov(void **state); static void test_irange_list_union_complete_cov(void **state); @@ -30,6 +32,7 @@ main(void) /* Array of test functions */ const struct CMUnitTest tests[] = { + cmocka_unit_test(test_irange_basic), cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), @@ -47,6 +50,38 @@ main(void) * ---------------------- */ +/* Basic behavior tests */ +static void +test_irange_basic(void 
**state) +{ + IndexRange irange; + List *irange_list; + + /* test irb_pred() */ + assert_int_equal(99, irb_pred(100)); + assert_int_equal(0, irb_pred(1)); + assert_int_equal(0, irb_pred(0)); + + /* test irb_succ() */ + assert_int_equal(100, irb_succ(99)); + assert_int_equal(IRANGE_BONDARY_MASK, irb_succ(IRANGE_BONDARY_MASK)); + assert_int_equal(IRANGE_BONDARY_MASK, irb_succ(IRANGE_BONDARY_MASK + 1)); + + /* test convenience macros */ + irange = make_irange(0, IRANGE_BONDARY_MASK, IR_LOSSY); + assert_int_equal(irange_lower(irange), 0); + assert_int_equal(irange_upper(irange), IRANGE_BONDARY_MASK); + assert_true(is_irange_lossy(irange)); + assert_true(is_irange_valid(irange)); + + /* test allocation */ + irange_list = NIL; + irange_list = lappend_irange(irange_list, irange); + assert_memory_equal(&irange, &linitial_irange(irange_list), sizeof(IndexRange)); + assert_memory_equal(&irange, &llast_irange(irange_list), sizeof(IndexRange)); +} + + /* Test merges of adjoint IndexRanges */ static void test_irange_list_union_merge(void **state) @@ -288,7 +323,9 @@ static void test_irange_list_intersection(void **state) { IndexRange a, b; - List *intersection_result; + List *intersection_result, + *left_list, + *right_list; /* Subtest #0 */ @@ -340,4 +377,91 @@ test_irange_list_intersection(void **state) assert_string_equal(rangeset_print(intersection_result), "[15-20]C"); + + /* Subtest #5 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(0, 11, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(12, 20, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(1, 15, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_LOSSY)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[1-11]L, [12-15]C, [16-20]L"); + + /* Subtest #6 */ + left_list = NIL; + left_list = lappend_irange(left_list, 
make_irange(0, 11, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(12, 20, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(1, 15, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_COMPLETE)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[1-11]L, [12-20]C"); + + /* Subtest #7 */ + a = make_irange(0, 10, IR_COMPLETE); + b = make_irange(20, 20, IR_COMPLETE); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + ""); /* empty set */ + + /* Subtest #8 */ + a = make_irange(0, 10, IR_LOSSY); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(10, 10, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_LOSSY)); + + intersection_result = irange_list_intersection(list_make1_irange(a), + right_list); + + assert_string_equal(rangeset_print(intersection_result), + "10L"); + + /* Subtest #9 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(15, 15, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(25, 25, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 20, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(21, 40, IR_LOSSY)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "15L, 25L"); + + /* Subtest #10 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(21, 21, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(22, 22, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 21, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(22, 40, IR_LOSSY)); + + intersection_result = 
irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[21-22]L"); + + /* Subtest #11 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(21, 21, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(22, 25, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 21, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(22, 40, IR_COMPLETE)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "21L, [22-25]C"); } From bda6e5bebb04ce5501afb53d3c183a632293703b Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 1 Nov 2016 20:05:54 +0300 Subject: [PATCH 0041/1124] Fix tests for parallel queries --- tests/partitioning_test.py | 155 +++++++++++++++++++++++++++++++------ 1 file changed, 131 insertions(+), 24 deletions(-) diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py index 27c1cec1..a9a934fd 100644 --- a/tests/partitioning_test.py +++ b/tests/partitioning_test.py @@ -409,6 +409,8 @@ def test_foreign_table(self): def test_parallel_nodes(self): """Test parallel queries under partitions""" + import json + # Init and start postgres instance with preload pg_pathman module node = get_new_node('test') node.init() @@ -419,8 +421,8 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries - version = node.psql("postgres", "show server_version_num") - if int(version[1]) < 90600: + version = int(node.psql("postgres", "show server_version_num")[1]) + if version < 90600: return # Prepare test database @@ -435,6 +437,26 @@ def test_parallel_nodes(self): node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') node.psql('postgres', 'vacuum analyze hash_partitioned') + node.psql('postgres', """ + create or replace function query_plan(query 
text) returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Helper function for json equality + def ordered(obj): + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj + # Test parallel select with node.connect() as con: con.execute('set max_parallel_workers_per_gather = 2') @@ -443,35 +465,120 @@ def test_parallel_nodes(self): con.execute('set parallel_tuple_cost = 0') # Check parallel aggregate plan - plan = con.execute('explain (costs off) select count(*) from range_partitioned where i < 1500') - expected = [('Finalize Aggregate',), - (' -> Gather',), - (' Workers Planned: 2',), - (' -> Partial Aggregate',), - (' -> Append',), - (' -> Parallel Seq Scan on range_partitioned_1',), - (' -> Parallel Seq Scan on range_partitioned_2',), - (' Filter: (i < 1500)',)] - self.assertEqual(plan, expected) + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", 
+ "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500') - self.assertEqual(count[0][0], 1499) + count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) # Check simple parallel seq scan plan with limit - plan = con.execute('explain (costs off) select * from range_partitioned where i < 1500 limit 5') - expected = [('Limit',), - (' -> Gather',), - (' Workers Planned: 2',), - (' -> Append',), - (' -> Parallel Seq Scan on range_partitioned_1',), - (' -> Parallel Seq Scan on range_partitioned_2',), - (' Filter: (i < 1500)',)] - self.assertEqual(plan, expected) + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) # Check tuples returned by query above res_tuples = con.execute('select * from range_partitioned where i < 1500 
limit 5') - expected = [(1,), (2,), (3,), (4,), (5,)] + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] self.assertEqual(res_tuples, expected) # import ipdb; ipdb.set_trace() From ed178adac61e596633411a6b49574677f99735cb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 2 Nov 2016 17:59:17 +0300 Subject: [PATCH 0042/1124] introduce 'planner_tree_modification' subsystem for Plan- and Query- tree modification, refactoring, new query tree walker (fix issue #53) --- Makefile | 2 +- expected/pathman_basic.out | 2 +- src/hooks.c | 47 +-- src/partition_filter.c | 46 +-- src/partition_filter.h | 2 - src/pathman.h | 16 - src/pg_pathman.c | 191 +--------- src/planner_tree_modification.c | 616 ++++++++++++++++++++++++++++++++ src/planner_tree_modification.h | 50 +++ src/utils.c | 235 ------------ src/utils.h | 12 - 11 files changed, 692 insertions(+), 527 deletions(-) create mode 100644 src/planner_tree_modification.c create mode 100644 src/planner_tree_modification.h diff --git a/Makefile b/Makefile index 58e5e939..0b73f7b7 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/copy_stmt_hooking.o \ - src/pg_compat.o $(WIN32RES) + src/planner_tree_modification.o src/pg_compat.o $(WIN32RES) EXTENSION = pg_pathman EXTVERSION = 1.1 diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 41ab6ab5..9716c764 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -191,7 +191,7 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; (1 row) SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; -ERROR: It is prohibited to query partitioned tables both with and without ONLY modifier +ERROR: It is prohibited to 
apply ONLY modifier to partitioned tables which have already been mentioned without ONLY SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; diff --git a/src/hooks.c b/src/hooks.c index 42f9cc79..23330628 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -13,6 +13,7 @@ #include "init.h" #include "partition_filter.h" #include "pg_compat.h" +#include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" #include "utils.h" @@ -202,8 +203,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, return; /* pg_pathman is not ready */ /* This works only for SELECT queries (at least for now) */ - if (root->parse->commandType != CMD_SELECT || - !list_member_oid(inheritance_enabled_relids, rte->relid)) + if (root->parse->commandType != CMD_SELECT) + return; + + /* Skip if this table is not allowed to act as parent (see FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_parenthood_status(root->query_level, + rte->relid)) return; /* Proceed iff relation 'rel' is partitioned */ @@ -223,8 +228,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (prel->parttype == PT_RANGE) { /* - * Get pathkeys for ascending and descending sort by partition - * column + * Get pathkeys for ascending and descending sort by partition column */ List *pathkeys; Var *var; @@ -438,29 +442,11 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ } while (0) - PlannedStmt *result; + PlannedStmt *result; - /* FIXME: fix these commands (traverse whole query tree) */ + /* Modify query tree if needed */ if (IsPathmanReady()) - { - switch(parse->commandType) - { - case CMD_SELECT: - disable_inheritance(parse); - rowmark_add_tableoids(parse); /* add attributes for rowmarks */ - break; - - case CMD_UPDATE: - case CMD_DELETE: - disable_inheritance_cte(parse); - disable_inheritance_subselect(parse); - handle_modification_query(parse); - break; - - default: - 
break; - } - } + pathman_transform_query(parse); /* Invoke original hook if needed */ if (planner_hook_next) @@ -475,12 +461,10 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - } - list_free(inheritance_disabled_relids); - list_free(inheritance_enabled_relids); - inheritance_disabled_relids = NIL; - inheritance_enabled_relids = NIL; + /* Free all parenthood lists (see pathman_transform_query()) */ + reset_parenthood_statuses(); + } return result; } @@ -516,9 +500,6 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } - - inheritance_disabled_relids = NIL; - inheritance_enabled_relids = NIL; } /* diff --git a/src/partition_filter.c b/src/partition_filter.c index 51f09923..5d596852 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -11,6 +11,7 @@ #include "init.h" #include "nodes_common.h" #include "partition_filter.h" +#include "planner_tree_modification.h" #include "utils.h" #include "foreign/fdwapi.h" @@ -63,7 +64,6 @@ CustomExecMethods partition_filter_exec_methods; static estate_mod_data * fetch_estate_mod_data(EState *estate); -static void partition_filter_visitor(Plan *plan, void *context); static List * pfilter_build_tlist(List *tlist); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); @@ -112,17 +112,6 @@ init_partition_filter_static_data(void) } -/* - * Add PartitionFilter nodes to the plan tree - */ -void -add_partition_filters(List *rtable, Plan *plan) -{ - if (pg_pathman_enable_partition_filter) - plan_tree_walker(plan, partition_filter_visitor, rtable); -} - - /* * Initialize ResultPartsStorage (hash table etc). 
*/ @@ -804,36 +793,3 @@ pfilter_build_tlist(List *tlist) return result_tlist; } - -/* - * Add partition filters to ModifyTable node's children. - * - * 'context' should point to the PlannedStmt->rtable. - */ -static void -partition_filter_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2; - - /* Skip if not ModifyTable with 'INSERT' command */ - if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) - return; - - Assert(rtable && IsA(rtable, List)); - - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) - { - Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const PartRelationInfo *prel = get_pathman_relation_info(relid); - - /* Check that table is partitioned */ - if (prel) - lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), - relid, - modify_table->onConflictAction); - } -} diff --git a/src/partition_filter.h b/src/partition_filter.h index f0cf0584..899d163e 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -93,8 +93,6 @@ extern CustomExecMethods partition_filter_exec_methods; void init_partition_filter_static_data(void); -void add_partition_filters(List *rtable, Plan *plan); - /* ResultPartsStorage init\fini\scan function */ void init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, diff --git a/src/pathman.h b/src/pathman.h index 84d71dd9..d887b510 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -106,17 +106,6 @@ typedef enum } search_rangerel_result; -/* - * The list of partitioned relation relids that must be handled by pg_pathman - */ -extern List *inheritance_enabled_relids; - -/* - * This list is used to ensure that partitioned relation isn't used both - * with and without ONLY modifiers - */ -extern List *inheritance_disabled_relids; - /* * pg_pathman's global state. 
*/ @@ -133,11 +122,6 @@ search_rangerel_result search_range_partition_eq(const Datum value, uint32 hash_to_part_index(uint32 value, uint32 partitions); -void handle_modification_query(Query *parse); -void disable_inheritance(Query *parse); -void disable_inheritance_cte(Query *parse); -void disable_inheritance_subselect(Query *parse); - /* copied from allpaths.h */ void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..d44805d6 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,6 +16,7 @@ #include "hooks.h" #include "utils.h" #include "partition_filter.h" +#include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" #include "xact_handling.h" @@ -48,8 +49,6 @@ PG_MODULE_MAGIC; -List *inheritance_disabled_relids = NIL; -List *inheritance_enabled_relids = NIL; PathmanState *pmstate; Oid pathman_config_relid = InvalidOid; Oid pathman_config_params_relid = InvalidOid; @@ -60,7 +59,6 @@ void _PG_init(void); /* Utility functions */ static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); -static bool disable_inheritance_subselect_walker(Node *node, void *context); /* "Partition creation"-related functions */ static Datum extract_binary_interval_from_text(Datum interval_text, @@ -97,7 +95,9 @@ static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, List *all_child_pathkeys, PathKey *pathkeyAsc, PathKey *pathkeyDesc); -static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); +static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, + RelOptInfo *rel, + Relids required_outer); /* @@ -122,7 +122,11 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? 
IsA((node), Param) : false) ) #define ExtractConst(wcxt, node) \ - ( IsA((node), Param) ? extract_const((wcxt), (Param *) (node)) : ((Const *) (node)) ) + ( \ + IsA((node), Param) ? \ + extract_const((wcxt), (Param *) (node)) : \ + ((Const *) (node)) \ + ) /* @@ -173,183 +177,6 @@ _PG_init(void) init_partition_filter_static_data(); } -/* - * Disables inheritance for partitioned by pathman relations. - * It must be done to prevent PostgresSQL from exhaustive search. - */ -void -disable_inheritance(Query *parse) -{ - const PartRelationInfo *prel; - RangeTblEntry *rte; - MemoryContext oldcontext; - ListCell *lc; - - /* If query contains CTE (WITH statement) then handle subqueries too */ - disable_inheritance_cte(parse); - - /* If query contains subselects */ - disable_inheritance_subselect(parse); - - foreach(lc, parse->rtable) - { - rte = (RangeTblEntry *) lfirst(lc); - - switch(rte->rtekind) - { - case RTE_RELATION: - if (rte->inh) - { - /* Look up this relation in pathman local cache */ - prel = get_pathman_relation_info(rte->relid); - if (prel) - { - /* We'll set this flag later */ - rte->inh = false; - - /* - * Sometimes user uses the ONLY statement and in this case - * rte->inh is also false. We should differ the case - * when user uses ONLY statement from case when we - * make rte->inh false intentionally. - */ - oldcontext = MemoryContextSwitchTo(TopMemoryContext); - inheritance_enabled_relids = \ - lappend_oid(inheritance_enabled_relids, rte->relid); - MemoryContextSwitchTo(oldcontext); - - /* - * Check if relation was already found with ONLY modifier. 
In - * this case throw an error because we cannot handle - * situations when partitioned table used both with and - * without ONLY modifier in SELECT queries - */ - if (list_member_oid(inheritance_disabled_relids, rte->relid)) - goto disable_error; - - goto disable_next; - } - } - - oldcontext = MemoryContextSwitchTo(TopMemoryContext); - inheritance_disabled_relids = \ - lappend_oid(inheritance_disabled_relids, rte->relid); - MemoryContextSwitchTo(oldcontext); - - /* Check if relation was already found withoud ONLY modifier */ - if (list_member_oid(inheritance_enabled_relids, rte->relid)) - goto disable_error; - break; - case RTE_SUBQUERY: - /* Recursively disable inheritance for subqueries */ - disable_inheritance(rte->subquery); - break; - default: - break; - } - -disable_next: - ; - } - - return; - -disable_error: - elog(ERROR, "It is prohibited to query partitioned tables both " - "with and without ONLY modifier"); -} - -void -disable_inheritance_cte(Query *parse) -{ - ListCell *lc; - - foreach(lc, parse->cteList) - { - CommonTableExpr *cte = (CommonTableExpr*) lfirst(lc); - - if (IsA(cte->ctequery, Query)) - disable_inheritance((Query *) cte->ctequery); - } -} - -void -disable_inheritance_subselect(Query *parse) -{ - Node *quals; - - if (!parse->jointree || !parse->jointree->quals) - return; - - quals = parse->jointree->quals; - disable_inheritance_subselect_walker(quals, NULL); -} - -static bool -disable_inheritance_subselect_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, SubLink)) - { - disable_inheritance((Query *) (((SubLink *) node)->subselect)); - return false; - } - - return expression_tree_walker(node, disable_inheritance_subselect_walker, (void *) context); -} - -/* - * Checks if query affects only one partition. 
If true then substitute - */ -void -handle_modification_query(Query *parse) -{ - const PartRelationInfo *prel; - List *ranges; - RangeTblEntry *rte; - WrapperNode *wrap; - Expr *expr; - WalkerContext context; - - Assert(parse->commandType == CMD_UPDATE || - parse->commandType == CMD_DELETE); - Assert(parse->resultRelation > 0); - - rte = rt_fetch(parse->resultRelation, parse->rtable); - prel = get_pathman_relation_info(rte->relid); - - if (!prel) - return; - - /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); - expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - if (!expr) - return; - - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel, NULL, false); - wrap = walk_expr_tree(expr, &context); - - ranges = irange_list_intersect(ranges, wrap->rangeset); - - /* If only one partition is affected then substitute parent table with partition */ - if (irange_list_length(ranges) == 1) - { - IndexRange irange = linitial_irange(ranges); - if (irange.ir_lower == irange.ir_upper) - { - Oid *children = PrelGetChildrenArray(prel); - rte->relid = children[irange.ir_lower]; - rte->inh = false; - } - } - - return; -} - /* * Creates child relation and adds it to root. 
* Returns child index in simple_rel_array diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c new file mode 100644 index 00000000..f99176a3 --- /dev/null +++ b/src/planner_tree_modification.c @@ -0,0 +1,616 @@ +/* ------------------------------------------------------------------------ + * + * planner_tree_modification.c + * Functions for query- and plan- tree modification + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "partition_filter.h" +#include "planner_tree_modification.h" +#include "rangeset.h" + +#include "access/sysattr.h" +#include "catalog/pg_type.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + + +/* Special column name for rowmarks */ +#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) +#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) + + +/* context for pathman_transform_query_walker() */ +typedef struct +{ + int query_level; /* level of current Query */ +} query_transform_cxt; + +static bool pathman_transform_query_walker(Node *node, void *context); + +static void disable_standard_inheritance(Query *parse, query_transform_cxt *cxt); +static void rowmark_add_tableoids(Query *parse, query_transform_cxt *cxt); +static void handle_modification_query(Query *parse, query_transform_cxt *cxt); + +static void partition_filter_visitor(Plan *plan, void *context); + +static void lock_rows_visitor(Plan *plan, void *context); +static List *get_tableoids_list(List *tlist); + + +/* + * This list is used to ensure that partitioned relation + * isn't used with both and without ONLY modifiers + */ +static List *per_query_parenthood_lists = NIL; + + +/* + * Basic plan tree walker + * + * 'visitor' is applied right before return + */ +void +plan_tree_walker(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context) +{ + ListCell *l; + + if 
(plan == NULL) + return; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach(l, ((CustomScan *) plan)->custom_plans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + + case T_ModifyTable: + foreach (l, ((ModifyTable *) plan)->plans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + + /* Since they look alike */ + case T_MergeAppend: + case T_Append: + foreach(l, ((Append *) plan)->appendplans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + + case T_BitmapAnd: + foreach(l, ((BitmapAnd *) plan)->bitmapplans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + + case T_BitmapOr: + foreach(l, ((BitmapOr *) plan)->bitmapplans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + + default: + break; + } + + plan_tree_walker(plan->lefttree, visitor, context); + plan_tree_walker(plan->righttree, visitor, context); + + /* Apply visitor to the current node */ + visitor(plan, context); +} + + +/* + * ------------------------------- + * Walker for Query modification + * ------------------------------- + */ + +/* Perform some transformations on Query tree */ +void +pathman_transform_query(Query *parse) +{ + query_transform_cxt context = { 0 }; + + pathman_transform_query_walker((Node *) parse, (void *) &context); +} + +/* Walker for pathman_transform_query() */ +static bool +pathman_transform_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + query_transform_cxt *cxt = (query_transform_cxt *) context; + bool walker_result; + + /* Increment Query level */ + cxt->query_level++; + + /* Apply Query tree modifiers */ + rowmark_add_tableoids(query, cxt); + disable_standard_inheritance(query, cxt); + 
handle_modification_query(query, cxt); + + /* Handle Query node */ + walker_result = query_tree_walker(query, + pathman_transform_query_walker, + context, + 0); + + /* Decrement Query level */ + cxt->query_level--; + + /* Result of query_tree_walker() */ + return walker_result; + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_transform_query_walker, + context); +} + + +/* + * ---------------------- + * Query tree modifiers + * ---------------------- + */ + +/* + * Disable standard inheritance if table is partitioned by pg_pathman. + * + * This function sets RangeTblEntry::inh flag to false. + */ +static void +disable_standard_inheritance(Query *parse, query_transform_cxt *cxt) +{ + ListCell *lc; + + /* Exit if it's not a SELECT query */ + if (parse->commandType != CMD_SELECT) + return; + + /* Walk through RangeTblEntries list */ + foreach (lc, parse->rtable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + + /* Operate only on simple (non-join etc) relations */ + if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION) + continue; + + /* Table may be partitioned */ + if (rte->inh) + { + const PartRelationInfo *prel; + + /* Proceed if table is partitioned by pg_pathman */ + if ((prel = get_pathman_relation_info(rte->relid)) != NULL) + { + /* We'll set this flag later */ + rte->inh = false; + + /* Try marking it using PARENTHOOD_ALLOWED */ + assign_rel_parenthood_status(cxt->query_level, + rte->relid, + PARENTHOOD_ALLOWED); + } + } + /* Else try marking it using PARENTHOOD_DISALLOWED */ + else assign_rel_parenthood_status(cxt->query_level, + rte->relid, + PARENTHOOD_DISALLOWED); + } +} + +/* + * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions + * + * This is necessary since preprocess_targetlist() heavily + * depends on the 'inh' flag which we have to unset. + * + * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' + * relnames into 'tableoid:rowmarkId'. 
+ */ +static void +rowmark_add_tableoids(Query *parse, query_transform_cxt *cxt) +{ + ListCell *lc; + + /* Generate 'tableoid' for partitioned table rowmark */ + foreach (lc, parse->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(lc); + Oid parent = getrelid(rc->rti, parse->rtable); + Var *var; + TargetEntry *tle; + char resname[64]; + + /* Check that table is partitioned */ + if (!get_pathman_relation_info(parent)) + continue; + + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); + + /* Use parent's Oid as TABLEOID_STR's key (%u) */ + snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); + + tle = makeTargetEntry((Expr *) var, + list_length(parse->targetList) + 1, + pstrdup(resname), + true); + + /* There's no problem here since new attribute is junk */ + parse->targetList = lappend(parse->targetList, tle); + } +} + +/* Checks if query affects only one partition */ +static void +handle_modification_query(Query *parse, query_transform_cxt *cxt) +{ + const PartRelationInfo *prel; + List *ranges; + RangeTblEntry *rte; + WrapperNode *wrap; + Expr *expr; + WalkerContext context; + + /* Exit if it's not a DELETE or UPDATE query */ + if (parse->resultRelation == 0 || + (parse->commandType != CMD_UPDATE && + parse->commandType != CMD_DELETE)) + return; + + rte = rt_fetch(parse->resultRelation, parse->rtable); + prel = get_pathman_relation_info(rte->relid); + + /* Exit if it's not partitioned */ + if (!prel) return; + + /* Parse syntax tree and extract partition ranges */ + ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); + + /* Exit if there's no expr (no use) */ + if (!expr) return; + + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel, NULL, false); + wrap = walk_expr_tree(expr, &context); + + ranges = irange_list_intersect(ranges, wrap->rangeset); + + /* + * If only one partition 
is affected, + * substitute parent table with partition. + */ + if (irange_list_length(ranges) == 1) + { + IndexRange irange = linitial_irange(ranges); + + if (irange.ir_lower == irange.ir_upper) + { + Oid *children = PrelGetChildrenArray(prel); + + rte->relid = children[irange.ir_lower]; + + /* Disable standard planning */ + rte->inh = false; + } + } +} + + +/* + * ------------------------------- + * PartitionFilter-related stuff + * ------------------------------- + */ + +/* Add PartitionFilter nodes to the plan tree */ +void +add_partition_filters(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_filter) + plan_tree_walker(plan, partition_filter_visitor, rtable); +} + +/* + * Add partition filters to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. + */ +static void +partition_filter_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2; + + /* Skip if not ModifyTable with 'INSERT' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) + return; + + Assert(rtable && IsA(rtable, List)); + + forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); + const PartRelationInfo *prel = get_pathman_relation_info(relid); + + /* Check that table is partitioned */ + if (prel) + lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), + relid, + modify_table->onConflictAction); + } +} + + +/* + * ----------------------- + * Rowmark-related stuff + * ----------------------- + */ + +/* Final rowmark processing for partitioned tables */ +void +postprocess_lock_rows(List *rtable, Plan *plan) +{ + plan_tree_walker(plan, lock_rows_visitor, rtable); +} + +/* + * Extract target entries with resnames beginning with TABLEOID_STR + * and var->varoattno == TableOidAttributeNumber + */ +static List * 
+get_tableoids_list(List *tlist) +{ + List *result = NIL; + ListCell *lc; + + foreach (lc, tlist) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *var = (Var *) te->expr; + + if (!IsA(var, Var)) + continue; + + /* Check that column name begins with TABLEOID_STR & it's tableoid */ + if (var->varoattno == TableOidAttributeNumber && + (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && + 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) + { + result = lappend(result, te); + } + } + + return result; +} + +/* + * Find 'TABLEOID_STR%u' attributes that were manually + * created for partitioned tables and replace Oids + * (used for '%u') with expected rc->rowmarkIds + */ +static void +lock_rows_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + LockRows *lock_rows = (LockRows *) plan; + Plan *lock_child = outerPlan(plan); + List *tableoids; + ListCell *lc; + + if (!IsA(lock_rows, LockRows)) + return; + + Assert(rtable && IsA(rtable, List) && lock_child); + + /* Select tableoid attributes that must be renamed */ + tableoids = get_tableoids_list(lock_child->targetlist); + if (!tableoids) + return; /* this LockRows has nothing to do with partitioned table */ + + foreach (lc, lock_rows->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + Oid parent_oid = getrelid(rc->rti, rtable); + ListCell *mark_lc; + List *finished_tes = NIL; /* postprocessed target entries */ + + foreach (mark_lc, tableoids) + { + TargetEntry *te = (TargetEntry *) lfirst(mark_lc); + const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); + Datum cur_oid_datum; + + cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); + + if (DatumGetObjectId(cur_oid_datum) == parent_oid) + { + char resname[64]; + + /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); + te->resname = pstrdup(resname); + + finished_tes = lappend(finished_tes, te); + 
} + } + + /* Remove target entries that have been processed in this step */ + foreach (mark_lc, finished_tes) + tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); + + if (list_length(tableoids) == 0) + break; /* nothing to do */ + } +} + + +/* + * ----------------------------------------------- + * Parenthood safety checks (SELECT * FROM ONLY) + * ----------------------------------------------- + */ + +/* private struct stored by parenthood lists */ +typedef struct +{ + rel_parenthood_status parenthood_status; + Oid relid; +} cached_parenthood_status; + + +inline static rel_parenthood_status +list_member_parenthood(List *list, Oid relid) +{ + ListCell *lc; + + foreach (lc, list) + { + cached_parenthood_status *status; + + status = (cached_parenthood_status *) lfirst(lc); + + if (status->relid == relid) + return status->parenthood_status; + } + + return PARENTHOOD_NOT_SET; +} + +inline static void +list_free_parenthood(List *list) +{ + list_free_deep(list); +} + +inline static List * +lappend_parenthood(List *list, Oid relid, rel_parenthood_status new_status) +{ + cached_parenthood_status *status; + + status = palloc(sizeof(cached_parenthood_status)); + status->parenthood_status = new_status; + status->relid = relid; + + return lappend(list, (void *) status); +} + +/* Set parenthood status (per query level) */ +void +assign_rel_parenthood_status(Index query_level, + Oid relid, + rel_parenthood_status new_status) +{ + List *nth_parenthood_list; + ListCell *nth_parenthood_cell = NULL; + rel_parenthood_status existing_status; + MemoryContext old_mcxt; + + Assert(query_level > 0); + + /* Create new ListCells if it's a new Query level */ + while (query_level > list_length(per_query_parenthood_lists)) + { + old_mcxt = MemoryContextSwitchTo(TopTransactionContext); + per_query_parenthood_lists = lappend(per_query_parenthood_lists, NIL); + MemoryContextSwitchTo(old_mcxt); + + nth_parenthood_cell = list_tail(per_query_parenthood_lists); + } + + /* Else fetch an existing 
ListCell */ + if (nth_parenthood_cell == NULL) + nth_parenthood_cell = list_nth_cell(per_query_parenthood_lists, + query_level - 1); + + /* Extract parenthood list from ListCell */ + nth_parenthood_list = (List *) lfirst(nth_parenthood_cell); + + /* Search for a parenthood status */ + existing_status = list_member_parenthood(nth_parenthood_list, relid); + + /* Parenthood statuses mismatched, emit an ERROR */ + if (existing_status != new_status && existing_status != PARENTHOOD_NOT_SET) + { + /* Don't forget to clear all lists! */ + reset_parenthood_statuses(); + + elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " + "tables which have already been mentioned without ONLY"); + } + + /* Append new element (relid, status) */ + old_mcxt = MemoryContextSwitchTo(TopTransactionContext); + nth_parenthood_list = lappend_parenthood(nth_parenthood_list, relid, new_status); + MemoryContextSwitchTo(old_mcxt); + + /* Update ListCell */ + lfirst(nth_parenthood_cell) = nth_parenthood_list; +} + +/* Get parenthood status (per query level) */ +rel_parenthood_status +get_parenthood_status(Index query_level, Oid relid) +{ + List *nth_parenthood_list; + + Assert(query_level > 0); + + /* Return PARENTHOOD_NOT_SET if there's no such level */ + if (query_level > list_length(per_query_parenthood_lists)) + return PARENTHOOD_NOT_SET; + + /* Fetch a parenthood list for a Query indentified by 'query_level' */ + nth_parenthood_list = (List *) list_nth(per_query_parenthood_lists, + query_level - 1); + + return list_member_parenthood(nth_parenthood_list, relid); +} + +/* Reset all cached statuses (query end) */ +void +reset_parenthood_statuses(void) +{ + ListCell *lc; + + /* Clear parenthood lists for each Query level */ + foreach (lc, per_query_parenthood_lists) + list_free_parenthood((List *) lfirst(lc)); + + /* Now free the main list and point it to NIL */ + list_free(per_query_parenthood_lists); + per_query_parenthood_lists = NIL; +} diff --git 
a/src/planner_tree_modification.h b/src/planner_tree_modification.h new file mode 100644 index 00000000..ee7bfc1e --- /dev/null +++ b/src/planner_tree_modification.h @@ -0,0 +1,50 @@ +/* ------------------------------------------------------------------------ + * + * planner_tree_modification.h + * Functions for query- and plan- tree modification + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PLANNER_TREE_MODIFICATION_H +#define PLANNER_TREE_MODIFICATION_H + + +#include "pathman.h" + +#include "postgres.h" +#include "utils/rel.h" +#include "nodes/relation.h" +#include "nodes/nodeFuncs.h" + + +/* Plan tree rewriting utility */ +void plan_tree_walker(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context); + +/* Query tree rewriting utility */ +void pathman_transform_query(Query *parse); + +/* These functions scribble on Plan tree */ +void add_partition_filters(List *rtable, Plan *plan); +void postprocess_lock_rows(List *rtable, Plan *plan); + + +/* used by assign_rel_parenthood_status() etc */ +typedef enum +{ + PARENTHOOD_NOT_SET = 0, /* relation hasn't been tracked */ + PARENTHOOD_DISALLOWED, /* children are disabled (e.g. 
ONLY) */ + PARENTHOOD_ALLOWED /* children are enabled (default) */ +} rel_parenthood_status; + +void assign_rel_parenthood_status(Index query_level, Oid relid, + rel_parenthood_status new_status); +rel_parenthood_status get_parenthood_status(Index query_level, Oid relid); +void reset_parenthood_statuses(void); + + +#endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/utils.c b/src/utils.c index 831d5a24..ff3e762e 100644 --- a/src/utils.c +++ b/src/utils.c @@ -21,7 +21,6 @@ #include "commands/extension.h" #include "miscadmin.h" #include "optimizer/var.h" -#include "optimizer/restrictinfo.h" #include "parser/parse_oper.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -30,17 +29,10 @@ #include "utils/typcache.h" -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - - static bool clause_contains_params_walker(Node *node, void *context); static void change_varnos_in_restrinct_info(RestrictInfo *rinfo, change_varno_context *context); static bool change_varno_walker(Node *node, change_varno_context *context); -static List *get_tableoids_list(List *tlist); -static void lock_rows_visitor(Plan *plan, void *context); -static bool rowmark_add_tableoids_walker(Node *node, void *context); /* @@ -66,96 +58,6 @@ clause_contains_params_walker(Node *node, void *context) context); } -/* - * Extract target entries with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) -{ - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - 
result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if (!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); - - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; - - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); - - finished_tes = lappend(finished_tes, te); - } - } - - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); - - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } -} - /* * Print Bitmapset as cstring. 
*/ @@ -331,143 +233,6 @@ change_varnos_in_restrinct_info(RestrictInfo *rinfo, change_varno_context *conte } } -/* - * Basic plan tree walker - * - * 'visitor' is applied right before return - */ -void -plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context) -{ - ListCell *l; - - if (plan == NULL) - return; - - check_stack_depth(); - - /* Plan-type-specific fixes */ - switch (nodeTag(plan)) - { - case T_SubqueryScan: - plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); - break; - - case T_CustomScan: - foreach(l, ((CustomScan *) plan)->custom_plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_ModifyTable: - foreach (l, ((ModifyTable *) plan)->plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - /* Since they look alike */ - case T_MergeAppend: - case T_Append: - foreach(l, ((Append *) plan)->appendplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_BitmapAnd: - foreach(l, ((BitmapAnd *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_BitmapOr: - foreach(l, ((BitmapOr *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - default: - break; - } - - plan_tree_walker(plan->lefttree, visitor, context); - plan_tree_walker(plan->righttree, visitor, context); - - /* Apply visitor to the current node */ - visitor(plan, context); -} - -static bool -rowmark_add_tableoids_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, Query)) - { - Query *parse = (Query *) node; - ListCell *lc; - - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if 
(!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } - - return query_tree_walker((Query *) node, - rowmark_add_tableoids_walker, - NULL, 0); - } - - return expression_tree_walker(node, rowmark_add_tableoids_walker, NULL); -} - -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. - */ -void -rowmark_add_tableoids(Query *parse) -{ - rowmark_add_tableoids_walker((Node *) parse, NULL); -} - -/* - * Final rowmark processing for partitioned tables - */ -void -postprocess_lock_rows(List *rtable, Plan *plan) -{ - plan_tree_walker(plan, lock_rows_visitor, rtable); -} - /* * Returns pg_pathman schema's Oid or InvalidOid if that's not possible. */ diff --git a/src/utils.h b/src/utils.h index 4222f549..c7a28682 100644 --- a/src/utils.h +++ b/src/utils.h @@ -26,20 +26,8 @@ typedef struct } change_varno_context; -/* - * Plan tree modification. - */ -void plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context); void change_varnos(Node *node, Oid old_varno, Oid new_varno); -/* - * Rowmark processing. - */ -void rowmark_add_tableoids(Query *parse); -void postprocess_lock_rows(List *rtable, Plan *plan); - /* * Various traits. 
*/ From 2704b143efb6d58c88541e287c81ef0030036962 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 2 Nov 2016 19:06:42 +0300 Subject: [PATCH 0043/1124] fixed bogus coding in assign_rel_parenthood_status() --- src/planner_tree_modification.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index f99176a3..e8844ce8 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -504,7 +504,12 @@ list_member_parenthood(List *list, Oid relid) status = (cached_parenthood_status *) lfirst(lc); if (status->relid == relid) + { + /* This should NEVER happen! */ + Assert(status->parenthood_status != PARENTHOOD_NOT_SET); + return status->parenthood_status; + } } return PARENTHOOD_NOT_SET; @@ -562,8 +567,21 @@ assign_rel_parenthood_status(Index query_level, /* Search for a parenthood status */ existing_status = list_member_parenthood(nth_parenthood_list, relid); + /* Append new status entry if we couldn't find it */ + if (existing_status == PARENTHOOD_NOT_SET) + { + /* Append new element (relid, status) */ + old_mcxt = MemoryContextSwitchTo(TopTransactionContext); + nth_parenthood_list = lappend_parenthood(nth_parenthood_list, + relid, new_status); + MemoryContextSwitchTo(old_mcxt); + + /* Update ListCell */ + lfirst(nth_parenthood_cell) = nth_parenthood_list; + } + /* Parenthood statuses mismatched, emit an ERROR */ - if (existing_status != new_status && existing_status != PARENTHOOD_NOT_SET) + else if (existing_status != new_status) { /* Don't forget to clear all lists! 
*/ reset_parenthood_statuses(); @@ -571,14 +589,6 @@ assign_rel_parenthood_status(Index query_level, elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " "tables which have already been mentioned without ONLY"); } - - /* Append new element (relid, status) */ - old_mcxt = MemoryContextSwitchTo(TopTransactionContext); - nth_parenthood_list = lappend_parenthood(nth_parenthood_list, relid, new_status); - MemoryContextSwitchTo(old_mcxt); - - /* Update ListCell */ - lfirst(nth_parenthood_cell) = nth_parenthood_list; } /* Get parenthood status (per query level) */ From d0fb919044760c389bab75f7420aeddadac28352 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 3 Nov 2016 12:20:33 +0300 Subject: [PATCH 0044/1124] Add tests for special case with ONLY statement --- expected/pathman_basic.out | 20 ++++++++++++++++++++ sql/pathman_basic.sql | 8 ++++++++ 2 files changed, 28 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 9716c764..097f69ef 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -190,8 +190,28 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; 0 (1 row) +/* test special case: ONLY statement with not-ONLY for partitioned table */ SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; +ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; +ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel; +ERROR: It is prohibited to apply ONLY modifier to partitioned 
tables which have already been mentioned without ONLY +/* FIXME: result of next command execution is not right just yet */ +WITH q1 AS (SELECT * FROM test.range_rel), q2 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM q1 JOIN q2 USING(id); +ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +WITH q1 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM test.range_rel JOIN q1 USING(id); + id | dt | txt | dt | txt +----+----+-----+----+----- +(0 rows) + +SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); + id | dt | txt +----+----+----- +(0 rows) + SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 0fd56748..7111de82 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -57,7 +57,15 @@ INSERT INTO test.num_range_rel SELECT COUNT(*) FROM test.num_range_rel; SELECT COUNT(*) FROM ONLY test.num_range_rel; +/* test special case: ONLY statement with not-ONLY for partitioned table */ SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; +SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; +SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; +SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel; +/* FIXME: result of next command execution is not right just yet */ +WITH q1 AS (SELECT * FROM test.range_rel), q2 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM q1 JOIN q2 USING(id); +WITH q1 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM test.range_rel JOIN q1 USING(id); +SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; From 989092cbddce06ed80af1c7e66f2212740bdeeeb Mon Sep 17 
00:00:00 2001 From: Dmitry Ivanov Date: Thu, 3 Nov 2016 16:07:05 +0300 Subject: [PATCH 0045/1124] improve assign_rel_parenthood_status() and some other functions, implement Query tracking via hashtable, HACK for pg_stat_statements --- expected/pathman_basic.out | 217 +++++++++++++++++++++++--- sql/pathman_basic.sql | 50 +++++- src/hooks.c | 28 +++- src/planner_tree_modification.c | 262 +++++++++++++++----------------- src/planner_tree_modification.h | 7 +- 5 files changed, 387 insertions(+), 177 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 097f69ef..1d2401fc 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -191,27 +191,206 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; (1 row) /* test special case: ONLY statement with not-ONLY for partitioned table */ -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; -ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; -ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; -ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel; -ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -/* FIXME: result of next command execution is not right just yet */ -WITH q1 AS (SELECT * FROM test.range_rel), q2 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM q1 JOIN q2 USING(id); +CREATE TABLE test.from_only_test(val INT NOT NULL); +INSERT INTO test.from_only_test SELECT generate_series(1, 20); +SELECT 
pathman.create_range_partitions('test.from_only_test', 'val', 1, 2); +NOTICE: sequence "from_only_test_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test.from_only_test +UNION SELECT * FROM test.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test +UNION SELECT * FROM ONLY test.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test +UNION SELECT * FROM test.from_only_test +UNION SELECT * FROM ONLY test.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan 
on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test.from_only_test +UNION SELECT * FROM test.from_only_test +UNION SELECT * FROM test.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* 
not ok, ONLY|non-ONLY in one query */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test a JOIN ONLY test.from_only_test b USING(val); ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -WITH q1 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM test.range_rel JOIN q1 USING(id); - id | dt | txt | dt | txt -----+----+-----+----+----- -(0 rows) +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test.from_only_test), + q2 AS (SELECT * FROM ONLY test.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------- + Hash Join + Hash Cond: (q1.val = q2.val) + CTE q1 + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + CTE q2 + -> Seq Scan on from_only_test + -> CTE Scan on q1 + -> Hash + -> CTE Scan on q2 +(19 rows) -SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); - id | dt | txt -----+----+----- -(0 rows) +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test.from_only_test) +SELECT * FROM test.from_only_test JOIN q1 USING(val); + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (from_only_test_1.val = q1.val) + CTE q1 + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Hash + -> CTE Scan on q1 +(17 rows) +EXPLAIN (COSTS OFF) +SELECT * FROM 
test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); + QUERY PLAN +-------------------------------------------------------- + Append + InitPlan 1 (returns $0) + -> Limit + -> Seq Scan on range_rel + -> Index Scan using range_rel_1_pkey on range_rel_1 + Index Cond: (id = $0) + -> Index Scan using range_rel_2_pkey on range_rel_2 + Index Cond: (id = $0) + -> Index Scan using range_rel_3_pkey on range_rel_3 + Index Cond: (id = $0) + -> Index Scan using range_rel_4_pkey on range_rel_4 + Index Cond: (id = $0) +(12 rows) + +DROP TABLE test.from_only_test CASCADE; +NOTICE: drop cascades to 10 other objects SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; @@ -1449,7 +1628,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM messages; (3 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 14 other objects DROP EXTENSION pg_pathman CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 7111de82..25269e1d 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -58,15 +58,51 @@ SELECT COUNT(*) FROM test.num_range_rel; SELECT COUNT(*) FROM ONLY test.num_range_rel; /* test special case: ONLY statement with not-ONLY for partitioned table */ -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; -SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; -SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM ONLY test.range_rel; -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel UNION SELECT * FROM test.range_rel; -/* FIXME: result of next command execution is not right just yet */ -WITH q1 AS (SELECT * FROM test.range_rel), q2 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM q1 JOIN q2 USING(id); -WITH q1 AS (SELECT * FROM ONLY test.range_rel) SELECT * FROM test.range_rel JOIN q1 USING(id); +CREATE 
TABLE test.from_only_test(val INT NOT NULL); +INSERT INTO test.from_only_test SELECT generate_series(1, 20); +SELECT pathman.create_range_partitions('test.from_only_test', 'val', 1, 2); + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test.from_only_test +UNION SELECT * FROM test.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test +UNION SELECT * FROM ONLY test.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test +UNION SELECT * FROM test.from_only_test +UNION SELECT * FROM ONLY test.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test.from_only_test +UNION SELECT * FROM test.from_only_test +UNION SELECT * FROM test.from_only_test; + +/* not ok, ONLY|non-ONLY in one query */ +EXPLAIN (COSTS OFF) +SELECT * FROM test.from_only_test a JOIN ONLY test.from_only_test b USING(val); + +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test.from_only_test), + q2 AS (SELECT * FROM ONLY test.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test.from_only_test) +SELECT * FROM test.from_only_test JOIN q1 USING(val); + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); +DROP TABLE test.from_only_test CASCADE; + + SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; diff --git a/src/hooks.c b/src/hooks.c index 23330628..7048e51c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -199,16 +199,19 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (set_rel_pathlist_hook_next != NULL) set_rel_pathlist_hook_next(root, rel, rti, rte); + /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) - return; /* pg_pathman is not ready */ + return; - /* This works only for SELECT queries (at least for now) */ - if (root->parse->commandType != CMD_SELECT) + /* This works only for SELECTs on simple relations */ + if 
(root->parse->commandType != CMD_SELECT || + rte->rtekind != RTE_RELATION || + rte->relkind != RELKIND_RELATION) return; /* Skip if this table is not allowed to act as parent (see FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_parenthood_status(root->query_level, - rte->relid)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, + rte->relid)) return; /* Proceed iff relation 'rel' is partitioned */ @@ -443,10 +446,16 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) } while (0) PlannedStmt *result; + uint32 query_id = parse->queryId; - /* Modify query tree if needed */ if (IsPathmanReady()) + { + /* Increment parenthood_statuses refcount */ + incr_refcount_parenthood_statuses(); + + /* Modify query tree if needed */ pathman_transform_query(parse); + } /* Invoke original hook if needed */ if (planner_hook_next) @@ -462,8 +471,11 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Free all parenthood lists (see pathman_transform_query()) */ - reset_parenthood_statuses(); + /* Decrement parenthood_statuses refcount */ + decr_refcount_parenthood_statuses(); + + /* HACK: restore queryId set by pg_stat_statements */ + result->queryId = query_id; } return result; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index e8844ce8..0bde6ecb 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -8,6 +8,7 @@ * ------------------------------------------------------------------------ */ +#include "nodes_common.h" #include "partition_filter.h" #include "planner_tree_modification.h" #include "rangeset.h" @@ -25,17 +26,11 @@ #define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) -/* context for pathman_transform_query_walker() */ -typedef struct -{ - int query_level; /* level of current Query */ -} 
query_transform_cxt; - static bool pathman_transform_query_walker(Node *node, void *context); -static void disable_standard_inheritance(Query *parse, query_transform_cxt *cxt); -static void rowmark_add_tableoids(Query *parse, query_transform_cxt *cxt); -static void handle_modification_query(Query *parse, query_transform_cxt *cxt); +static void disable_standard_inheritance(Query *parse); +static void rowmark_add_tableoids(Query *parse); +static void handle_modification_query(Query *parse); static void partition_filter_visitor(Plan *plan, void *context); @@ -44,10 +39,35 @@ static List *get_tableoids_list(List *tlist); /* - * This list is used to ensure that partitioned relation - * isn't used with both and without ONLY modifiers + * This table is used to ensure that partitioned relation + * cant't be used with both and without ONLY modifiers. + */ +static HTAB *per_table_parenthood_mapping = NULL; +static uint32 per_table_parenthood_mapping_refcount = 0; + +/* + * We have to mark each Query with a unique id in order + * to recognize them properly. 
*/ -static List *per_query_parenthood_lists = NIL; +#define QUERY_ID_INITIAL 0 +static uint32 latest_query_id = QUERY_ID_INITIAL; + +static inline void +assign_query_id(Query *query) +{ + uint32 prev_id = latest_query_id++; + + if (prev_id > latest_query_id) + elog(WARNING, "assign_query_id(): queryId overflow"); + + query->queryId = latest_query_id; +} + +static inline void +reset_query_id_generator(void) +{ + latest_query_id = QUERY_ID_INITIAL; +} /* @@ -123,9 +143,7 @@ plan_tree_walker(Plan *plan, void pathman_transform_query(Query *parse) { - query_transform_cxt context = { 0 }; - - pathman_transform_query_walker((Node *) parse, (void *) &context); + pathman_transform_query_walker((Node *) parse, NULL); } /* Walker for pathman_transform_query() */ @@ -137,29 +155,21 @@ pathman_transform_query_walker(Node *node, void *context) else if (IsA(node, Query)) { - Query *query = (Query *) node; - query_transform_cxt *cxt = (query_transform_cxt *) context; - bool walker_result; + Query *query = (Query *) node; - /* Increment Query level */ - cxt->query_level++; + /* Assign Query a 'queryId' */ + assign_query_id(query); /* Apply Query tree modifiers */ - rowmark_add_tableoids(query, cxt); - disable_standard_inheritance(query, cxt); - handle_modification_query(query, cxt); + rowmark_add_tableoids(query); + disable_standard_inheritance(query); + handle_modification_query(query); /* Handle Query node */ - walker_result = query_tree_walker(query, - pathman_transform_query_walker, - context, - 0); - - /* Decrement Query level */ - cxt->query_level--; - - /* Result of query_tree_walker() */ - return walker_result; + return query_tree_walker(query, + pathman_transform_query_walker, + context, + 0); } /* Handle expression subtree */ @@ -181,7 +191,7 @@ pathman_transform_query_walker(Node *node, void *context) * This function sets RangeTblEntry::inh flag to false. 
*/ static void -disable_standard_inheritance(Query *parse, query_transform_cxt *cxt) +disable_standard_inheritance(Query *parse) { ListCell *lc; @@ -210,13 +220,13 @@ disable_standard_inheritance(Query *parse, query_transform_cxt *cxt) rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ - assign_rel_parenthood_status(cxt->query_level, + assign_rel_parenthood_status(parse->queryId, rte->relid, PARENTHOOD_ALLOWED); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ - else assign_rel_parenthood_status(cxt->query_level, + else assign_rel_parenthood_status(parse->queryId, rte->relid, PARENTHOOD_DISALLOWED); } @@ -232,7 +242,7 @@ disable_standard_inheritance(Query *parse, query_transform_cxt *cxt) * relnames into 'tableoid:rowmarkId'. */ static void -rowmark_add_tableoids(Query *parse, query_transform_cxt *cxt) +rowmark_add_tableoids(Query *parse) { ListCell *lc; @@ -271,7 +281,7 @@ rowmark_add_tableoids(Query *parse, query_transform_cxt *cxt) /* Checks if query affects only one partition */ static void -handle_modification_query(Query *parse, query_transform_cxt *cxt) +handle_modification_query(Query *parse) { const PartRelationInfo *prel; List *ranges; @@ -487,140 +497,112 @@ lock_rows_visitor(Plan *plan, void *context) /* private struct stored by parenthood lists */ typedef struct { + Oid relid; /* key (part #1) */ + uint32 queryId; /* key (part #2) */ rel_parenthood_status parenthood_status; - Oid relid; -} cached_parenthood_status; - - -inline static rel_parenthood_status -list_member_parenthood(List *list, Oid relid) -{ - ListCell *lc; - - foreach (lc, list) - { - cached_parenthood_status *status; +} cached_parenthood_status; - status = (cached_parenthood_status *) lfirst(lc); - - if (status->relid == relid) - { - /* This should NEVER happen! 
*/ - Assert(status->parenthood_status != PARENTHOOD_NOT_SET); - - return status->parenthood_status; - } - } - - return PARENTHOOD_NOT_SET; -} - -inline static void -list_free_parenthood(List *list) -{ - list_free_deep(list); -} - -inline static List * -lappend_parenthood(List *list, Oid relid, rel_parenthood_status new_status) -{ - cached_parenthood_status *status; - - status = palloc(sizeof(cached_parenthood_status)); - status->parenthood_status = new_status; - status->relid = relid; - - return lappend(list, (void *) status); -} /* Set parenthood status (per query level) */ void -assign_rel_parenthood_status(Index query_level, +assign_rel_parenthood_status(uint32 query_id, Oid relid, rel_parenthood_status new_status) { - List *nth_parenthood_list; - ListCell *nth_parenthood_cell = NULL; - rel_parenthood_status existing_status; - MemoryContext old_mcxt; - - Assert(query_level > 0); + cached_parenthood_status *status_entry, + key = { relid, query_id, PARENTHOOD_NOT_SET }; + bool found; - /* Create new ListCells if it's a new Query level */ - while (query_level > list_length(per_query_parenthood_lists)) + /* We prefer to init this table lazily */ + if (per_table_parenthood_mapping == NULL) { - old_mcxt = MemoryContextSwitchTo(TopTransactionContext); - per_query_parenthood_lists = lappend(per_query_parenthood_lists, NIL); - MemoryContextSwitchTo(old_mcxt); - - nth_parenthood_cell = list_tail(per_query_parenthood_lists); - } + const long start_elems = 50; + HASHCTL hashctl; - /* Else fetch an existing ListCell */ - if (nth_parenthood_cell == NULL) - nth_parenthood_cell = list_nth_cell(per_query_parenthood_lists, - query_level - 1); + memset(&hashctl, 0, sizeof(HASHCTL)); + hashctl.entrysize = sizeof(cached_parenthood_status); + hashctl.keysize = offsetof(cached_parenthood_status, parenthood_status); + hashctl.hcxt = TopTransactionContext; - /* Extract parenthood list from ListCell */ - nth_parenthood_list = (List *) lfirst(nth_parenthood_cell); + 
per_table_parenthood_mapping = hash_create("Parenthood Storage", + start_elems, &hashctl, + HASH_ELEM | HASH_BLOBS); + } - /* Search for a parenthood status */ - existing_status = list_member_parenthood(nth_parenthood_list, relid); + /* Search by 'key' */ + status_entry = hash_search(per_table_parenthood_mapping, + &key, HASH_ENTER, &found); - /* Append new status entry if we couldn't find it */ - if (existing_status == PARENTHOOD_NOT_SET) + if (found) { - /* Append new element (relid, status) */ - old_mcxt = MemoryContextSwitchTo(TopTransactionContext); - nth_parenthood_list = lappend_parenthood(nth_parenthood_list, - relid, new_status); - MemoryContextSwitchTo(old_mcxt); - - /* Update ListCell */ - lfirst(nth_parenthood_cell) = nth_parenthood_list; - } + /* Saved status conflicts with 'new_status' */ + if (status_entry->parenthood_status != new_status) + { + /* Don't forget to clear all lists! */ + decr_refcount_parenthood_statuses(); - /* Parenthood statuses mismatched, emit an ERROR */ - else if (existing_status != new_status) + elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " + "tables which have already been mentioned without ONLY"); + } + } + else { - /* Don't forget to clear all lists! */ - reset_parenthood_statuses(); + /* This should NEVER happen! 
*/ + Assert(new_status != PARENTHOOD_NOT_SET); - elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " - "tables which have already been mentioned without ONLY"); + status_entry->parenthood_status = new_status; } } /* Get parenthood status (per query level) */ rel_parenthood_status -get_parenthood_status(Index query_level, Oid relid) +get_rel_parenthood_status(uint32 query_id, Oid relid) { - List *nth_parenthood_list; + cached_parenthood_status *status_entry, + key = { relid, query_id, PARENTHOOD_NOT_SET }; - Assert(query_level > 0); + /* Skip if table is not initialized */ + if (per_table_parenthood_mapping) + { + /* Search by 'key' */ + status_entry = hash_search(per_table_parenthood_mapping, + &key, HASH_FIND, NULL); - /* Return PARENTHOOD_NOT_SET if there's no such level */ - if (query_level > list_length(per_query_parenthood_lists)) - return PARENTHOOD_NOT_SET; + if (status_entry) + { + /* This should NEVER happen! */ + Assert(status_entry->parenthood_status != PARENTHOOD_NOT_SET); - /* Fetch a parenthood list for a Query indentified by 'query_level' */ - nth_parenthood_list = (List *) list_nth(per_query_parenthood_lists, - query_level - 1); + /* Return cached parenthood status */ + return status_entry->parenthood_status; + } + } - return list_member_parenthood(nth_parenthood_list, relid); + /* Not found, return stub value */ + return PARENTHOOD_NOT_SET; } -/* Reset all cached statuses (query end) */ +/* Increate usage counter by 1 */ void -reset_parenthood_statuses(void) +incr_refcount_parenthood_statuses(void) { - ListCell *lc; + Assert(per_table_parenthood_mapping_refcount >= 0); + per_table_parenthood_mapping_refcount++; +} - /* Clear parenthood lists for each Query level */ - foreach (lc, per_query_parenthood_lists) - list_free_parenthood((List *) lfirst(lc)); +/* Reset all cached statuses if needed (query end) */ +void +decr_refcount_parenthood_statuses(void) +{ + Assert(per_table_parenthood_mapping_refcount > 0); + 
per_table_parenthood_mapping_refcount--; - /* Now free the main list and point it to NIL */ - list_free(per_query_parenthood_lists); - per_query_parenthood_lists = NIL; + /* Free resources if no one is using them */ + if (per_table_parenthood_mapping_refcount == 0) + { + reset_query_id_generator(); + + hash_destroy(per_table_parenthood_mapping); + per_table_parenthood_mapping = NULL; + } } diff --git a/src/planner_tree_modification.h b/src/planner_tree_modification.h index ee7bfc1e..21ac9a3a 100644 --- a/src/planner_tree_modification.h +++ b/src/planner_tree_modification.h @@ -41,10 +41,11 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled (default) */ } rel_parenthood_status; -void assign_rel_parenthood_status(Index query_level, Oid relid, +void assign_rel_parenthood_status(uint32 query_id, Oid relid, rel_parenthood_status new_status); -rel_parenthood_status get_parenthood_status(Index query_level, Oid relid); -void reset_parenthood_statuses(void); +rel_parenthood_status get_rel_parenthood_status(uint32 query_id, Oid relid); +void incr_refcount_parenthood_statuses(void); +void decr_refcount_parenthood_statuses(void); #endif /* PLANNER_TREE_MODIFICATION_H */ From 10021fa9fff5fc374de463854d34967a72954c2d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 3 Nov 2016 16:22:41 +0300 Subject: [PATCH 0046/1124] reset per_table_parenthood_mapping completely in case of error --- src/hooks.c | 2 +- src/planner_tree_modification.c | 13 +++++++++---- src/planner_tree_modification.h | 2 +- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 7048e51c..5f6484f2 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -472,7 +472,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) ExecuteForPlanTree(result, add_partition_filters); /* Decrement parenthood_statuses refcount */ - decr_refcount_parenthood_statuses(); + decr_refcount_parenthood_statuses(false); /* HACK: restore queryId set by 
pg_stat_statements */ result->queryId = query_id; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 0bde6ecb..d88c5a52 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -538,8 +538,8 @@ assign_rel_parenthood_status(uint32 query_id, /* Saved status conflicts with 'new_status' */ if (status_entry->parenthood_status != new_status) { - /* Don't forget to clear all lists! */ - decr_refcount_parenthood_statuses(); + /* Don't forget to clear ALL tracked statuses! */ + decr_refcount_parenthood_statuses(true); elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " "tables which have already been mentioned without ONLY"); @@ -592,10 +592,15 @@ incr_refcount_parenthood_statuses(void) /* Reset all cached statuses if needed (query end) */ void -decr_refcount_parenthood_statuses(void) +decr_refcount_parenthood_statuses(bool entirely) { Assert(per_table_parenthood_mapping_refcount > 0); - per_table_parenthood_mapping_refcount--; + + /* Should we destroy the table right now? 
*/ + if (entirely) + per_table_parenthood_mapping_refcount = 0; + else + per_table_parenthood_mapping_refcount--; /* Free resources if no one is using them */ if (per_table_parenthood_mapping_refcount == 0) diff --git a/src/planner_tree_modification.h b/src/planner_tree_modification.h index 21ac9a3a..6572de9f 100644 --- a/src/planner_tree_modification.h +++ b/src/planner_tree_modification.h @@ -45,7 +45,7 @@ void assign_rel_parenthood_status(uint32 query_id, Oid relid, rel_parenthood_status new_status); rel_parenthood_status get_rel_parenthood_status(uint32 query_id, Oid relid); void incr_refcount_parenthood_statuses(void); -void decr_refcount_parenthood_statuses(void); +void decr_refcount_parenthood_statuses(bool entirely); #endif /* PLANNER_TREE_MODIFICATION_H */ From 9828acac8016808cd064643d528ea01c5755f62c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 3 Nov 2016 17:44:14 +0300 Subject: [PATCH 0047/1124] add 'prel_varno' to WalkerContext --- src/hooks.c | 5 +++-- src/nodes_common.c | 4 ++-- src/partition_filter.c | 3 ++- src/pathman.h | 4 +++- src/pg_pathman.c | 11 ++++++++--- src/planner_tree_modification.c | 10 +++++++--- 6 files changed, 25 insertions(+), 12 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 5f6484f2..c5260fa4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -98,7 +98,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, { WrapperNode *wrap; - InitWalkerContext(&context, inner_prel, NULL, false); + InitWalkerContext(&context, innerrel->relid, + inner_prel, NULL, false); wrap = walk_expr_tree((Expr *) lfirst(lc), &context); paramsel *= wrap->paramsel; @@ -266,7 +267,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, prel, NULL, false); + InitWalkerContext(&context, rti, prel, NULL, false); wrappers = NIL; foreach(lc, rel->baserestrictinfo) { diff --git 
a/src/nodes_common.c b/src/nodes_common.c index f75bd2f1..b93d069d 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -558,10 +558,10 @@ rescan_append_common(CustomScanState *node) /* First we select all available partitions... */ ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); - InitWalkerContext(&wcxt, prel, econtext, false); + InitWalkerContext(&wcxt, INDEX_VAR, prel, econtext, false); foreach (lc, scan_state->custom_exprs) { - WrapperNode *wn; + WrapperNode *wn; /* ... then we cut off irrelevant ones using the provided clauses */ wn = walk_expr_tree((Expr *) lfirst(lc), &wcxt); diff --git a/src/partition_filter.c b/src/partition_filter.c index 5d596852..c7f959c9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -307,7 +307,8 @@ find_partitions_for_value(Datum value, const PartRelationInfo *prel, CopyToTempConst(constlen, attlen); CopyToTempConst(constbyval, attbyval); - InitWalkerContext(&wcxt, prel, econtext, true); + /* We use 0 since varno doesn't matter for Const */ + InitWalkerContext(&wcxt, 0, prel, econtext, true); ranges = walk_expr_tree((Expr *) &temp_const, &wcxt)->rangeset; return get_partition_oids(ranges, nparts, prel, false); } diff --git a/src/pathman.h b/src/pathman.h index d887b510..7ca156bd 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -140,6 +140,7 @@ typedef struct typedef struct { + Index prel_varno; /* Var::varno associated with prel */ const PartRelationInfo *prel; /* main partitioning structure */ ExprContext *econtext; /* for ExecEvalExpr() */ bool for_insert; /* are we in PartitionFilter now? */ @@ -148,8 +149,9 @@ typedef struct /* * Usual initialization procedure for WalkerContext. 
*/ -#define InitWalkerContext(context, prel_info, ecxt, for_ins) \ +#define InitWalkerContext(context, prel_vno, prel_info, ecxt, for_ins) \ do { \ + (context)->prel_varno = (prel_vno); \ (context)->prel = (prel_info); \ (context)->econtext = (ecxt); \ (context)->for_insert = (for_ins); \ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d44805d6..8d6deed0 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1289,7 +1289,9 @@ pull_var_param(const WalkerContext *ctx, (Var *) left : (Var *) ((RelabelType *) left)->arg; - if (v->varoattno == ctx->prel->attnum) + /* Check if 'v' is partitioned column of 'prel' */ + if (v->varoattno == ctx->prel->attnum && + v->varno == ctx->prel_varno) { *var_ptr = left; *param_ptr = right; @@ -1304,7 +1306,9 @@ pull_var_param(const WalkerContext *ctx, (Var *) right : (Var *) ((RelabelType *) right)->arg; - if (v->varoattno == ctx->prel->attnum) + /* Check if 'v' is partitioned column of 'prel' */ + if (v->varoattno == ctx->prel->attnum && + v->varno == ctx->prel_varno) { *var_ptr = right; *param_ptr = left; @@ -1409,7 +1413,8 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) /* Skip if base types or attribute numbers do not match */ if (getBaseType(var->vartype) != getBaseType(prel->atttype) || - var->varoattno != prel->attnum) + var->varoattno != prel->attnum || /* partitioned attribute */ + var->varno != context->prel_varno) /* partitioned table */ { goto handle_arrexpr_return; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d88c5a52..ab2c4a6d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -289,14 +289,18 @@ handle_modification_query(Query *parse) WrapperNode *wrap; Expr *expr; WalkerContext context; + Index result_rel; + + /* Fetch index of result relation */ + result_rel = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (parse->resultRelation == 0 || + if (result_rel == 0 || (parse->commandType 
!= CMD_UPDATE && parse->commandType != CMD_DELETE)) return; - rte = rt_fetch(parse->resultRelation, parse->rtable); + rte = rt_fetch(result_rel, parse->rtable); prel = get_pathman_relation_info(rte->relid); /* Exit if it's not partitioned */ @@ -310,7 +314,7 @@ handle_modification_query(Query *parse) if (!expr) return; /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel, NULL, false); + InitWalkerContext(&context, result_rel, prel, NULL, false); wrap = walk_expr_tree(expr, &context); ranges = irange_list_intersect(ranges, wrap->rangeset); From 032d81f159bc42a305b42bf16f6a1cf1da4bc2d5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 3 Nov 2016 17:58:34 +0300 Subject: [PATCH 0048/1124] add tests for 'column = IN(...)' for both HASH & RANGE partitioned tables --- expected/pathman_basic.out | 95 ++++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 11 +++++ 2 files changed, 106 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 1d2401fc..f333c4b3 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -517,6 +517,48 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1 -> Seq Scan on num_range_rel_4 (8 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id = 2500) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (500, 1500); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on num_range_rel_1 + Filter: (id = ANY ('{500,1500}'::integer[])) + -> Seq Scan on num_range_rel_2 + Filter: (id = ANY ('{500,1500}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-500, 500, 1500); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on num_range_rel_1 + 
Filter: (id = ANY ('{-500,500,1500}'::integer[])) + -> Seq Scan on num_range_rel_2 + Filter: (id = ANY ('{-500,500,1500}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN -------------------------------------------------------------------------------- @@ -587,6 +629,59 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2, 1); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{2,1}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value = ANY ('{2,1}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value = ANY ('{1,2}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2, -1); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{1,2,-1}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value = ANY ('{1,2,-1}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (0, 0, 0); + QUERY PLAN 
+------------------------------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{0,0,0}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 25269e1d..e343ce19 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -140,6 +140,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (2500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (500, 1500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-500, 500, 1500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NULL); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; @@ -153,6 +158,12 @@ SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); +EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE value IN (2, 1); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2, -1); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (0, 0, 0); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, NULL); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; From 681aa4326f2051072a996769cdfbc10a8d2a3ede Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 4 Nov 2016 02:21:48 +0300 Subject: [PATCH 0049/1124] Fix problem with parallel queries when none partition is selected --- src/pg_pathman.c | 13 ++++++++----- tests/python/partitioning_test.py | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a8386872..92f08671 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1878,12 +1878,15 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, parallel_workers = Max(parallel_workers, path->parallel_workers); } - Assert(parallel_workers > 0); - /* Generate a partial append path. */ - appendpath = create_append_path(rel, partial_subpaths, NULL, - parallel_workers); - add_partial_path(rel, (Path *) appendpath); + if (parallel_workers > 0) + { + + /* Generate a partial append path. 
*/ + appendpath = create_append_path(rel, partial_subpaths, NULL, + parallel_workers); + add_partial_path(rel, (Path *) appendpath); + } } #endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index a9a934fd..f1c3a730 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -580,6 +580,22 @@ def ordered(obj): res_tuples = sorted(map(lambda x: x[0], res_tuples)) expected = [1, 2, 3, 4, 5] self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) # import ipdb; ipdb.set_trace() # Remove all objects for testing From 2e6d2f8a5459ab15d4498b474620752d74137da3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 4 Nov 2016 04:29:40 +0300 Subject: [PATCH 0050/1124] try reloading cached 'prel' entry when about to spawn a new partition for INSERT (issue #57), extract function perform_type_cast(), select_partition_for_insert() & find_partitions_for_value() now take 'value_type', improve handle_const(), xact_lock_partitioned_rel() and xact_lock_rel_exclusive() return LockAcquireResult --- src/copy_stmt_hooking.c | 1 + src/init.c | 56 +++------------- src/partition_filter.c | 21 +++--- src/partition_filter.h | 8 ++- src/pathman_workers.c | 2 +- src/pg_pathman.c | 144 ++++++++++++++++++++++++++++------------ src/relation_info.c | 14 +++- src/relation_info.h | 4 +- src/utils.c | 62 +++++++++++++++++ src/utils.h | 6 +- src/xact_handling.c | 50 ++++++++------ src/xact_handling.h | 4 +- 12 files changed, 238 insertions(+), 134 deletions(-) diff --git a/src/copy_stmt_hooking.c b/src/copy_stmt_hooking.c index a3f02714..1f21f0ab 100644 --- 
a/src/copy_stmt_hooking.c +++ b/src/copy_stmt_hooking.c @@ -508,6 +508,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Search for a matching partition */ rri_holder_child = select_partition_for_insert(prel, &parts_storage, values[prel->attnum - 1], + prel->atttype, estate, true); child_result_rel = rri_holder_child->result_rel_info; estate->es_result_relation_info = child_result_rel; diff --git a/src/init.c b/src/init.c index 461441f9..e9caef4e 100644 --- a/src/init.c +++ b/src/init.c @@ -27,7 +27,6 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" -#include "parser/parse_coerce.h" #include "utils/datum.h" #include "utils/inval.h" #include "utils/builtins.h" @@ -925,6 +924,7 @@ read_opexpr_const(const OpExpr *opexpr, const Node *right; const Var *part_attr; /* partitioned column */ const Const *constant; + bool cast_success; if (list_length(opexpr->args) != 2) return false; @@ -960,52 +960,18 @@ read_opexpr_const(const OpExpr *opexpr, constant = (Const *) right; - /* Check that types are binary coercible */ - if (IsBinaryCoercible(constant->consttype, prel->atttype)) - { - *val = constant->constvalue; - } - /* If not, try to perfrom a type cast */ - else - { - CoercionPathType ret; - Oid castfunc = InvalidOid; - - ret = find_coercion_pathway(prel->atttype, constant->consttype, - COERCION_EXPLICIT, &castfunc); - - switch (ret) - { - /* There's a function */ - case COERCION_PATH_FUNC: - { - /* Perform conversion */ - Assert(castfunc != InvalidOid); - *val = OidFunctionCall1(castfunc, constant->constvalue); - } - break; + /* Cast Const to a proper type if needed */ + *val = perform_type_cast(constant->constvalue, + getBaseType(constant->consttype), + getBaseType(prel->atttype), + &cast_success); - /* Types are binary compatible (no implicit cast) */ - case COERCION_PATH_RELABELTYPE: - { - /* We don't perform any checks here */ - *val = constant->constvalue; - } - break; - - /* TODO: implement these if needed */ - case 
COERCION_PATH_ARRAYCOERCE: - case COERCION_PATH_COERCEVIAIO: + if (!cast_success) + { + elog(WARNING, "Constant type in some check constraint " + "does not match the partitioned column's type"); - /* There's no cast available */ - case COERCION_PATH_NONE: - default: - { - elog(WARNING, "Constant type in some check constraint " - "does not match the partitioned column's type"); - return false; - } - } + return false; } return true; diff --git a/src/partition_filter.c b/src/partition_filter.c index c7f959c9..1972e188 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -282,8 +282,9 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) * Find matching partitions for 'value' using PartRelationInfo. */ Oid * -find_partitions_for_value(Datum value, const PartRelationInfo *prel, - ExprContext *econtext, int *nparts) +find_partitions_for_value(Datum value, Oid value_type, + const PartRelationInfo *prel, + int *nparts) { #define CopyToTempConst(const_field, attr_field) \ ( temp_const.const_field = prel->attr_field ) @@ -308,7 +309,7 @@ find_partitions_for_value(Datum value, const PartRelationInfo *prel, CopyToTempConst(constbyval, attbyval); /* We use 0 since varno doesn't matter for Const */ - InitWalkerContext(&wcxt, 0, prel, econtext, true); + InitWalkerContext(&wcxt, 0, prel, NULL, true); ranges = walk_expr_tree((Expr *) &temp_const, &wcxt)->rangeset; return get_partition_oids(ranges, nparts, prel, false); } @@ -429,9 +430,9 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(prel, - &state->result_parts, - value, estate, true); + rri_holder = select_partition_for_insert(prel, &state->result_parts, + value, prel->atttype, + estate, true); estate->es_result_relation_info = rri_holder->result_rel_info; /* Switch back and clean up per-tuple context */ @@ -475,20 +476,18 @@ 
partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, ResultPartsStorage *parts_storage, - Datum value, EState *estate, + Datum value, Oid value_type, + EState *estate, bool spawn_partitions) { MemoryContext old_cxt; - ExprContext *econtext; ResultRelInfoHolder *rri_holder; Oid selected_partid = InvalidOid; Oid *parts; int nparts; - econtext = GetPerTupleExprContext(estate); - /* Search for matching partitions */ - parts = find_partitions_for_value(value, prel, econtext, &nparts); + parts = find_partitions_for_value(value, value_type, prel, &nparts); if (nparts > 1) elog(ERROR, ERR_PART_ATTR_MULTIPLE); diff --git a/src/partition_filter.h b/src/partition_filter.h index 899d163e..abd1c40e 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -106,8 +106,9 @@ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); /* Find suitable partition using 'value' */ -Oid *find_partitions_for_value(Datum value, const PartRelationInfo *prel, - ExprContext *econtext, int *nparts); +Oid * find_partitions_for_value(Datum value, Oid value_type, + const PartRelationInfo *prel, + int *nparts); Plan * make_partition_filter(Plan *subplan, Oid partitioned_table, @@ -131,7 +132,8 @@ void partition_filter_explain(CustomScanState *node, ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, ResultPartsStorage *parts_storage, - Datum value, EState *estate, + Datum value, Oid value_type, + EState *estate, bool spawn_partitions); #endif diff --git a/src/pathman_workers.c b/src/pathman_workers.c index c398a9b8..11a936c5 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -610,7 +610,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Check if relation is a partitioned table */ shout_if_prel_is_invalid(relid, /* We also lock the parent relation */ - get_pathman_relation_info_after_lock(relid, true), + 
get_pathman_relation_info_after_lock(relid, true, NULL), /* Partitioning type does not matter here */ PT_INDIFFERENT); /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 92f08671..2822c384 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -704,6 +704,7 @@ create_partitions_internal(Oid relid, Datum value, Oid value_type) PG_TRY(); { const PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? */ Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; @@ -713,57 +714,91 @@ create_partitions_internal(Oid relid, Datum value, Oid value_type) Oid base_atttype; /* base type of prel->atttype */ Oid base_value_type; /* base type of value_type */ - Datum min_rvalue, /* absolute MIN */ - max_rvalue; /* absolute MAX */ - - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of a single partition */ - interval_text; - - FmgrInfo interval_type_cmp; - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info(relid); + prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); shout_if_prel_is_invalid(relid, prel, PT_RANGE); /* Fetch base types of prel->atttype & value_type */ base_atttype = getBaseType(prel->atttype); base_value_type = getBaseType(value_type); - /* Read max & min range values from PartRelationInfo */ - min_rvalue = PrelGetRangesArray(prel)[0].min; - max_rvalue = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; - - /* Copy datums on order to protect them from cache invalidation */ - min_rvalue = datumCopy(min_rvalue, prel->attbyval, prel->attlen); - max_rvalue = datumCopy(max_rvalue, prel->attbyval, prel->attlen); + /* Search for a suitable partition if we didn't hold it */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; - /* Retrieve interval as TEXT from tuple */ - interval_text = values[Anum_pathman_config_range_interval - 1]; + /* Search for matching partitions */ + parts = 
find_partitions_for_value(value, value_type, prel, &nparts); - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - base_atttype, - &interval_type); + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); - /* Fill the FmgrInfo struct with a cmp(value, part_attribute) function */ - fill_type_cmp_fmgr_info(&interval_type_cmp, base_value_type, base_atttype); + /* It seems that we got a partition! */ + else if (nparts == 1) + { + /* Unlock the parent (we're not going to spawn) */ + xact_unlock_partitioned_rel(relid); - if (SPI_connect() != SPI_OK_CONNECT) - elog(ERROR, "could not connect using SPI"); + /* Simply return the suitable partition */ + partid = parts[0]; + } - /* while (value >= MAX) ... */ - spawn_partitions(PrelParentRelid(prel), value, max_rvalue, - base_atttype, &interval_type_cmp, interval_binary, - interval_type, true, &partid); + /* Don't forget to free */ + pfree(parts); + } - /* while (value < MIN) ... 
*/ + /* Else spawn a new one (we hold a lock on the parent) */ if (partid == InvalidOid) - spawn_partitions(PrelParentRelid(prel), value, min_rvalue, - base_atttype, &interval_type_cmp, interval_binary, - interval_type, false, &partid); + { + Datum min_rvalue, /* absolute MIN */ + max_rvalue; /* absolute MAX */ + + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; + + FmgrInfo interval_type_cmp; + + /* Read max & min range values from PartRelationInfo */ + min_rvalue = PrelGetRangesArray(prel)[0].min; + max_rvalue = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; + + /* Copy datums on order to protect them from cache invalidation */ + min_rvalue = datumCopy(min_rvalue, prel->attbyval, prel->attlen); + max_rvalue = datumCopy(max_rvalue, prel->attbyval, prel->attlen); + + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; - SPI_finish(); /* close SPI connection */ + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_atttype, + &interval_type); + + /* Fill the FmgrInfo struct with a cmp(value, part_attribute) */ + fill_type_cmp_fmgr_info(&interval_type_cmp, + base_value_type, + base_atttype); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* while (value >= MAX) ... */ + spawn_partitions(PrelParentRelid(prel), value, max_rvalue, + base_atttype, &interval_type_cmp, + interval_binary, interval_type, true, &partid); + + /* while (value < MIN) ... 
*/ + if (partid == InvalidOid) + spawn_partitions(PrelParentRelid(prel), value, min_rvalue, + base_atttype, &interval_type_cmp, + interval_binary, interval_type, false, &partid); + + SPI_finish(); /* close SPI connection */ + } } else elog(ERROR, "pg_pathman's config does not contain relation \"%s\"", @@ -1082,7 +1117,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, - result); + result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); @@ -1169,7 +1204,7 @@ search_range_partition_eq(const Datum value, ranges, nranges, BTEqualStrategyNumber, - &result); + &result); /* output */ if (result.found_gap) { @@ -1220,7 +1255,7 @@ handle_const(const Const *c, WalkerContext *context) /* * Had to add this check for queries like: - * select * from test.hash_rel where txt = NULL; + * select * from test.hash_rel where txt = NULL; */ if (!context->for_insert || c->constisnull) { @@ -1234,9 +1269,30 @@ handle_const(const Const *c, WalkerContext *context) { case PT_HASH: { - Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); - uint32 idx = hash_to_part_index(DatumGetInt32(value), - PrelChildrenCount(prel)); + Datum value, /* value to be hashed */ + hash; /* 32-bit hash */ + uint32 idx; /* index of partition */ + bool cast_success; + + /* Peform type cast if types mismatch */ + if (prel->atttype != c->consttype) + { + value = perform_type_cast(c->constvalue, + getBaseType(c->consttype), + getBaseType(prel->atttype), + &cast_success); + + if (!cast_success) + elog(ERROR, "Cannot select partition: " + "unable to perform type cast"); + } + /* Else use the Const's value */ + else value = c->constvalue; + + /* Calculate 32-bit hash of 'value' and corresponding index */ + hash = OidFunctionCall1(prel->hash_proc, value); + idx = hash_to_part_index(DatumGetInt32(hash), + PrelChildrenCount(prel)); result->paramsel = estimate_paramsel_using_prel(prel, 
strategy); result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); @@ -1245,7 +1301,7 @@ handle_const(const Const *c, WalkerContext *context) case PT_RANGE: { - FmgrInfo cmp_finfo; + FmgrInfo cmp_finfo; fill_type_cmp_fmgr_info(&cmp_finfo, getBaseType(c->consttype), @@ -1256,7 +1312,7 @@ handle_const(const Const *c, WalkerContext *context) PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, - result); + result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); } diff --git a/src/relation_info.c b/src/relation_info.c index b95e9a57..2aea5bab 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -300,12 +300,22 @@ get_pathman_relation_info(Oid relid) /* Acquire lock on a table and try to get PartRelationInfo */ const PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found) +get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result) { const PartRelationInfo *prel; + LockAcquireResult acquire_result; /* Restrict concurrent partition creation (it's dangerous) */ - xact_lock_partitioned_rel(relid, false); + acquire_result = xact_lock_partitioned_rel(relid, false); + + /* Invalidate cache entry (see AcceptInvalidationMessages()) */ + invalidate_pathman_relation_info(relid, NULL); + + /* Set 'lock_result' if asked to */ + if (lock_result) + *lock_result = acquire_result; prel = get_pathman_relation_info(relid); if (!prel && unlock_if_not_found) diff --git a/src/relation_info.h b/src/relation_info.h index bec9bca9..977673f6 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -14,6 +14,7 @@ #include "postgres.h" #include "access/attnum.h" #include "port/atomics.h" +#include "storage/lock.h" /* @@ -126,7 +127,8 @@ void invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); const 
PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found); + bool unlock_if_not_found, + LockAcquireResult *lock_result); void delay_pathman_shutdown(void); void delay_invalidation_parent_rel(Oid parent); diff --git a/src/utils.c b/src/utils.c index 87e3c2a6..f7d5b535 100644 --- a/src/utils.c +++ b/src/utils.c @@ -21,6 +21,7 @@ #include "commands/extension.h" #include "miscadmin.h" #include "optimizer/var.h" +#include "parser/parse_coerce.h" #include "parser/parse_oper.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -326,3 +327,64 @@ check_security_policy_internal(Oid relid, Oid role) return true; } + +/* + * Try casting value of type 'in_type' to 'out_type'. + * + * This function might emit ERROR. + */ +Datum +perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) +{ + CoercionPathType ret; + Oid castfunc = InvalidOid; + + /* Speculative success */ + if (success) *success = true; + + /* Fast and trivial path */ + if (in_type == out_type) + return value; + + /* Check that types are binary coercible */ + if (IsBinaryCoercible(in_type, out_type)) + return value; + + /* If not, try to perfrom a type cast */ + ret = find_coercion_pathway(out_type, in_type, + COERCION_EXPLICIT, + &castfunc); + + /* Handle coercion paths */ + switch (ret) + { + /* There's a function */ + case COERCION_PATH_FUNC: + { + /* Perform conversion */ + Assert(castfunc != InvalidOid); + return OidFunctionCall1(castfunc, value); + } + + /* Types are binary compatible (no implicit cast) */ + case COERCION_PATH_RELABELTYPE: + { + /* We don't perform any checks here */ + return value; + } + + /* TODO: implement these casts if needed */ + case COERCION_PATH_ARRAYCOERCE: + case COERCION_PATH_COERCEVIAIO: + + /* There's no cast available */ + case COERCION_PATH_NONE: + default: + { + /* Oops, something is wrong */ + if (success) *success = false; + + return (Datum) 0; + } + } +} diff --git a/src/utils.h b/src/utils.h index 
2dd326cb..5946dba1 100644 --- a/src/utils.h +++ b/src/utils.h @@ -38,14 +38,14 @@ char get_rel_persistence(Oid relid); #endif Oid get_rel_owner(Oid relid); +Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); + /* * Handy execution-stage functions. */ char * get_rel_name_or_relid(Oid relid); Oid get_binary_operator_oid(char *opname, Oid arg1, Oid arg2); -void fill_type_cmp_fmgr_info(FmgrInfo *finfo, - Oid type1, - Oid type2); +void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); char * datum_to_cstring(Datum datum, Oid typid); #endif diff --git a/src/xact_handling.c b/src/xact_handling.c index a53fb4c3..898cc9b8 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -15,28 +15,43 @@ #include "catalog/catalog.h" #include "miscadmin.h" #include "storage/lmgr.h" +#include "utils/inval.h" static inline void SetLocktagRelationOid(LOCKTAG *tag, Oid relid); static inline bool do_we_hold_the_lock(Oid relid, LOCKMODE lockmode); + +static LockAcquireResult +LockAcquireOid(Oid relid, LOCKMODE lockmode, bool sessionLock, bool dontWait) +{ + LOCKTAG tag; + LockAcquireResult res; + + /* Create a tag for lock */ + SetLocktagRelationOid(&tag, relid); + + res = LockAcquire(&tag, lockmode, sessionLock, dontWait); + + /* + * Now that we have the lock, check for invalidation messages; + * see notes in LockRelationOid. + */ + if (res != LOCKACQUIRE_ALREADY_HELD) + AcceptInvalidationMessages(); + + return res; +} + + /* * Lock certain partitioned relation to disable concurrent access. 
*/ -bool +LockAcquireResult xact_lock_partitioned_rel(Oid relid, bool nowait) { - if (nowait) - { - if (ConditionalLockRelationOid(relid, ShareUpdateExclusiveLock)) - return true; - return false; - } - else - LockRelationOid(relid, ShareUpdateExclusiveLock); - - return true; + return LockAcquireOid(relid, ShareUpdateExclusiveLock, false, nowait); } /* @@ -51,19 +66,10 @@ xact_unlock_partitioned_rel(Oid relid) /* * Lock relation exclusively (SELECTs are possible). */ -bool +LockAcquireResult xact_lock_rel_exclusive(Oid relid, bool nowait) { - if (nowait) - { - if (ConditionalLockRelationOid(relid, ExclusiveLock)) - return true; - return false; - } - else - LockRelationOid(relid, ExclusiveLock); - - return true; + return LockAcquireOid(relid, ExclusiveLock, false, nowait); } /* diff --git a/src/xact_handling.h b/src/xact_handling.h index b5f8ed3c..b0a5ffe8 100644 --- a/src/xact_handling.h +++ b/src/xact_handling.h @@ -19,10 +19,10 @@ /* * Transaction locks. */ -bool xact_lock_partitioned_rel(Oid relid, bool nowait); +LockAcquireResult xact_lock_partitioned_rel(Oid relid, bool nowait); void xact_unlock_partitioned_rel(Oid relid); -bool xact_lock_rel_exclusive(Oid relid, bool nowait); +LockAcquireResult xact_lock_rel_exclusive(Oid relid, bool nowait); void xact_unlock_rel_exclusive(Oid relid); /* From 071ab3242d7cb9b0c2f08aa0722fa7e5f5550332 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 5 Nov 2016 01:49:21 +0300 Subject: [PATCH 0051/1124] fixes and improvements in append_child_relation() & set_append_rel_pathlist(), add missing set_dummy_rel_pathlist() for PG 9.5 --- src/hooks.c | 2 +- src/pg_compat.c | 37 +++++++++++++++++++- src/pg_compat.h | 9 ++++- src/pg_pathman.c | 90 ++++++++++++++++++++++++++++++------------------ 4 files changed, 102 insertions(+), 36 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 4d453b02..a1b76c0a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -218,7 +218,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Proceed iff 
relation 'rel' is partitioned */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - Relation parent_rel; /* parent's relation (heap */ + Relation parent_rel; /* parent's relation (heap) */ Oid *children; /* selected children oids */ List *ranges, /* a list of IndexRanges */ *wrappers, /* a list of WrapperNodes */ diff --git a/src/pg_compat.c b/src/pg_compat.c index 286f36fa..8d8e49f6 100644 --- a/src/pg_compat.c +++ b/src/pg_compat.c @@ -58,7 +58,9 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) #endif } + /* Set 'rows' for append relation */ rel->rows = parent_rows; + #if PG_VERSION_NUM >= 90600 rel->reltarget->width = rint(parent_size / parent_rows); #else @@ -314,4 +316,37 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) /* Add an unordered partial path based on a parallel sequential scan. */ add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); } -#endif + + +#else /* PG_VERSION_NUM >= 90500 */ + +/* + * set_dummy_rel_pathlist + * Build a dummy path for a relation that's been excluded by constraints + * + * Rather than inventing a special "dummy" path type, we represent this as an + * AppendPath with no members (see also IS_DUMMY_PATH/IS_DUMMY_REL macros). + */ +void +set_dummy_rel_pathlist(RelOptInfo *rel) +{ + /* Set dummy size estimates --- we leave attr_widths[] as zeroes */ + rel->rows = 0; + rel->width = 0; + + /* Discard any pre-existing paths; no further need for them */ + rel->pathlist = NIL; + + add_path(rel, (Path *) create_append_path(rel, NIL, NULL)); + + /* + * We set the cheapest path immediately, to ensure that IS_DUMMY_REL() + * will recognize the relation as dummy if anyone asks. This is redundant + * when we're called from set_rel_size(), but not when called from + * elsewhere, and doing it twice is harmless anyway. 
+ */ + set_cheapest(rel); +} + + +#endif /* PG_VERSION_NUM >= 90600 */ diff --git a/src/pg_compat.h b/src/pg_compat.h index 91e1e167..130dadf4 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -23,8 +23,10 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); void adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest, RelOptInfo *rel, AppendRelInfo *appinfo); + #if PG_VERSION_NUM >= 90600 + #define get_parameterized_joinrel_size_compat(root, rel, outer_path, \ inner_path, sjinfo, \ restrict_clauses) \ @@ -54,8 +56,10 @@ extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); #define make_result_compat(root, tlist, resconstantqual, subplan) \ make_result(tlist, resconstantqual, subplan) + #else /* PG_VERSION_NUM >= 90500 */ + #define get_parameterized_joinrel_size_compat(root, rel, \ outer_path, \ inner_path, \ @@ -81,7 +85,10 @@ extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); #define create_plain_partial_paths_compat(root, rel) ((void) true) -#endif +void set_dummy_rel_pathlist(RelOptInfo *rel); + + +#endif /* PG_VERSION_NUM */ #endif /* PG_COMPAT_H */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2822c384..0f013665 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -33,6 +33,7 @@ #include "fmgr.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#include "optimizer/plancat.h" #include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" @@ -276,6 +277,8 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, Index childRTindex; PlanRowMark *parent_rowmark, *child_rowmark; + Node *childqual; + List *childquals; ListCell *lc, *lc2; @@ -323,10 +326,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, * Copy restrictions. If it's not the parent table, copy only * those restrictions that are related to this partition. 
*/ - child_rel->baserestrictinfo = NIL; if (parent_rte->relid != child_oid) { - List *childquals = NIL; + childquals = NIL; forboth(lc, wrappers, lc2, parent_rel->baserestrictinfo) { @@ -345,24 +347,39 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, Assert(new_clause); childquals = lappend(childquals, new_clause); } - - childquals = (List *) adjust_appendrel_attrs(root, - (Node *) childquals, - appinfo); - childquals = make_restrictinfos_from_actual_clauses(root, childquals); - child_rel->baserestrictinfo = childquals; } /* If it's the parent table, copy all restrictions */ - else + else childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); + + /* Now it's time to change varnos and rebuld quals */ + childquals = (List *) adjust_appendrel_attrs(root, + (Node *) childquals, + appinfo); + childqual = eval_const_expressions(root, (Node *) + make_ands_explicit(childquals)); + if (childqual && IsA(childqual, Const) && + (((Const *) childqual)->constisnull || + !DatumGetBool(((Const *) childqual)->constvalue))) { - List *childquals = NIL; - - childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); - childquals = (List *) adjust_appendrel_attrs(root, - (Node *) childquals, - appinfo); - childquals = make_restrictinfos_from_actual_clauses(root, childquals); - child_rel->baserestrictinfo = childquals; + /* + * Restriction reduces to constant FALSE or constant NULL after + * substitution, so this child need not be scanned. + */ + set_dummy_rel_pathlist(child_rel); + } + childquals = make_ands_implicit((Expr *) childqual); + childquals = make_restrictinfos_from_actual_clauses(root, childquals); + + /* Set new shiny childquals */ + child_rel->baserestrictinfo = childquals; + + if (relation_excluded_by_constraints(root, child_rel, child_rte)) + { + /* + * This child need not be scanned, so we can omit it from the + * appendrel. 
+ */ + set_dummy_rel_pathlist(child_rel); } /* @@ -373,9 +390,6 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); child_rel->has_eclass_joins = parent_rel->has_eclass_joins; - /* Recalc parent relation tuples count */ - parent_rel->tuples += child_rel->tuples; - /* Close child relations, but keep locks */ heap_close(child_relation, NoLock); @@ -1666,8 +1680,8 @@ set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) { - Relids required_outer; - Path *path; + Relids required_outer; + Path *path; /* * We don't support pushing join clauses into the quals of a seqscan, but @@ -1753,11 +1767,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, */ foreach(l, root->append_rel_list) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex; - RangeTblEntry *childRTE; - RelOptInfo *childrel; - ListCell *lcp; + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index childRTindex; + RangeTblEntry *childRTE; + RelOptInfo *childrel; + ListCell *lcp; /* append_rel_list contains all append rels; ignore others */ if (appinfo->parent_relid != parentRTindex) @@ -1780,24 +1794,34 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_rel_consider_parallel_compat(root, childrel, childRTE); #endif - /* - * Compute the child's access paths. 
- */ + /* Compute child's access paths & sizes */ if (childRTE->relkind == RELKIND_FOREIGN_TABLE) { + /* childrel->rows should be >= 1 */ set_foreign_size(root, childrel, childRTE); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; + set_foreign_pathlist(root, childrel, childRTE); } else { + /* childrel->rows should be >= 1 */ set_plain_rel_size(root, childrel, childRTE); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; + set_plain_rel_pathlist(root, childrel, childRTE); } + + /* Set cheapest path for child */ set_cheapest(childrel); - /* - * If child is dummy, ignore it. - */ + /* If child BECAME dummy, ignore it */ if (IS_DUMMY_REL(childrel)) continue; From 641dc1aa80cb7484d6ef472e698162bf667d460a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 5 Nov 2016 02:25:17 +0300 Subject: [PATCH 0052/1124] test concurrent partition creation on INSERT --- tests/python/partitioning_test.py | 63 +++++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 4 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index f1c3a730..a03437bc 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -10,6 +10,7 @@ from testgres import get_new_node, stop_all import time import os +import threading def if_fdw_enabled(func): @@ -26,7 +27,6 @@ class PartitioningTests(unittest.TestCase): def setUp(self): self.setup_cmd = [ - # 'create extension pg_pathman', 'create table abc(id serial, t text)', 'insert into abc select generate_series(1, 300000)', 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', @@ -34,7 +34,6 @@ def setUp(self): def tearDown(self): stop_all() - # clean_all() def start_new_pathman_cluster(self, name='test', allows_streaming=False): node = get_new_node(name) @@ -571,7 +570,7 @@ def ordered(obj): ] } } - ] + ] """) self.assertEqual(ordered(plan), ordered(expected)) @@ -596,7 +595,6 @@ def 
ordered(obj): ] """) self.assertEqual(ordered(plan), ordered(expected)) - # import ipdb; ipdb.set_trace() # Remove all objects for testing node.psql('postgres', 'drop table range_partitioned cascade') @@ -607,6 +605,63 @@ def ordered(obj): node.stop() node.cleanup() + def test_conc_part_creation_insert(self): + """Test concurrent partition creation on INSERT""" + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + con0.begin() + con0.execute('create table ins_test(val int not null)') + con0.execute('insert into ins_test select generate_series(1, 50)') + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 4: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + # Stop instance and finish work + node.stop() + node.cleanup() + if __name__ == "__main__": 
unittest.main() From 30556adc331d7b461f2f0666709504271f26b0f2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 5 Nov 2016 02:59:44 +0300 Subject: [PATCH 0053/1124] tests for improved append_child_relation() & set_append_rel_pathlist() --- expected/pathman_basic.out | 74 +++++++++++++++++++++++++++++++++++++- sql/pathman_basic.sql | 21 +++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 03122c2a..91f9990e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -190,6 +190,78 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; 0 (1 row) +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); +NOTICE: sequence "improved_dummy_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND 
(name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(3 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 11 other objects /* test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); @@ -1837,6 +1909,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 (12 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 46 other objects +NOTICE: drop cascades to 47 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index bc2615c8..1ae4b0e2 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -57,6 +57,27 @@ INSERT INTO test.num_range_rel SELECT COUNT(*) FROM test.num_range_rel; SELECT COUNT(*) FROM ONLY test.num_range_rel; + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE 
TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + +DROP TABLE test.improved_dummy CASCADE; + + /* test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); From 1823fbbbc5eeb88d6f93533b67de6029a5f24711 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 5 Nov 2016 03:46:09 +0300 Subject: [PATCH 0054/1124] fix INSERT INTO ... SELECT ... 
FROM partitioned_table (+tests) --- expected/pathman_basic.out | 60 +++++++++++++++++++++++++++++++++++++- sql/pathman_basic.sql | 23 +++++++++++++++ src/hooks.c | 9 +++--- 3 files changed, 87 insertions(+), 5 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 91f9990e..fd0ad824 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -262,6 +262,64 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A DROP TABLE test.improved_dummy CASCADE; NOTICE: drop cascades to 11 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); +NOTICE: sequence "insert_into_select_seq" does not exist, skipping + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... 
*/ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 5 other objects /* test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); @@ -1909,6 +1967,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 (12 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 47 other objects +NOTICE: drop cascades to 48 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 1ae4b0e2..06fd374a 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -78,6 +78,29 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy 
WHERE id = 101 OR id = 5 A DROP TABLE test.improved_dummy CASCADE; +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; + + /* test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); diff --git a/src/hooks.c b/src/hooks.c index a1b76c0a..22cf8a0a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -204,10 +204,11 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; - /* This works only for SELECTs on simple relations */ - if (root->parse->commandType != CMD_SELECT || - rte->rtekind != RTE_RELATION || - rte->relkind != RELKIND_RELATION) + /* This works only for SELECTs or INSERTs on simple relations */ + if (rte->rtekind != RTE_RELATION || + rte->relkind != RELKIND_RELATION || + (root->parse->commandType != CMD_SELECT && + root->parse->commandType != CMD_INSERT)) /* INSERT INTO ... SELECT ... 
*/ return; /* Skip if this table is not allowed to act as parent (see FROM ONLY) */ From fa1429ebe34c187929c2dacc372814c383fb52ad Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 6 Nov 2016 23:34:20 +0300 Subject: [PATCH 0055/1124] fix bogus coding in handle_boolexpr() --- src/pg_pathman.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0f013665..1200187c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1474,7 +1474,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) default: result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), - IR_COMPLETE)); + IR_LOSSY)); break; } } From d586756fc5ceff17dede9c1dc8910d8fedda11cb Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 7 Nov 2016 12:11:49 +0300 Subject: [PATCH 0056/1124] NOT operator fixed --- expected/pathman_basic.out | 35 +++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 9 +++++++++ src/pg_pathman.c | 2 +- 3 files changed, 45 insertions(+), 1 deletion(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 41ab6ab5..fd9e0691 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1428,6 +1428,41 @@ EXPLAIN (COSTS OFF) SELECT * FROM messages; -> Seq Scan on messages_2 (3 rows) +/* Testing NOT operator */ +CREATE TABLE bool_test(a serial, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 2) = 0 FROM GENERATE_SERIES(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; + count +------- + 50 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; + count +------- + 50 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test 
CASCADE; NOTICE: drop cascades to 13 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 0fd56748..0acdee4b 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -387,6 +387,15 @@ ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; SELECT create_range_partitions('messages', 'id', 1, 100, 2); EXPLAIN (COSTS OFF) SELECT * FROM messages; +/* Testing NOT operator */ +CREATE TABLE bool_test(a serial, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); +INSERT INTO bool_test SELECT g, (g % 2) = 0 FROM GENERATE_SERIES(1, 100) AS g; +SELECT count(*) FROM bool_test; +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); +SELECT count(*) FROM bool_test WHERE b = false; +SELECT count(*) FROM bool_test WHERE b = false; +DROP TABLE bool_test CASCADE; DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..8511f91b 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1533,7 +1533,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) default: result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), - false)); + true)); break; } } From cd3b01eefffa0878d54a45da06a88ead7d50eabf Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 8 Nov 2016 20:16:04 +0300 Subject: [PATCH 0057/1124] create_single_range_partition() function fix --- expected/pathman_basic.out | 20 ++++++++++---------- range.sql | 13 ++++++++++--- sql/pathman_basic.sql | 10 +++++----- src/pg_pathman.c | 3 +-- 4 files changed, 26 insertions(+), 20 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fd0ad824..8edf810f 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1912,34 +1912,34 @@ NOTICE: sequence "index_on_childs_seq" does not exist, skipping 0 (1 row) -SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1K'); 
+SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); add_range_partition --------------------------- - test.index_on_childs_1_1K + test.index_on_childs_1_1k (1 row) -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1K_2K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); append_range_partition ---------------------------- - test.index_on_childs_1K_2K + test.index_on_childs_1k_2k (1 row) -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2K_3K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); append_range_partition ---------------------------- - test.index_on_childs_2K_3K + test.index_on_childs_2k_3k (1 row) -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3K_4K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); append_range_partition ---------------------------- - test.index_on_childs_3K_4K + test.index_on_childs_3k_4k (1 row) -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4K_5K'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); append_range_partition ---------------------------- - test.index_on_childs_4K_5K + test.index_on_childs_4k_5k (1 row) SELECT set_enable_parent('test.index_on_childs', true); diff --git a/range.sql b/range.sql index b30bcd63..0b0ff724 100644 --- a/range.sql +++ b/range.sql @@ -460,7 +460,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS +RETURNS REGCLASS AS $$ DECLARE v_part_num INT; @@ -472,7 +472,7 @@ DECLARE v_child_relname_exists BOOL; v_seq_name TEXT; v_init_callback REGPROCEDURE; - + v_result REGCLASS; BEGIN v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; @@ -535,13 +535,20 @@ 
BEGIN ON params.partrel = parent_relid INTO v_init_callback; + /* + * Save the regclass value because in callback user may want to rename + * partition + */ + v_result := v_child_relname::regclass; + + /* Invoke callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, v_child_relname::REGCLASS, v_init_callback, start_value, end_value); - RETURN v_child_relname; + RETURN v_result; END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 06fd374a..93ed18a8 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -498,11 +498,11 @@ CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); CREATE INDEX ON test.index_on_childs(c2); INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); -SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1K'); -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1K_2K'); -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2K_3K'); -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3K_4K'); -SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4K_5K'); +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; diff 
--git a/src/pg_pathman.c b/src/pg_pathman.c index 1200187c..05140862 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -582,8 +582,7 @@ spawn_partitions(Oid partitioned_rel, /* parent's Oid */ char *query; /* Create querty statement */ - query = psprintf("SELECT part::regclass " - "FROM %s.create_single_range_partition($1, $2, $3) AS part", + query = psprintf("SELECT %s.create_single_range_partition($1, $2, $3) AS part", get_namespace_name(get_pathman_schema())); /* Execute comparison function cmp(value, cur_part_leading) */ From f8726bea17466302b1335e353b7cd2162bf5a0a7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 9 Nov 2016 17:30:06 +0300 Subject: [PATCH 0058/1124] add missing pathman_basic.out fixes --- expected/pathman_basic.out | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fd0ad824..966b24af 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -320,7 +320,7 @@ SELECT count(*) FROM test.insert_into_select_copy; DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; NOTICE: drop cascades to 5 other objects -/* test special case: ONLY statement with not-ONLY for partitioned table */ +/* Test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); SELECT pathman.create_range_partitions('test.from_only_test', 'val', 1, 2); @@ -1818,6 +1818,41 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-12-15'; Filter: (dt = 'Tue Dec 15 00:00:00 2015'::timestamp without time zone) (3 rows) +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS 
g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects /* Test foreign keys */ CREATE TABLE test.messages(id SERIAL PRIMARY KEY, msg TEXT); CREATE TABLE test.replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES test.messages(id), msg TEXT); From 1cd5a120e8859348ed4af86b26afb988f16d6a81 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 10 Nov 2016 18:37:15 +0300 Subject: [PATCH 0059/1124] [WIP] refactoring for the 'partition_creation' subsystem --- expected/pathman_basic.out | 4 +- range.sql | 4 +- src/partition_creation.c | 289 +++++++++++++++++++++++-------------- src/partition_creation.h | 31 +++- 4 files changed, 213 insertions(+), 115 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 41ab6ab5..632e3c9f 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1270,13 +1270,13 @@ SELECT split_range_partition('range_rel_1', '2010-02-15'::date); SELECT append_range_partition('range_rel'); append_range_partition ------------------------ - public.range_rel_14 + range_rel_14 (1 row) SELECT prepend_range_partition('range_rel'); prepend_range_partition ------------------------- - public.range_rel_15 + range_rel_15 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; diff --git a/range.sql b/range.sql index b30bcd63..9fb9d03d 100644 --- a/range.sql +++ b/range.sql @@ -460,7 +460,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS +RETURNS REGCLASS AS $$ DECLARE v_part_num INT; @@ 
-541,7 +541,7 @@ BEGIN start_value, end_value); - RETURN v_child_relname; + RETURN v_child_relname::REGCLASS; END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; diff --git a/src/partition_creation.c b/src/partition_creation.c index 9d0ebf76..2bee4813 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1,7 +1,6 @@ #include "pathman.h" #include "init.h" #include "partition_creation.h" -#include "relation_info.h" #include "access/reloptions.h" #include "access/xact.h" @@ -10,38 +9,87 @@ #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/tablecmds.h" -#include "nodes/makefuncs.h" -#include "parser/parse_expr.h" -#include "parser/parse_node.h" #include "parser/parse_relation.h" +#include "parser/parse_utilcmd.h" +#include "tcop/utility.h" +#include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/syscache.h" -/* TODO: comment */ +static Oid create_single_partition(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace, + char **partitioned_column); + +static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, + Oid relowner); + + +/* Create one RANGE partition [start_value, end_value) */ Oid -create_single_range_partition(Oid parent_relid, - Datum start_value, - Datum end_value, - Oid value_type, - RangeVar *partition_rv, - char *tablespace) +create_single_range_partition_internal(Oid parent_relid, + Datum start_value, + Datum end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace) { - CreateStmt create_stmt; - ObjectAddress partition_addr; - Oid child_relid; - Relation child_relation; - Datum toast_options; - TableLikeClause like_clause; - Constraint *check_constr; - RangeVar *parent_rv; + Oid partition; + Relation child_relation; + Constraint *check_constr; + char *partitioned_column; + + /* Create a partition & get 'partitioned_column' */ + partition = create_single_partition(parent_relid, + partition_rv, + tablespace, + &partitioned_column); /* get it 
*/ + + /* Build check constraint for RANGE partition */ + check_constr = build_range_check_constraint(partition, + partitioned_column, + start_value, + end_value, + value_type); + + /* Open the relation and add new check constraint */ + child_relation = heap_open(partition, AccessExclusiveLock); + AddRelationNewConstraints(child_relation, NIL, + list_make1(check_constr), + false, true, true); + heap_close(child_relation, NoLock); + + /* Return the Oid */ + return partition; +} + +/* Create a partition-like table (no constraints yet) */ +static Oid +create_single_partition(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace, + char **partitioned_column) /* to be set */ +{ + /* Value to be returned */ + Oid child_relid = InvalidOid; /* safety */ + + /* Parent's namespace and name */ Oid parent_nsp; char *parent_name, - *parent_nsp_name, - partitioned_column; + *parent_nsp_name; + + /* Values extracted from PATHMAN_CONFIG */ Datum config_values[Natts_pathman_config]; bool config_nulls[Natts_pathman_config]; - static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + + /* Elements of the "CREATE TABLE" query tree */ + RangeVar *parent_rv; + TableLikeClause like_clause; + CreateStmt create_stmt; + List *create_stmts; + ListCell *lc; + /* Lock parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -59,6 +107,15 @@ create_single_range_partition(Oid parent_relid, parent_nsp = get_rel_namespace(parent_relid); parent_nsp_name = get_namespace_name(parent_nsp); + /* Fetch partitioned column's name */ + if (partitioned_column) + { + Datum partitioned_column_datum; + + partitioned_column_datum = config_values[Anum_pathman_config_attname - 1]; + *partitioned_column = TextDatumGetCString(partitioned_column_datum); + } + /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); @@ -77,31 +134,83 @@ create_single_range_partition(Oid parent_relid, /* Initialize TableLikeClause structure */ 
NodeSetTag(&like_clause, T_TableLikeClause); like_clause.relation = copyObject(parent_rv); - like_clause.options = CREATE_TABLE_LIKE_ALL; + like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | + CREATE_TABLE_LIKE_CONSTRAINTS | + CREATE_TABLE_LIKE_INDEXES | + CREATE_TABLE_LIKE_STORAGE; /* Initialize CreateStmt structure */ NodeSetTag(&create_stmt, T_CreateStmt); create_stmt.relation = copyObject(partition_rv); - create_stmt.tableElts = list_make1(&like_clause); + create_stmt.tableElts = list_make1(copyObject(&like_clause)); create_stmt.inhRelations = list_make1(copyObject(parent_rv)); create_stmt.ofTypename = NULL; - create_stmt.constraints = list_make1(&check_constr); + create_stmt.constraints = NIL; create_stmt.options = NIL; create_stmt.oncommit = ONCOMMIT_NOOP; create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; + /* Generate columns using the parent table */ + create_stmts = transformCreateStmt(&create_stmt, NULL); + + /* Create the partition and all required relations */ + foreach (lc, create_stmts) + { + Node *cur_stmt; + + /* Fetch current CreateStmt */ + cur_stmt = (Node *) lfirst(lc); + + if (IsA(cur_stmt, CreateStmt)) + { + Oid child_relowner; + + /* Partition should have the same owner as the parent */ + child_relowner = get_rel_owner(parent_relid); + + /* Create a partition and save its Oid */ + child_relid = create_table_using_stmt((CreateStmt *) cur_stmt, + child_relowner).objectId; + } + else if (IsA(cur_stmt, CreateForeignTableStmt)) + { + elog(ERROR, "FDW partition creation is not implemented yet"); + } + else + { + /* + * Recurse for anything else. Note the recursive + * call will stash the objects so created into our + * event trigger context. 
+ */ + ProcessUtility(cur_stmt, + "have to provide query string", + PROCESS_UTILITY_SUBCOMMAND, + NULL, + None_Receiver, + NULL); + } + } + + return child_relid; +} + +/* Create a new table using cooked CreateStmt */ +static ObjectAddress +create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) +{ + ObjectAddress table_addr; + Datum toast_options; + static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + /* Create new partition owned by parent's posessor */ - partition_addr = DefineRelation(&create_stmt, RELKIND_RELATION, - get_rel_owner(parent_relid), NULL); + table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL); /* Save data about a simple DDL command that was just executed */ - EventTriggerCollectSimpleCommand(partition_addr, + EventTriggerCollectSimpleCommand(table_addr, InvalidObjectAddress, - (Node *) &create_stmt); - - /* Save partition's Oid */ - child_relid = partition_addr.objectId; + (Node *) create_stmt); /* * Let NewRelationCreateToastTable decide if this @@ -110,46 +219,29 @@ create_single_range_partition(Oid parent_relid, CommandCounterIncrement(); /* Parse and validate reloptions for the toast table */ - toast_options = transformRelOptions((Datum) 0, create_stmt.options, + toast_options = transformRelOptions((Datum) 0, create_stmt->options, "toast", validnsps, true, false); /* Parse options for a new toast table */ (void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true); /* Now create the toast table if needed */ - NewRelationCreateToastTable(child_relid, toast_options); + NewRelationCreateToastTable(table_addr.objectId, toast_options); /* Update config one more time */ CommandCounterIncrement(); - /* Fetch partitioned column's name */ - partitioned_column = config_values[Anum_pathman_config_attname - 1]; - - /* Build check constraint for RANGE partition */ - check_constr = build_range_check_constraint(partitioned_column, - start_value, - end_value, - value_type); - - /* Open the relation and add new check 
constraint */ - child_relation = heap_openrv(partition_rv, AccessExclusiveLock); - AddRelationNewConstraints(child_relation, NIL, - list_make1(check_constr), - false, true, true); - heap_close(child_relation, NoLock); - - /* Invoke init_callback on partition */ - invoke_init_callback(parent_relid, child_relid, InvalidOid, - start_value, end_value, value_type); - - return child_relid; + /* Return the address */ + return table_addr; } + +/* Build RANGE check constraint expression tree */ Node * -raw_range_check_tree(char *attname, - Datum start_value, - Datum end_value, - Oid value_type) +build_raw_range_check_tree(char *attname, + Datum start_value, + Datum end_value, + Oid value_type) { BoolExpr *and_oper = makeNode(BoolExpr); A_Expr *left_arg = makeNode(A_Expr), @@ -191,69 +283,52 @@ raw_range_check_tree(char *attname, return (Node *) and_oper; } -Node * -good_range_check_tree(RangeVar *partition, - char *attname, - Datum start_value, - Datum end_value, - Oid value_type) -{ - ParseState *pstate = make_parsestate(NULL); - RangeTblEntry *partition_rte; - Node *expression, - *raw_expression; - ParseNamespaceItem pni; - - /* Required for transformExpr() */ - partition_rte = addRangeTableEntry(pstate, partition, NULL, false, false); - - memset((void *) &pni, 0, sizeof(ParseNamespaceItem)); - pni.p_rte = partition_rte; - pni.p_rel_visible = true; - pni.p_cols_visible = true; - - pstate->p_namespace = list_make1(&pni); - pstate->p_rtable = list_make1(partition_rte); - - /* Transform raw check constraint expression into Constraint */ - raw_expression = raw_range_check_tree(attname, start_value, end_value, value_type); - expression = transformExpr(pstate, raw_expression, EXPR_KIND_CHECK_CONSTRAINT); - - return (Node *) expression; -} - +/* Build complete RANGE check constraint */ Constraint * -build_range_check_constraint(char *attname, +build_range_check_constraint(Oid child_relid, + char *attname, Datum start_value, Datum end_value, Oid value_type) { - Constraint 
*range_constr; + Constraint *range_constr; + char *range_constr_name; + AttrNumber attnum; - range_constr = makeNode(Constraint); - range_constr->conname = NULL; - range_constr->deferrable = false; - range_constr->initdeferred = false; - range_constr->location = -1; - range_constr->contype = CONSTR_CHECK; - range_constr->is_no_inherit = true; - - range_constr->raw_expr = raw_range_check_tree(attname, - start_value, - end_value, - value_type); + /* Build a correct name for this constraint */ + attnum = get_attnum(child_relid, attname); + range_constr_name = build_check_constraint_name_internal(child_relid, attnum); + /* Initialize basic properties of a CHECK constraint */ + range_constr = makeNode(Constraint); + range_constr->conname = range_constr_name; + range_constr->deferrable = false; + range_constr->initdeferred = false; + range_constr->location = -1; + range_constr->contype = CONSTR_CHECK; + range_constr->is_no_inherit = true; + + /* Validate existing data using this constraint */ + range_constr->skip_validation = false; + range_constr->initially_valid = true; + + /* Finally we should build an expression tree */ + range_constr->raw_expr = build_raw_range_check_tree(attname, + start_value, + end_value, + value_type); + /* Everything seems to be fine */ return range_constr; } -/* TODO: comment */ +/* Invoke 'init_callback' for a partition */ void invoke_init_callback(Oid parent_relid, Oid child_relid, - Oid init_callback, + PartType part_type, Datum start_value, Datum end_value, Oid value_type) { - + /* TODO: implement callback invocation machinery */ } diff --git a/src/partition_creation.h b/src/partition_creation.h index ac8792fa..e3d9c624 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -1,8 +1,31 @@ +#include "relation_info.h" #include "postgres.h" #include "nodes/parsenodes.h" -Constraint *build_range_check_constraint(char *attname, - Datum start_value, - Datum end_value, - Oid value_type); + +Oid 
+create_single_range_partition_internal(Oid parent_relid, + Datum start_value, + Datum end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace); + +Constraint * build_range_check_constraint(Oid child_relid, + char *attname, + Datum start_value, + Datum end_value, + Oid value_type); + +Node * build_raw_range_check_tree(char *attname, + Datum start_value, + Datum end_value, + Oid value_type); + +void invoke_init_callback(Oid parent_relid, + Oid child_relid, + PartType part_type, + Datum start_value, + Datum end_value, + Oid value_type); From 12778977c3cffa238c599856ac342fdd041d29c4 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 11 Nov 2016 12:37:43 +0300 Subject: [PATCH 0060/1124] constraint renaming and tests --- Makefile | 4 +- ...g.out => pathman_utility_stmt_hooking.out} | 91 +++++++++++++++++++ ...g.sql => pathman_utility_stmt_hooking.sql} | 52 ++++++++++- src/hooks.c | 35 +++++-- src/init.c | 2 +- src/init.h | 9 ++ ..._stmt_hooking.c => utility_stmt_hooking.c} | 51 ++++++++++- ..._stmt_hooking.h => utility_stmt_hooking.h} | 3 +- src/utils.c | 30 ++++++ src/utils.h | 1 + 10 files changed, 259 insertions(+), 19 deletions(-) rename expected/{pathman_copy_stmt_hooking.out => pathman_utility_stmt_hooking.out} (52%) rename sql/{pathman_copy_stmt_hooking.sql => pathman_utility_stmt_hooking.sql} (64%) rename src/{copy_stmt_hooking.c => utility_stmt_hooking.c} (93%) rename src/{copy_stmt_hooking.h => utility_stmt_hooking.h} (86%) diff --git a/Makefile b/Makefile index c7168d1b..f06e5896 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ MODULE_big = pg_pathman OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ - src/hooks.o src/nodes_common.o src/xact_handling.o src/copy_stmt_hooking.o \ + src/hooks.o src/nodes_common.o src/xact_handling.o 
src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/pg_compat.o $(WIN32RES) EXTENSION = pg_pathman @@ -20,7 +20,7 @@ REGRESS = pathman_basic \ pathman_foreign_keys \ pathman_permissions \ pathman_rowmarks \ - pathman_copy_stmt_hooking \ + pathman_utility_stmt_hooking \ pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output diff --git a/expected/pathman_copy_stmt_hooking.out b/expected/pathman_utility_stmt_hooking.out similarity index 52% rename from expected/pathman_copy_stmt_hooking.out rename to expected/pathman_utility_stmt_hooking.out index d0fcaaf7..5fb95f2e 100644 --- a/expected/pathman_copy_stmt_hooking.out +++ b/expected/pathman_utility_stmt_hooking.out @@ -1,5 +1,8 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +/* + * Test COPY + */ CREATE SCHEMA copy_stmt_hooking; CREATE TABLE copy_stmt_hooking.test( val int not null, @@ -190,4 +193,92 @@ SELECT * FROM copy_stmt_hooking.test ORDER BY val; DROP SCHEMA copy_stmt_hooking CASCADE; NOTICE: drop cascades to 7 other objects +/* + * Test auto check constraint renaming + */ +CREATE SCHEMA rename; +CREATE TABLE rename.test(a serial, b int); +SELECT create_hash_partitions('rename.test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +ALTER TABLE rename.test_0 RENAME TO test_one; +/* We expect to find check constraint renamed as well */ +\d+ rename.test_one + Table "rename.test_one" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+---------------------------------------------------------+---------+--------------+------------- + a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | + b | integer | | plain | | +Check constraints: + "pathman_test_one_1_check" CHECK (get_hash_part_idx(hashint4(a), 3) = 0) +Inherits: rename.test + +/* Generates check constraint for relation */ +CREATE OR REPLACE 
FUNCTION add_constraint(rel regclass, att text) +RETURNS VOID AS $$ +declare + constraint_name text := build_check_constraint_name(rel, 'a'); +BEGIN + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); +END +$$ +LANGUAGE plpgsql; +/* + * Check that it doesn't affect regular inherited tables that aren't managed + * by pg_pathman + */ +CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); +CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); +ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; +SELECT add_constraint('rename.test_inh_1', 'a'); + add_constraint +---------------- + +(1 row) + +ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; +\d+ rename.test_inh_one + Table "rename.test_inh_one" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+---------------------------------------------------------+---------+--------------+------------- + a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | + b | integer | | plain | | +Check constraints: + "pathman_test_inh_1_1_check" CHECK (a < 100) +Inherits: rename.test_inh + +/* Check that plain tables are not affected too */ +CREATE TABLE rename.plain_test(a serial, b int); +ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; +SELECT add_constraint('rename.plain_test_renamed', 'a'); + add_constraint +---------------- + +(1 row) + +\d+ rename.plain_test_renamed + Table "rename.plain_test_renamed" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+---------------------------------------------------------------+---------+--------------+------------- + a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | + b | integer | | plain | | +Check constraints: + "pathman_plain_test_renamed_1_check" CHECK (a < 100) + +ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; +\d+ rename.plain_test + Table "rename.plain_test" + 
Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+---------------------------------------------------------------+---------+--------------+------------- + a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | + b | integer | | plain | | +Check constraints: + "pathman_plain_test_renamed_1_check" CHECK (a < 100) + +DROP SCHEMA rename CASCADE; +NOTICE: drop cascades to 7 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_copy_stmt_hooking.sql b/sql/pathman_utility_stmt_hooking.sql similarity index 64% rename from sql/pathman_copy_stmt_hooking.sql rename to sql/pathman_utility_stmt_hooking.sql index b7e9868a..de06c26f 100644 --- a/sql/pathman_copy_stmt_hooking.sql +++ b/sql/pathman_utility_stmt_hooking.sql @@ -1,9 +1,11 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; -CREATE SCHEMA copy_stmt_hooking; - +/* + * Test COPY + */ +CREATE SCHEMA copy_stmt_hooking; CREATE TABLE copy_stmt_hooking.test( val int not null, comment text, @@ -89,6 +91,50 @@ COPY copy_stmt_hooking.test FROM stdin; SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT * FROM copy_stmt_hooking.test ORDER BY val; - DROP SCHEMA copy_stmt_hooking CASCADE; + +/* + * Test auto check constraint renaming + */ +CREATE SCHEMA rename; + +CREATE TABLE rename.test(a serial, b int); +SELECT create_hash_partitions('rename.test', 'a', 3); +ALTER TABLE rename.test_0 RENAME TO test_one; +/* We expect to find check constraint renamed as well */ +\d+ rename.test_one + +/* Generates check constraint for relation */ +CREATE OR REPLACE FUNCTION add_constraint(rel regclass, att text) +RETURNS VOID AS $$ +declare + constraint_name text := build_check_constraint_name(rel, 'a'); +BEGIN + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); +END +$$ +LANGUAGE plpgsql; + +/* + * Check that it doesn't affect regular inherited tables that aren't managed + * by pg_pathman + */ +CREATE TABLE 
rename.test_inh (LIKE rename.test INCLUDING ALL); +CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); +ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; +SELECT add_constraint('rename.test_inh_1', 'a'); +ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; +\d+ rename.test_inh_one + +/* Check that plain tables are not affected too */ +CREATE TABLE rename.plain_test(a serial, b int); +ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; +SELECT add_constraint('rename.plain_test_renamed', 'a'); +\d+ rename.plain_test_renamed +ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; +\d+ rename.plain_test + +DROP SCHEMA rename CASCADE; + DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index 22cf8a0a..dbee736f 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -8,7 +8,7 @@ * ------------------------------------------------------------------------ */ -#include "copy_stmt_hooking.h" +#include "utility_stmt_hooking.h" #include "hooks.h" #include "init.h" #include "partition_filter.h" @@ -24,6 +24,7 @@ #include "optimizer/cost.h" #include "optimizer/restrictinfo.h" #include "utils/typcache.h" +#include "utils/lsyscache.h" set_join_pathlist_hook_type set_join_pathlist_next = NULL; @@ -34,6 +35,10 @@ shmem_startup_hook_type shmem_startup_hook_next = NULL; ProcessUtility_hook_type process_utility_hook_next = NULL; +#define is_table_rename_statement(s) \ + IsA((s), RenameStmt) && ((RenameStmt *)(s))->renameType == OBJECT_TABLE + + /* Take care of joins */ void pathman_join_pathlist_hook(PlannerInfo *root, @@ -627,17 +632,29 @@ pathman_process_utility_hook(Node *parsetree, char *completionTag) { /* Override standard COPY statement if needed */ - if (IsPathmanReady() && is_pathman_related_copy(parsetree)) + if (IsPathmanReady()) { - uint64 processed; + if (is_pathman_related_copy(parsetree)) + { + uint64 processed; + + /* Handle our COPY case (and show a special cmd name) */ + PathmanDoCopy((CopyStmt *) parsetree, queryString, 
&processed); + if (completionTag) + snprintf(completionTag, COMPLETION_TAG_BUFSIZE, + "PATHMAN COPY " UINT64_FORMAT, processed); - /* Handle our COPY case (and show a special cmd name) */ - PathmanDoCopy((CopyStmt *) parsetree, queryString, &processed); - if (completionTag) - snprintf(completionTag, COMPLETION_TAG_BUFSIZE, - "PATHMAN COPY " UINT64_FORMAT, processed); + return; /* don't call standard_ProcessUtility() or hooks */ + } - return; /* don't call standard_ProcessUtility() or hooks */ + if (is_table_rename_statement(parsetree)) + { + /* + * Rename check constraint of a table if it is a partition managed + * by pg_pathman + */ + PathmanDoRenameConstraint((RenameStmt *) parsetree); + } } /* Call hooks set by other extensions if needed */ diff --git a/src/init.c b/src/init.c index e9caef4e..2be77763 100644 --- a/src/init.c +++ b/src/init.c @@ -598,7 +598,7 @@ find_inheritance_children_array(Oid parentrelId, char * build_check_constraint_name_internal(Oid relid, AttrNumber attno) { - return psprintf("pathman_%s_%u_check", get_rel_name(relid), attno); + return build_check_constraint_name_by_relname(get_rel_name(relid), attno); } /* diff --git a/src/init.h b/src/init.h index 2e889373..1179c027 100644 --- a/src/init.h +++ b/src/init.h @@ -86,6 +86,15 @@ extern PathmanInitState pg_pathman_init_state; pg_pathman_init_state.initialization_needed = true; \ } while (0) +/* + * Generate check constraint name for given relname + */ +static inline char * +build_check_constraint_name_by_relname(char *relname, AttrNumber attno) +{ + return psprintf("pathman_%s_%u_check", relname, attno); +} + /* * Save and restore PathmanInitState. 
diff --git a/src/copy_stmt_hooking.c b/src/utility_stmt_hooking.c similarity index 93% rename from src/copy_stmt_hooking.c rename to src/utility_stmt_hooking.c index 1f21f0ab..65cbd729 100644 --- a/src/copy_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -1,7 +1,8 @@ /* ------------------------------------------------------------------------ * - * copy_stmt_hooking.c - * Override COPY TO/FROM statement for partitioned tables + * utility_stmt_hooking.c + * Override COPY TO/FROM and ALTER TABLE ... RENAME statements + * for partitioned tables * * Copyright (c) 2016, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group @@ -10,7 +11,7 @@ * ------------------------------------------------------------------------ */ -#include "copy_stmt_hooking.h" +#include "utility_stmt_hooking.h" #include "init.h" #include "partition_filter.h" #include "relation_info.h" @@ -22,6 +23,7 @@ #include "catalog/pg_attribute.h" #include "commands/copy.h" #include "commands/trigger.h" +#include "commands/tablecmds.h" #include "executor/executor.h" #include "foreign/fdwapi.h" #include "miscadmin.h" @@ -622,3 +624,46 @@ prepare_rri_fdw_for_copy(EState *estate, elog(ERROR, "cannot copy to foreign partition \"%s\"", get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); } + +/* + * Rename check constraint of table if it is a partition + */ +void +PathmanDoRenameConstraint(const RenameStmt *stmt) +{ + Oid partition = RangeVarGetRelid(stmt->relation, NoLock, true); + Oid parent = get_rel_parent(partition); + + if (partition != InvalidOid && parent != InvalidOid) + { + char *old_constraint_name, + *new_constraint_name; + const PartRelationInfo *prel = get_pathman_relation_info(parent); + + if (prel) + { + RangeVar *rngVar; + RenameStmt *s; + + /* Generate old constraint name */ + old_constraint_name = build_check_constraint_name_by_relname( + get_rel_name(partition), + prel->attnum); + + /* Generate new constraint name */ + new_constraint_name = 
build_check_constraint_name_by_relname( + stmt->newname, + prel->attnum); + + /* Build check constraint RENAME statement */ + s = makeNode(RenameStmt); + s->renameType = OBJECT_TABCONSTRAINT; + s->relation = stmt->relation; + s->subname = old_constraint_name; + s->newname = new_constraint_name; + s->missing_ok = false; + + RenameConstraint(s); + } + } +} diff --git a/src/copy_stmt_hooking.h b/src/utility_stmt_hooking.h similarity index 86% rename from src/copy_stmt_hooking.h rename to src/utility_stmt_hooking.h index 389a411c..b207581b 100644 --- a/src/copy_stmt_hooking.h +++ b/src/utility_stmt_hooking.h @@ -1,6 +1,6 @@ /* ------------------------------------------------------------------------ * - * copy_stmt_hooking.h + * utility_stmt_hooking.h * Transaction-specific locks and other functions * * Copyright (c) 2016, Postgres Professional @@ -19,5 +19,6 @@ bool is_pathman_related_copy(Node *parsetree); void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); +void PathmanDoRenameConstraint(const RenameStmt *stmt); #endif diff --git a/src/utils.c b/src/utils.c index f7d5b535..4d103e71 100644 --- a/src/utils.c +++ b/src/utils.c @@ -18,6 +18,7 @@ #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "catalog/pg_proc.h" +#include "catalog/pg_inherits.h" #include "commands/extension.h" #include "miscadmin.h" #include "optimizer/var.h" @@ -258,6 +259,35 @@ get_rel_owner(Oid relid) return InvalidOid; } +/* + * Lookup for a parent table + */ +Oid +get_rel_parent(Oid relid) +{ + ScanKeyData key[1]; + Relation relation; + HeapTuple inheritsTuple; + Oid inhparent = InvalidOid; + SysScanDesc scan; + + relation = heap_open(InheritsRelationId, AccessShareLock); + ScanKeyInit(&key[0], + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relid)); + scan = systable_beginscan(relation, InvalidOid, false, + NULL, 1, key); + + if ((inheritsTuple = systable_getnext(scan)) != NULL) + inhparent = 
((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; + + systable_endscan(scan); + heap_close(relation, AccessShareLock); + + return inhparent; +} + /* * Checks that callback function meets specific requirements. * It must have the only JSONB argument and BOOL return type. diff --git a/src/utils.h b/src/utils.h index 5946dba1..6ed950f7 100644 --- a/src/utils.h +++ b/src/utils.h @@ -37,6 +37,7 @@ List * list_reverse(List *l); char get_rel_persistence(Oid relid); #endif Oid get_rel_owner(Oid relid); +Oid get_rel_parent(Oid relid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); From 43e6683f82a225d3d539cb41d53b9a868fce34ee Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 14 Nov 2016 16:40:57 +0300 Subject: [PATCH 0061/1124] pgpro 1c patch support --- src/pg_compat.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/pg_compat.h b/src/pg_compat.h index 7bef6778..977fed8c 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -35,8 +35,13 @@ extern void copy_targetlist_compat(RelOptInfo *dest, RelOptInfo *rel); #define check_index_predicates_compat(rool, rel) \ check_index_predicates(root, rel) +#ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path(rel, subpaths, required_outer, parallel_workers) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(rel, subpaths, required_outer, false, NIL, parallel_workers) +#endif #define pull_var_clause_compat(node, aggbehavior, phbehavior) \ pull_var_clause(node, aggbehavior | phbehavior) From d3dea68b68f8b77d2d9a77f4a0850ba2159cf7cf Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 15 Nov 2016 20:07:21 +0300 Subject: [PATCH 0062/1124] rewrite create_single_range_partition() function in C language --- hash.sql | 2 +- range.sql | 162 +++++-------------- sql/pathman_basic.sql | 14 +- src/init.c | 11 ++ src/init.h | 2 + src/partition_creation.c | 325 
++++++++++++++++++++++++++++++++++----- src/partition_creation.h | 76 ++++++++- src/pg_pathman.c | 4 +- src/pl_funcs.c | 115 ++++---------- src/pl_range_funcs.c | 156 +++++++++++++------ src/relation_info.c | 2 - src/relation_info.h | 1 - src/utils.c | 20 +-- src/utils.h | 3 +- 14 files changed, 568 insertions(+), 325 deletions(-) diff --git a/hash.sql b/hash.sql index 6521af9f..4d8781f9 100644 --- a/hash.sql +++ b/hash.sql @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * hash.sql - * HASH partitioning functions + * HASH partitioning functions * * Copyright (c) 2015-2016, Postgres Professional * diff --git a/range.sql b/range.sql index 9fb9d03d..1fb2ec79 100644 --- a/range.sql +++ b/range.sql @@ -1,33 +1,20 @@ /* ------------------------------------------------------------------------ * * range.sql - * RANGE partitioning functions + * RANGE partitioning functions * * Copyright (c) 2015-2016, Postgres Professional * * ------------------------------------------------------------------------ */ -CREATE OR REPLACE FUNCTION @extschema@.get_sequence_name( - plain_schema TEXT, - plain_relname TEXT) -RETURNS TEXT AS -$$ -BEGIN - RETURN format('%s.%s', - quote_ident(plain_schema), - quote_ident(format('%s_seq', plain_relname))); -END -$$ -LANGUAGE plpgsql; - CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - plain_schema TEXT, - plain_relname TEXT, + parent_relid REGCLASS, OUT seq_name TEXT) AS $$ BEGIN - seq_name := @extschema@.get_sequence_name(plain_schema, plain_relname); + seq_name := @extschema@.build_sequence_name(parent_relid); + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); END @@ -110,7 +97,7 @@ BEGIN PERFORM @extschema@.common_relation_checks(parent_relid, attribute); IF p_count < 0 THEN - RAISE EXCEPTION '''p_count'' must not be less than 0'; + RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; /* Try to determine 
partitions count if not set */ @@ -154,7 +141,7 @@ BEGIN END IF; /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) + PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); /* Insert new entry to pathman config */ @@ -269,7 +256,7 @@ BEGIN END IF; /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) + PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); /* Insert new entry to pathman config */ @@ -343,7 +330,7 @@ BEGIN end_value); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) + PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); /* Insert new entry to pathman config */ @@ -413,7 +400,7 @@ BEGIN end_value); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) + PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); /* Insert new entry to pathman config */ @@ -450,102 +437,6 @@ BEGIN END $$ LANGUAGE plpgsql; -/* - * Creates new RANGE partition. Returns partition name. - * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS REGCLASS AS -$$ -DECLARE - v_part_num INT; - v_child_relname TEXT; - v_plain_child_relname TEXT; - v_attname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_child_relname_exists BOOL; - v_seq_name TEXT; - v_init_callback REGPROCEDURE; - -BEGIN - v_attname := attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname); - - IF partition_name IS NULL THEN - /* Get next value from sequence */ - LOOP - v_part_num := nextval(v_seq_name); - v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num); - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_child_relname)); - - v_child_relname_exists := count(*) > 0 - FROM pg_class - WHERE relname = v_plain_child_relname AND - relnamespace = v_plain_schema::regnamespace - LIMIT 1; - - EXIT WHEN v_child_relname_exists = false; - END LOOP; - ELSE - v_child_relname := partition_name; - END IF; - - IF tablespace IS NULL THEN - tablespace := @extschema@.get_rel_tablespace_name(parent_relid); - END IF; - - EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) - INHERITS (%2$s) TABLESPACE %3$s', - v_child_relname, - parent_relid::TEXT, - tablespace); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - v_attname), - @extschema@.build_range_condition(v_attname, - start_value, - end_value)); - - PERFORM 
@extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback, - start_value, - end_value); - - RETURN v_child_relname::REGCLASS; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - /* * Split RANGE partition */ @@ -1016,9 +907,10 @@ BEGIN END IF; /* check range overlap */ - IF @extschema@.partitions_count(parent_relid) > 0 - AND @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; + IF @extschema@.partitions_count(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); END IF; /* Create new partition */ @@ -1133,9 +1025,8 @@ BEGIN partition::TEXT; END IF; - IF @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; - END IF; + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; @@ -1321,6 +1212,20 @@ BEGIN END $$ LANGUAGE plpgsql; +/* + * Creates new RANGE partition. Returns partition name. + * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' +LANGUAGE C +SET client_min_messages = WARNING; + /* * Construct CHECK constraint condition for a range partition. */ @@ -1331,6 +1236,11 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; +CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' +LANGUAGE C; + /* * Returns N-th range (as an array of two elements). */ @@ -1354,11 +1264,11 @@ LANGUAGE C; * Checks if range overlaps with existing partitions. * Returns TRUE if overlaps and FALSE otherwise. */ -CREATE OR REPLACE FUNCTION @extschema@.check_overlap( +CREATE OR REPLACE FUNCTION @extschema@.check_range_available( parent_relid REGCLASS, range_min ANYELEMENT, range_max ANYELEMENT) -RETURNS BOOLEAN AS 'pg_pathman', 'check_overlap' +RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' LANGUAGE C; /* diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 0fd56748..8c488d78 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -280,13 +280,13 @@ CREATE TABLE test.num_range_rel ( id SERIAL PRIMARY KEY, txt TEXT); SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4001, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4000, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3999, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3000, 3500); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 999); -SELECT 
pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1001); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); /* CaMeL cAsE table names and attributes */ CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); diff --git a/src/init.c b/src/init.c index 5ae06a92..ab187b46 100644 --- a/src/init.c +++ b/src/init.c @@ -572,6 +572,17 @@ build_check_constraint_name_internal(Oid relid, AttrNumber attno) return psprintf("pathman_%s_%u_check", get_rel_name(relid), attno); } +/* + * Generate part sequence name for a parent. + * + * This function does not perform sanity checks at all. + */ +char * +build_sequence_name_internal(Oid relid) +{ + return psprintf("%s_seq", get_rel_name(relid)); +} + /* * Check that relation 'relid' is partitioned by pg_pathman. 
* diff --git a/src/init.h b/src/init.h index effb2675..f085a71e 100644 --- a/src/init.h +++ b/src/init.h @@ -116,6 +116,8 @@ Oid *find_inheritance_children_array(Oid parentrelId, char *build_check_constraint_name_internal(Oid relid, AttrNumber attno); +char *build_sequence_name_internal(Oid relid); + bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, diff --git a/src/partition_creation.c b/src/partition_creation.c index 2bee4813..df2c4062 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1,30 +1,43 @@ -#include "pathman.h" +#include "pathman.h" #include "init.h" #include "partition_creation.h" #include "access/reloptions.h" #include "access/xact.h" #include "catalog/heap.h" +#include "catalog/pg_type.h" #include "catalog/toasting.h" -#include "commands/defrem.h" #include "commands/event_trigger.h" +#include "commands/sequence.h" #include "commands/tablecmds.h" +#include "parser/parse_func.h" #include "parser/parse_relation.h" #include "parser/parse_utilcmd.h" #include "tcop/utility.h" #include "utils/builtins.h" +#include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/syscache.h" -static Oid create_single_partition(Oid parent_relid, - RangeVar *partition_rv, - char *tablespace, - char **partitioned_column); +static Oid create_single_partition_internal(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace, + char **partitioned_column); + +static char *choose_partition_name(Oid parent_relid, Oid parent_nsp); static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); +static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); + + +/* + * --------------------------------------- + * Public interface (partition creation) + * --------------------------------------- + */ /* Create one RANGE partition [start_value, end_value) */ Oid @@ -35,44 +48,74 @@ create_single_range_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { - Oid partition; 
- Relation child_relation; - Constraint *check_constr; - char *partitioned_column; + Oid partition_relid; + Relation child_relation; + Constraint *check_constr; + char *partitioned_column; + init_callback_params callback_params; /* Create a partition & get 'partitioned_column' */ - partition = create_single_partition(parent_relid, - partition_rv, - tablespace, - &partitioned_column); /* get it */ + partition_relid = create_single_partition_internal(parent_relid, + partition_rv, + tablespace, + &partitioned_column); /* Build check constraint for RANGE partition */ - check_constr = build_range_check_constraint(partition, + check_constr = build_range_check_constraint(partition_relid, partitioned_column, start_value, end_value, value_type); - /* Open the relation and add new check constraint */ - child_relation = heap_open(partition, AccessExclusiveLock); + /* Open the relation and add new check constraint & fkeys */ + child_relation = heap_open(partition_relid, AccessExclusiveLock); AddRelationNewConstraints(child_relation, NIL, list_make1(check_constr), false, true, true); heap_close(child_relation, NoLock); + CommandCounterIncrement(); + + /* Finally invoke 'init_callback' */ + MakeInitCallbackRangeParams(&callback_params, InvalidOid, + parent_relid, partition_relid, + start_value, end_value, value_type); + invoke_part_callback(&callback_params); + /* Return the Oid */ - return partition; + return partition_relid; +} + + +/* + * -------------------- + * Partition creation + * -------------------- + */ + +/* Choose a good name for a partition */ +static char * +choose_partition_name(Oid parent_relid, Oid parent_nsp) +{ + Datum part_num; + Oid part_seq_relid; + + part_seq_relid = get_relname_relid(build_sequence_name_internal(parent_relid), + parent_nsp); + part_num = DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(part_seq_relid)); + + return psprintf("%s_%u", get_rel_name(parent_relid), DatumGetInt32(part_num)); } /* Create a partition-like table (no constraints 
yet) */ static Oid -create_single_partition(Oid parent_relid, - RangeVar *partition_rv, - char *tablespace, - char **partitioned_column) /* to be set */ +create_single_partition_internal(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace, + char **partitioned_column) /* to be set */ { /* Value to be returned */ - Oid child_relid = InvalidOid; /* safety */ + Oid partition_relid = InvalidOid; /* safety */ /* Parent's namespace and name */ Oid parent_nsp; @@ -125,7 +168,7 @@ create_single_partition(Oid parent_relid, char *part_name; /* Make up a name for the partition */ - part_name = ChooseRelationName(parent_name, NULL, "part", parent_nsp); + part_name = choose_partition_name(parent_relid, parent_nsp); /* Make RangeVar for the partition */ partition_rv = makeRangeVar(parent_nsp_name, part_name, -1); @@ -170,8 +213,11 @@ create_single_partition(Oid parent_relid, child_relowner = get_rel_owner(parent_relid); /* Create a partition and save its Oid */ - child_relid = create_table_using_stmt((CreateStmt *) cur_stmt, - child_relowner).objectId; + partition_relid = create_table_using_stmt((CreateStmt *) cur_stmt, + child_relowner).objectId; + + /* Copy FOREIGN KEYS of the parent table */ + copy_foreign_keys(parent_relid, partition_relid); } else if (IsA(cur_stmt, CreateForeignTableStmt)) { @@ -185,7 +231,7 @@ create_single_partition(Oid parent_relid, * event trigger context. 
*/ ProcessUtility(cur_stmt, - "have to provide query string", + "we have to provide a query string", PROCESS_UTILITY_SUBCOMMAND, NULL, None_Receiver, @@ -193,7 +239,7 @@ create_single_partition(Oid parent_relid, } } - return child_relid; + return partition_relid; } /* Create a new table using cooked CreateStmt */ @@ -235,6 +281,45 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) return table_addr; } +/* Copy foreign keys of parent table */ +static void +copy_foreign_keys(Oid parent_relid, Oid partition_oid) +{ + Oid copy_fkeys_proc_args[] = { REGCLASSOID, REGCLASSOID }; + List *copy_fkeys_proc_name; + FmgrInfo copy_fkeys_proc_flinfo; + FunctionCallInfoData copy_fkeys_proc_fcinfo; + char *pathman_schema; + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + + /* Build function's name */ + copy_fkeys_proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(copy_foreign_keys))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(copy_fkeys_proc_name, 2, + copy_fkeys_proc_args, false), + ©_fkeys_proc_flinfo); + + InitFunctionCallInfoData(copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, + 2, InvalidOid, NULL, NULL); + copy_fkeys_proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo.argnull[0] = false; + copy_fkeys_proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo.argnull[1] = false; + + /* Invoke the callback */ + FunctionCallInvoke(©_fkeys_proc_fcinfo); +} + + +/* + * ----------------------------- + * Check constraint generation + * ----------------------------- + */ /* Build RANGE check constraint expression tree */ Node * @@ -321,14 +406,184 @@ build_range_check_constraint(Oid child_relid, return range_constr; } +/* Check if range overlaps with any partitions */ +bool +check_range_available(Oid parent_relid, + Datum start_value, + Datum end_value, + Oid value_type, + bool raise_error) +{ + const PartRelationInfo 
*prel; + RangeEntry *ranges; + FmgrInfo cmp_func; + uint32 i; + + /* Try fetching the PartRelationInfo structure */ + prel = get_pathman_relation_info(parent_relid); + + /* If there's no prel, return TRUE (overlap is not possible) */ + if (!prel) return true; + + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Fetch comparison function */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(value_type), + getBaseType(prel->atttype)); + + ranges = PrelGetRangesArray(prel); + for (i = 0; i < PrelChildrenCount(prel); i++) + { + int c1 = FunctionCall2(&cmp_func, start_value, ranges[i].max), + c2 = FunctionCall2(&cmp_func, end_value, ranges[i].min); + + /* There's someone! */ + if (c1 < 0 && c2 > 0) + { + if (raise_error) + elog(ERROR, "specified range [%s, %s) overlaps " + "with existing partitions", + datum_to_cstring(start_value, value_type), + datum_to_cstring(end_value, value_type)); + else + return false; + } + } + + return true; +} + + +/* + * --------------------- + * Callback invocation + * --------------------- + */ + /* Invoke 'init_callback' for a partition */ +static void +invoke_init_callback_internal(init_callback_params *cb_params) +{ +#define JSB_INIT_VAL(value, val_type, val_cstring) \ + do { \ + (value)->type = jbvString; \ + (value)->val.string.len = strlen(val_cstring); \ + (value)->val.string.val = val_cstring; \ + pushJsonbValue(&jsonb_state, val_type, (value)); \ + } while (0) + + Oid parent_oid = cb_params->parent_relid; + Oid partition_oid = cb_params->partition_relid; + + FmgrInfo cb_flinfo; + FunctionCallInfoData cb_fcinfo; + + JsonbParseState *jsonb_state = NULL; + JsonbValue *result, + key, + val; + + switch (cb_params->parttype) + { + case PT_HASH: + { + pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); + + JSB_INIT_VAL(&key, WJB_KEY, "parent"); + JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "partition"); + 
JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "parttype"); + JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_HASH)); + + result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); + } + break; + + case PT_RANGE: + { + char *start_value, + *end_value; + Datum sv_datum = cb_params->params.range_params.start_value, + ev_datum = cb_params->params.range_params.end_value; + Oid type = cb_params->params.range_params.value_type; + + /* Convert min & max to CSTRING */ + start_value = datum_to_cstring(sv_datum, type); + end_value = datum_to_cstring(ev_datum, type); + + pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); + + JSB_INIT_VAL(&key, WJB_KEY, "parent"); + JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "partition"); + JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "parttype"); + JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); + JSB_INIT_VAL(&key, WJB_KEY, "range_min"); + JSB_INIT_VAL(&val, WJB_VALUE, start_value); + JSB_INIT_VAL(&key, WJB_KEY, "range_max"); + JSB_INIT_VAL(&val, WJB_VALUE, end_value); + + result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); + } + break; + + default: + elog(ERROR, "Unknown partitioning type %u", cb_params->parttype); + break; + } + + /* Fetch & cache callback's Oid if needed */ + if (!cb_params->callback_is_cached) + { + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + + /* Search for init_callback entry in PATHMAN_CONFIG_PARAMS */ + if (read_pathman_params(parent_oid, param_values, param_isnull)) + { + Datum init_cb_datum; /* Oid of init_callback */ + AttrNumber init_cb_attno = Anum_pathman_config_params_init_callback; + + /* Extract Datum storing callback's Oid */ + init_cb_datum = param_values[init_cb_attno - 1]; + + /* Cache init_callback's Oid */ + cb_params->callback = DatumGetObjectId(init_cb_datum); 
+ } + } + + /* No callback is set, exit */ + if (!OidIsValid(cb_params->callback)) + return; + + /* Validate the callback's signature */ + validate_on_part_init_cb(cb_params->callback, true); + + fmgr_info(cb_params->callback, &cb_flinfo); + + InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); + cb_fcinfo.arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo.argnull[0] = false; + + /* Invoke the callback */ + FunctionCallInvoke(&cb_fcinfo); +} + +/* Invoke a callback of a specified type */ void -invoke_init_callback(Oid parent_relid, - Oid child_relid, - PartType part_type, - Datum start_value, - Datum end_value, - Oid value_type) +invoke_part_callback(init_callback_params *cb_params) { - /* TODO: implement callback invocation machinery */ + switch (cb_params->cb_type) + { + case PT_INIT_CALLBACK: + invoke_init_callback_internal(cb_params); + break; + + default: + elog(ERROR, "Unknown callback type: %u", cb_params->cb_type); + } } diff --git a/src/partition_creation.h b/src/partition_creation.h index e3d9c624..b2e9b17a 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -23,9 +23,73 @@ Node * build_raw_range_check_tree(char *attname, Datum end_value, Oid value_type); -void invoke_init_callback(Oid parent_relid, - Oid child_relid, - PartType part_type, - Datum start_value, - Datum end_value, - Oid value_type); +bool check_range_available(Oid partition_relid, + Datum start_value, + Datum end_value, + Oid value_type, + bool raise_error); + + +/* Partitioning callback type */ +typedef enum +{ + PT_INIT_CALLBACK = 0 +} part_callback_type; + +/* Args for partitioning 'init_callback' */ +typedef struct +{ + part_callback_type cb_type; + Oid callback; + bool callback_is_cached; + + PartType parttype; + + Oid parent_relid; + Oid partition_relid; + + union + { + struct + { + /* nothing */ + } hash_params; + + struct + { + Datum start_value, + end_value; + Oid value_type; + } range_params; + + } params; +} 
init_callback_params; + +#define MakeInitCallbackRangeParams(params_p, cb, parent, child, start, end, type) \ + do \ + { \ + memset((void *) (params_p), 0, sizeof(init_callback_params)); \ + (params_p)->cb_type = PT_INIT_CALLBACK; \ + (params_p)->callback = (cb); \ + (params_p)->callback_is_cached = false; \ + (params_p)->parttype = PT_RANGE; \ + (params_p)->parent_relid = (parent); \ + (params_p)->partition_relid = (child); \ + (params_p)->params.range_params.start_value = (start); \ + (params_p)->params.range_params.end_value = (end); \ + (params_p)->params.range_params.value_type = (type); \ + } while (0) + +#define MakeInitCallbackHashParams(params_p, cb, parent, child) \ + do \ + { \ + memset((void *) (params_p), 0, sizeof(init_callback_params)); \ + (params_p)->callback = (cb); \ + (params_p)->callback_is_cached = false; \ + (params_p)->cb_type = PT_INIT_CALLBACK; \ + (params_p)->parttype = PT_HASH; \ + (params_p)->parent_relid = (parent); \ + (params_p)->partition_relid = (child); \ + } while (0) + +void invoke_part_callback(init_callback_params *cb_params); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a9f3bf31..4e995856 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -14,10 +14,10 @@ #include "pathman.h" #include "init.h" #include "hooks.h" -#include "utils.h" #include "partition_filter.h" #include "runtimeappend.h" #include "runtime_merge_append.h" +#include "utils.h" #include "xact_handling.h" #include "postgres.h" @@ -682,7 +682,7 @@ spawn_partitions(Oid partitioned_rel, /* parent's Oid */ do { \ if (!is_cached) \ { \ - fmgr_info(get_binary_operator_oid((opname), (arg1), (arg2)), \ + fmgr_info(oprfuncid(get_binary_operator((opname), (arg1), (arg2))), \ (finfo)); \ is_cached = true; \ } \ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 22f33475..4807ec1d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -11,6 +11,7 @@ #include "init.h" #include "utils.h" #include "pathman.h" +#include "partition_creation.h" #include 
"relation_info.h" #include "xact_handling.h" @@ -573,10 +574,10 @@ add_to_pathman_config(PG_FUNCTION_ARGS) MemoryContext old_mcxt = CurrentMemoryContext; if (PG_ARGISNULL(0)) - elog(ERROR, "parent_relid should not be null"); + elog(ERROR, "'parent_relid' should not be NULL"); if (PG_ARGISNULL(1)) - elog(ERROR, "attname should not be null"); + elog(ERROR, "'attname' should not be NULL"); /* Read parameters */ relid = PG_GETARG_OID(0); @@ -740,14 +741,6 @@ validate_on_part_init_callback_pl(PG_FUNCTION_ARGS) Datum invoke_on_partition_created_callback(PG_FUNCTION_ARGS) { -#define JSB_INIT_VAL(value, val_type, val_cstring) \ - do { \ - (value)->type = jbvString; \ - (value)->val.string.len = strlen(val_cstring); \ - (value)->val.string.val = val_cstring; \ - pushJsonbValue(&jsonb_state, val_type, (value)); \ - } while (0) - #define ARG_PARENT 0 /* parent table */ #define ARG_CHILD 1 /* partition */ #define ARG_CALLBACK 2 /* callback to be invoked */ @@ -756,39 +749,52 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) Oid parent_oid = PG_GETARG_OID(ARG_PARENT), partition_oid = PG_GETARG_OID(ARG_CHILD); - PartType part_type; - Oid cb_oid = PG_GETARG_OID(ARG_CALLBACK); - FmgrInfo cb_flinfo; - FunctionCallInfoData cb_fcinfo; + Oid callback_oid = PG_GETARG_OID(ARG_CALLBACK); + + init_callback_params callback_params; - JsonbParseState *jsonb_state = NULL; - JsonbValue *result, - key, - val; /* If there's no callback function specified, we're done */ - if (PG_ARGISNULL(ARG_CALLBACK) || cb_oid == InvalidOid) + if (PG_ARGISNULL(ARG_CALLBACK) || callback_oid == InvalidOid) PG_RETURN_VOID(); if (PG_ARGISNULL(ARG_PARENT)) - elog(ERROR, "parent_relid should not be null"); + elog(ERROR, "'parent_relid' should not be NULL"); if (PG_ARGISNULL(ARG_CHILD)) - elog(ERROR, "partition should not be null"); + elog(ERROR, "'partition' should not be NULL"); switch (PG_NARGS()) { case 3: - part_type = PT_HASH; + MakeInitCallbackHashParams(&callback_params, + callback_oid, + parent_oid, + 
partition_oid); break; case 5: { + Datum sv_datum, + ev_datum; + Oid value_type; + if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_END)) elog(ERROR, "both bounds must be provided for RANGE partition"); - part_type = PT_RANGE; + /* Fetch start & end values for RANGE + their type */ + sv_datum = PG_GETARG_DATUM(ARG_RANGE_START); + ev_datum = PG_GETARG_DATUM(ARG_RANGE_END); + value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); + + MakeInitCallbackRangeParams(&callback_params, + callback_oid, + parent_oid, + partition_oid, + sv_datum, + ev_datum, + value_type); } break; @@ -797,67 +803,8 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) CppAsString(invoke_on_partition_created_callback)); } - /* Build JSONB according to partitioning type */ - switch (part_type) - { - case PT_HASH: - { - pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); - - JSB_INIT_VAL(&key, WJB_KEY, "parent"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); - JSB_INIT_VAL(&key, WJB_KEY, "partition"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); - JSB_INIT_VAL(&key, WJB_KEY, "parttype"); - JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_HASH)); - - result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); - } - break; - - case PT_RANGE: - { - char *start_value, - *end_value; - Oid type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); - - /* Convert min & max to CSTRING */ - start_value = datum_to_cstring(PG_GETARG_DATUM(ARG_RANGE_START), type); - end_value = datum_to_cstring(PG_GETARG_DATUM(ARG_RANGE_END), type); - - pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); - - JSB_INIT_VAL(&key, WJB_KEY, "parent"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); - JSB_INIT_VAL(&key, WJB_KEY, "partition"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); - JSB_INIT_VAL(&key, WJB_KEY, "parttype"); - JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); - 
JSB_INIT_VAL(&key, WJB_KEY, "range_min"); - JSB_INIT_VAL(&val, WJB_VALUE, start_value); - JSB_INIT_VAL(&key, WJB_KEY, "range_max"); - JSB_INIT_VAL(&val, WJB_VALUE, end_value); - - result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); - } - break; - - default: - elog(ERROR, "Unknown partitioning type %u", part_type); - break; - } - - /* Validate the callback's signature */ - validate_on_part_init_cb(cb_oid, true); - - fmgr_info(cb_oid, &cb_flinfo); - - InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); - cb_fcinfo.arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); - cb_fcinfo.argnull[0] = false; - - /* Invoke the callback */ - FunctionCallInvoke(&cb_fcinfo); + /* Now it's time to call it! */ + invoke_part_callback(&callback_params); PG_RETURN_VOID(); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 4644a92d..8b3f0d2b 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -8,10 +8,13 @@ * ------------------------------------------------------------------------ */ +#include "init.h" #include "pathman.h" +#include "partition_creation.h" #include "relation_info.h" #include "utils.h" +#include "catalog/namespace.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/lsyscache.h" @@ -19,13 +22,15 @@ /* Function declarations */ +PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); PG_FUNCTION_INFO_V1( find_or_create_range_partition); -PG_FUNCTION_INFO_V1( check_overlap ); +PG_FUNCTION_INFO_V1( check_range_available_pl ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( build_range_condition ); +PG_FUNCTION_INFO_V1( build_sequence_name ); /* @@ -34,6 +39,75 @@ PG_FUNCTION_INFO_V1( build_range_condition ); * ----------------------------- */ +/* + * pl/PgSQL wrapper for the create_single_range_partition(). 
+ */ +Datum +create_single_range_partition_pl(PG_FUNCTION_ARGS) +{ + Oid parent_relid; + + /* RANGE boundaries + value type */ + Datum start_value, + end_value; + Oid value_type; + + /* Optional: name & tablespace */ + RangeVar *partition_name_rv; + char *tablespace; + + /* Result (REGCLASS) */ + Oid partition_relid; + + + /* Handle 'parent_relid' */ + if (PG_ARGISNULL(0)) + elog(ERROR, "'parent_relid' should not be NULL"); + + /* Handle 'start_value' */ + if (PG_ARGISNULL(1)) + elog(ERROR, "'start_value' should not be NULL"); + + /* Handle 'end_value' */ + if (PG_ARGISNULL(2)) + elog(ERROR, "'end_value' should not be NULL"); + + /* Fetch mandatory args */ + parent_relid = PG_GETARG_OID(0); + start_value = PG_GETARG_DATUM(1); + end_value = PG_GETARG_DATUM(2); + value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + /* Fetch 'partition_name' */ + if (!PG_ARGISNULL(3)) + { + List *qualified_name; + text *partition_name; + + partition_name = PG_GETARG_TEXT_P(3); + qualified_name = textToQualifiedNameList(partition_name); + partition_name_rv = makeRangeVarFromNameList(qualified_name); + } + else partition_name_rv = NULL; /* default */ + + /* Fetch 'tablespace' */ + if (!PG_ARGISNULL(4)) + { + tablespace = TextDatumGetCString(PG_GETARG_TEXT_P(4)); + } + else tablespace = NULL; /* default */ + + /* Create a new RANGE partition and return its Oid */ + partition_relid = create_single_range_partition_internal(parent_relid, + start_value, + end_value, + value_type, + partition_name_rv, + tablespace); + + PG_RETURN_OID(partition_relid); +} + /* * Returns partition oid for specified parent relid and value. * In case when partition doesn't exist try to create one. 
@@ -41,7 +115,7 @@ PG_FUNCTION_INFO_V1( build_range_condition ); Datum find_or_create_range_partition(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); + Oid parent_relid = PG_GETARG_OID(0); Datum value = PG_GETARG_DATUM(1); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); const PartRelationInfo *prel; @@ -49,8 +123,8 @@ find_or_create_range_partition(PG_FUNCTION_ARGS) RangeEntry found_rentry; search_rangerel_result search_state; - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); fill_type_cmp_fmgr_info(&cmp_func, getBaseType(value_type), @@ -72,10 +146,10 @@ find_or_create_range_partition(PG_FUNCTION_ARGS) PG_RETURN_NULL(); else { - Oid child_oid = create_partitions(parent_oid, value, value_type); + Oid child_oid = create_partitions(parent_relid, value, value_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_oid, NULL); + invalidate_pathman_relation_info(parent_relid, NULL); PG_RETURN_OID(child_oid); } @@ -86,52 +160,22 @@ find_or_create_range_partition(PG_FUNCTION_ARGS) * Returns TRUE if overlaps and FALSE otherwise. 
*/ Datum -check_overlap(PG_FUNCTION_ARGS) +check_range_available_pl(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - - Datum p1 = PG_GETARG_DATUM(1), - p2 = PG_GETARG_DATUM(2); - - Oid p1_type = get_fn_expr_argtype(fcinfo->flinfo, 1), - p2_type = get_fn_expr_argtype(fcinfo->flinfo, 2), - part_type; - - FmgrInfo cmp_func_1, - cmp_func_2; - - uint32 i; - RangeEntry *ranges; - const PartRelationInfo *prel; - - - /* Try fetching the PartRelationInfo structure */ - prel = get_pathman_relation_info(parent_oid); - - /* If there's no prel, return FALSE (overlap is not possible) */ - if (!prel) PG_RETURN_BOOL(false); + Oid parent_relid = PG_GETARG_OID(0); - /* Emit an error if it is not partitioned by RANGE */ - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + Datum start_value = PG_GETARG_DATUM(1), + end_value = PG_GETARG_DATUM(2); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - /* Get base type of partitioned column */ - part_type = getBaseType(prel->atttype); + /* Raise ERROR if range overlaps with any partition */ + check_range_available(parent_relid, + start_value, + end_value, + value_type, + true); - /* Fetch comparison functions */ - fill_type_cmp_fmgr_info(&cmp_func_1, getBaseType(p1_type), part_type); - fill_type_cmp_fmgr_info(&cmp_func_2, getBaseType(p2_type), part_type); - - ranges = PrelGetRangesArray(prel); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - int c1 = FunctionCall2(&cmp_func_1, p1, ranges[i].max); - int c2 = FunctionCall2(&cmp_func_2, p2, ranges[i].min); - - if (c1 < 0 && c2 > 0) - PG_RETURN_BOOL(true); - } - - PG_RETURN_BOOL(false); + PG_RETURN_VOID(); } @@ -284,3 +328,19 @@ build_range_condition(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } + +Datum +build_sequence_name(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Oid parent_nsp; + char *result; + + parent_nsp = get_rel_namespace(parent_relid); + + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(parent_nsp)), + 
quote_identifier(build_sequence_name_internal(parent_relid))); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} diff --git a/src/relation_info.c b/src/relation_info.c index 70287265..0604d4d3 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -166,14 +166,12 @@ refresh_pathman_relation_info(Oid relid, { prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; prel->auto_partition = param_values[Anum_pathman_config_params_auto - 1]; - prel->init_callback = param_values[Anum_pathman_config_params_init_callback - 1]; } /* Else set default values if they cannot be found */ else { prel->enable_parent = false; prel->auto_partition = true; - prel->init_callback = InvalidOid; } /* We've successfully built a cache entry */ diff --git a/src/relation_info.h b/src/relation_info.h index 5b50005a..7b362fe6 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -47,7 +47,6 @@ typedef struct bool valid; /* is this entry valid? */ bool enable_parent; /* include parent to the plan */ bool auto_partition; /* auto partition creation */ - Oid init_callback; /* callback for partition creation */ uint32 children_count; Oid *children; /* Oids of child partitions */ diff --git a/src/utils.c b/src/utils.c index 831d5a24..ac3220c8 100644 --- a/src/utils.c +++ b/src/utils.c @@ -530,22 +530,18 @@ is_date_type_internal(Oid typid) * * Returns operator function's Oid or throws an ERROR on InvalidOid. 
*/ -Oid -get_binary_operator_oid(char *oprname, Oid arg1, Oid arg2) +Operator +get_binary_operator(char *oprname, Oid arg1, Oid arg2) { - Oid funcid = InvalidOid; - Operator op; + Operator op; - op = oper(NULL, list_make1(makeString(oprname)), arg1, arg2, true, -1); - if (op) - { - funcid = oprfuncid(op); - ReleaseSysCache(op); - } - else + op = compatible_oper(NULL, list_make1(makeString(oprname)), + arg1, arg2, true, -1); + + if (!op) elog(ERROR, "Cannot find operator \"%s\"(%u, %u)", oprname, arg1, arg2); - return funcid; + return op; } /* diff --git a/src/utils.h b/src/utils.h index 4222f549..e2d5d717 100644 --- a/src/utils.h +++ b/src/utils.h @@ -14,6 +14,7 @@ #include "pathman.h" #include "postgres.h" +#include "parser/parse_oper.h" #include "utils/rel.h" #include "nodes/relation.h" #include "nodes/nodeFuncs.h" @@ -63,7 +64,7 @@ Oid get_rel_owner(Oid relid); * Handy execution-stage functions. */ char * get_rel_name_or_relid(Oid relid); -Oid get_binary_operator_oid(char *opname, Oid arg1, Oid arg2); +Operator get_binary_operator(char *opname, Oid arg1, Oid arg2); void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); From 0dead52b3fc269a2b5dfdd476c3bdff7f2be20e9 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 16 Nov 2016 19:23:44 +0300 Subject: [PATCH 0063/1124] replace_hash_partition() function --- expected/pathman_basic.out | 48 ++++++++++++++- hash.sql | 118 +++++++++++++++++++++++++++++++++---- init.sql | 20 +++++++ sql/pathman_basic.sql | 9 +++ src/pl_funcs.c | 15 ++--- src/pl_hash_funcs.c | 77 ++++++++++++++++++++++++ src/utils.c | 23 ++++++++ src/utils.h | 1 + 8 files changed, 288 insertions(+), 23 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fd9e0691..c4e6d2b5 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -885,14 +885,58 @@ SELECT * FROM test.hash_rel WHERE id = 123; 123 | 456 | 789 (1 row) +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern 
(LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +\d+ test.hash_rel_0 + Table "test.hash_rel_0" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+------------------------------------------------------------+---------+--------------+------------- + id | integer | not null default nextval('test.hash_rel_id_seq'::regclass) | plain | | + value | integer | not null | plain | | + abc | integer | | plain | | +Indexes: + "hash_rel_0_pkey" PRIMARY KEY, btree (id) +Triggers: + hash_rel_upd_trig BEFORE UPDATE ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE test.hash_rel_upd_trig_func() + +\d+ test.hash_rel_extern + Table "test.hash_rel_extern" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+------------------------------------------------------------+---------+--------------+------------- + id | integer | not null default nextval('test.hash_rel_id_seq'::regclass) | plain | | + value | integer | not null | plain | | + abc | integer | | plain | | +Indexes: + "hash_rel_extern_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_hash_rel_extern_2_check" CHECK (pathman.get_hash_part_idx(hashint4(value), 3) = 0) +Inherits: test.hash_rel + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +EXPLAIN SELECT * FROM test.hash_rel; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Append (cost=10000000000.00..30000000032.44 rows=2044 width=12) + -> Seq Scan on hash_rel_extern (cost=10000000000.00..10000000030.40 rows=2040 width=12) + -> Seq Scan on hash_rel_1 (cost=10000000000.00..10000000001.02 rows=2 width=12) + -> Seq Scan on hash_rel_2 (cost=10000000000.00..10000000001.02 rows=2 width=12) +(4 rows) + /* * Clean up */ SELECT 
pathman.drop_partitions('test.hash_rel'); -NOTICE: drop cascades to 3 other objects -NOTICE: 2 rows copied from test.hash_rel_0 +NOTICE: drop cascades to 2 other objects NOTICE: 3 rows copied from test.hash_rel_1 NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern drop_partitions ----------------- 3 diff --git a/hash.sql b/hash.sql index 6521af9f..a2dda3f6 100644 --- a/hash.sql +++ b/hash.sql @@ -22,8 +22,8 @@ DECLARE v_child_relname TEXT; v_plain_schema TEXT; v_plain_relname TEXT; - v_atttype REGTYPE; - v_hashfunc REGPROC; + -- v_atttype REGTYPE; + -- v_hashfunc REGPROC; v_init_callback REGPROCEDURE; BEGIN @@ -41,8 +41,8 @@ BEGIN PERFORM @extschema@.common_relation_checks(parent_relid, attribute); /* Fetch atttype and its hash function */ - v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); - v_hashfunc := @extschema@.get_type_hash_func(v_atttype); + -- v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); + -- v_hashfunc := @extschema@.get_type_hash_func(v_atttype); SELECT * INTO v_plain_schema, v_plain_relname FROM @extschema@.get_plain_schema_and_relname(parent_relid); @@ -64,15 +64,23 @@ BEGIN parent_relid::TEXT, @extschema@.get_rel_tablespace_name(parent_relid)); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s - CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', + -- EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s + -- CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', + -- v_child_relname, + -- @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, + -- attribute), + -- v_hashfunc::TEXT, + -- attribute, + -- partitions_count, + -- partnum); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, + @extschema@.build_check_constraint_name(v_child_relname, attribute), - v_hashfunc::TEXT, - attribute, - partitions_count, - partnum); + 
@extschema@.build_hash_condition(v_child_relname, + attribute, + partitions_count, + partnum)); PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); @@ -105,6 +113,94 @@ END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; +/* + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS) +RETURNS REGCLASS AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + v_parent_relid REGCLASS; + v_part_count INT; + v_part_num INT; +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + v_parent_relid := @extschema@.get_parent_of_partition(old_partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(v_parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = v_parent_relid; + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent_relid::TEXT; + END IF; + + /* Calculate partitions count and old partition's number */ + v_part_count := count(*) FROM @extschema@.pathman_partition_list WHERE parent = v_parent_relid; + v_part_num := @extschema@.get_partition_hash(v_parent_relid, old_partition); + + /* 
Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, v_parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT IF EXISTS %s', + old_partition, + @extschema@.build_check_constraint_name(old_partition::REGCLASS, + v_attname)); + + /* Attach new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, v_parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + new_partition, + @extschema@.build_check_constraint_name(new_partition::regclass, + v_attname), + @extschema@.build_hash_condition(new_partition::regclass, + v_attname, + v_part_count, + v_part_num)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = v_parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(v_parent_relid, + new_partition, + v_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(v_parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + /* * Creates an update trigger */ diff --git a/init.sql b/init.sql index 179f8aff..37c0ee2e 100644 --- a/init.sql +++ b/init.sql @@ -803,3 +803,23 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + parent_relid REGCLASS, + attname TEXT, + partitions_count INT, + partition_number INT) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C; + +/* + * Returns hash value for specified partition (0..N) + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_hash( + parent_relid REGCLASS, + partition REGCLASS) +RETURNS INT AS 'pg_pathman', 
'get_partition_hash' +LANGUAGE C; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 0acdee4b..c622d971 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -236,6 +236,15 @@ ALTER TABLE test.hash_rel ADD COLUMN abc int; INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); SELECT * FROM test.hash_rel WHERE id = 123; +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); +\d+ test.hash_rel_0 +\d+ test.hash_rel_extern +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +EXPLAIN SELECT * FROM test.hash_rel; + /* * Clean up */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 22f33475..086da79d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -201,24 +201,19 @@ Datum get_attribute_type_pl(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); + char *attname = text_to_cstring(PG_GETARG_TEXT_P(1)); Oid result; - HeapTuple tp; - /* NOTE: for now it's the most efficient way */ - tp = SearchSysCacheAttName(relid, text_to_cstring(attname)); - if (HeapTupleIsValid(tp)) + if ((result = get_attribute_type(relid, attname)) != InvalidOid) { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = att_tup->atttypid; - ReleaseSysCache(tp); - PG_RETURN_OID(result); } else + { elog(ERROR, "Cannot find type name for attribute \"%s\" " "of relation \"%s\"", - text_to_cstring(attname), get_rel_name_or_relid(relid)); + attname, get_rel_name_or_relid(relid)); + } PG_RETURN_NULL(); /* keep compiler happy */ } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 6dc0916f..b9ab1213 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -9,14 +9,19 @@ */ #include "pathman.h" +#include "utils.h" #include "utils/typcache.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" /* Function declarations */ 
PG_FUNCTION_INFO_V1( get_type_hash_func ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); +PG_FUNCTION_INFO_V1( build_hash_condition ); +PG_FUNCTION_INFO_V1( get_partition_hash ); /* @@ -44,3 +49,75 @@ get_hash_part_idx(PG_FUNCTION_ARGS) PG_RETURN_UINT32(hash_to_part_index(value, part_count)); } + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +Datum +build_hash_condition(PG_FUNCTION_ARGS) +{ + TypeCacheEntry *tce; + + Oid parent = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); + uint32 partitions_count = PG_GETARG_UINT32(2); + uint32 partition_number = PG_GETARG_UINT32(3); + Oid attyp; + char *result; + + if (partition_number >= partitions_count) + elog(ERROR, + "Partition number cannot exceed partitions count"); + + /* Get attribute type and its hash function oid */ + attyp = get_attribute_type(parent, text_to_cstring(attname)); + if (attyp == InvalidOid) + elog(ERROR, + "Relation '%s' has no attribute '%s'", + get_rel_name(parent), + text_to_cstring(attname)); + + tce = lookup_type_cache(attyp, TYPECACHE_HASH_PROC); + + /* Create hash condition CSTRING */ + result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", + get_namespace_name(get_pathman_schema()), + get_func_name(tce->hash_proc), + text_to_cstring(attname), + partitions_count, + partition_number); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} + +/* + * Returns hash value for specified partition (0..N) + */ +Datum +get_partition_hash(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + Oid parent = PG_GETARG_OID(0); + Oid partition = PG_GETARG_OID(1); + Oid *children; + int i; + + /* Validate partition type */ + prel = get_pathman_relation_info(parent); + if (!prel || prel->parttype != PT_HASH) + elog(ERROR, + "Relation '%s' isn't partitioned by hash", + get_rel_name(parent)); + + /* Searching for partition */ + children = PrelGetChildrenArray(prel); + for (i=0; ichildren_count; i++) + if (children[i] == partition) + PG_RETURN_UINT32(i); + + /* If we get here then there is no such 
partition for specified parent */ + elog(ERROR, + "Relation '%s' isn't a part of partitioned table '%s'", + get_rel_name(parent), + get_rel_name(partition)); +} diff --git a/src/utils.c b/src/utils.c index 831d5a24..a0b02651 100644 --- a/src/utils.c +++ b/src/utils.c @@ -632,6 +632,29 @@ get_rel_owner(Oid relid) return InvalidOid; } +/* + * Get type oid of a given attribute + */ +Oid +get_attribute_type(Oid relid, const char* attname) +{ + HeapTuple tp; + Oid result; + + /* NOTE: for now it's the most efficient way */ + tp = SearchSysCacheAttName(relid, attname); + if (HeapTupleIsValid(tp)) + { + Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); + result = att_tup->atttypid; + ReleaseSysCache(tp); + + return result; + } + + return InvalidOid; +} + /* * Checks that callback function meets specific requirements. * It must have the only JSONB argument and BOOL return type. diff --git a/src/utils.h b/src/utils.h index 4222f549..dc45f2eb 100644 --- a/src/utils.h +++ b/src/utils.h @@ -58,6 +58,7 @@ List * list_reverse(List *l); char get_rel_persistence(Oid relid); #endif Oid get_rel_owner(Oid relid); +Oid get_attribute_type(Oid relid, const char* attname); /* * Handy execution-stage functions. 
From 86292dcbf899d858ce3a582089c5eef34d42efa5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 16 Nov 2016 20:15:32 +0300 Subject: [PATCH 0064/1124] refactoring, move all functions for partition creation to partition_creation.c, add copyright headers --- src/partition_creation.c | 375 +++++++++++++++++++++++++++++++++++- src/partition_creation.h | 26 ++- src/partition_filter.c | 1 + src/pathman.h | 29 ++- src/pathman_workers.c | 1 + src/pathman_workers.h | 5 + src/pg_pathman.c | 396 +-------------------------------------- 7 files changed, 423 insertions(+), 410 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 64ed90a6..8d742e47 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1,7 +1,21 @@ -#include "pathman.h" +/*------------------------------------------------------------------------- + * + * partition_creation.c + * Various functions for partition creation. + * + * Copyright (c) 2016, Postgres Professional + * + *------------------------------------------------------------------------- + */ + #include "init.h" #include "partition_creation.h" +#include "partition_filter.h" +#include "pathman.h" +#include "pathman_workers.h" +#include "xact_handling.h" +#include "access/htup_details.h" #include "access/reloptions.h" #include "access/xact.h" #include "catalog/heap.h" @@ -10,16 +24,31 @@ #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" +#include "miscadmin.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" #include "parser/parse_utilcmd.h" #include "tcop/utility.h" #include "utils/builtins.h" +#include "utils/datum.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/syscache.h" +static Datum extract_binary_interval_from_text(Datum interval_text, + Oid part_atttype, + Oid *interval_type); + +static Oid spawn_partitions_val(Oid parent_relid, + Datum range_bound_min, + Datum range_bound_max, + Oid range_bound_type, + 
Datum interval_binary, + Oid interval_type, + Datum value, + Oid value_type); + static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, @@ -88,6 +117,56 @@ create_single_range_partition_internal(Oid parent_relid, return partition_relid; } +/* + * Create RANGE partitions (if needed) using either BGW or current backend. + * + * Returns Oid of the partition to store 'value'. + */ +Oid +create_partitions_for_value(Oid relid, Datum value, Oid value_type) +{ + TransactionId rel_xmin; + Oid last_partition = InvalidOid; + + /* Check that table is partitioned and fetch xmin */ + if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) + { + bool part_in_prev_xact = + TransactionIdPrecedes(rel_xmin, GetCurrentTransactionId()) || + TransactionIdEquals(rel_xmin, FrozenTransactionId); + + /* + * If table has been partitioned in some previous xact AND + * we don't hold any conflicting locks, run BGWorker. + */ + if (part_in_prev_xact && !xact_bgw_conflicting_lock_exists(relid)) + { + elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); + last_partition = create_partitions_for_value_bg_worker(relid, + value, + value_type); + } + /* Else it'd be better for the current backend to create partitions */ + else + { + elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); + last_partition = create_partitions_for_value_internal(relid, + value, + value_type); + } + } + else + elog(ERROR, "relation \"%s\" is not partitioned by pg_pathman", + get_rel_name_or_relid(relid)); + + /* Check that 'last_partition' is valid */ + if (last_partition == InvalidOid) + elog(ERROR, "could not create new partitions for relation \"%s\"", + get_rel_name_or_relid(relid)); + + return last_partition; +} + /* * -------------------- @@ -95,6 +174,300 @@ create_single_range_partition_internal(Oid parent_relid, * -------------------- */ +/* + * Create partitions (if needed) and return Oid of the partition to store 'value'. 
+ * + * NB: This function should not be called directly, + * use create_partitions_for_value() instead. + */ +Oid +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) +{ + MemoryContext old_mcxt = CurrentMemoryContext; + Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + + PG_TRY(); + { + const PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL)) + { + Oid base_bound_type; /* base type of prel->atttype */ + Oid base_value_type; /* base type of value_type */ + + /* Fetch PartRelationInfo by 'relid' */ + prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); + shout_if_prel_is_invalid(relid, prel, PT_RANGE); + + /* Fetch base types of prel->atttype & value_type */ + base_bound_type = getBaseType(prel->atttype); + base_value_type = getBaseType(value_type); + + /* Search for a suitable partition if we didn't hold it */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; + + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); + + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + + /* It seems that we got a partition! 
*/ + else if (nparts == 1) + { + /* Unlock the parent (we're not going to spawn) */ + xact_unlock_partitioned_rel(relid); + + /* Simply return the suitable partition */ + partid = parts[0]; + } + + /* Don't forget to free */ + pfree(parts); + } + + /* Else spawn a new one (we hold a lock on the parent) */ + if (partid == InvalidOid) + { + Datum bound_min, /* absolute MIN */ + bound_max; /* absolute MAX */ + + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; + + /* Read max & min range values from PartRelationInfo */ + bound_min = PrelGetRangesArray(prel)[0].min; + bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; + + /* Copy datums on order to protect them from cache invalidation */ + bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); + bound_max = datumCopy(bound_max, prel->attbyval, prel->attlen); + + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; + + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_bound_type, + &interval_type); + + /* At last, spawn partitions to store the value */ + partid = spawn_partitions_val(PrelParentRelid(prel), + bound_min, bound_max, base_bound_type, + interval_binary, interval_type, + value, base_value_type); + } + } + else + elog(ERROR, "pg_pathman's config does not contain relation \"%s\"", + get_rel_name_or_relid(relid)); + } + PG_CATCH(); + { + ErrorData *edata; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + edata = CopyErrorData(); + FlushErrorState(); + + elog(LOG, "create_partitions_internal(): %s [%u]", + edata->message, MyProcPid); + + FreeErrorData(edata); + + /* Reset 'partid' in case of error */ + partid = InvalidOid; + } + PG_END_TRY(); + + return partid; +} + +/* + * Convert interval from TEXT to binary form using partitioned column's type. 
+ */ +static Datum +extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ + Oid part_atttype, /* partitioned column's type */ + Oid *interval_type) /* returned value */ +{ + Datum interval_binary; + const char *interval_cstring; + + interval_cstring = TextDatumGetCString(interval_text); + + /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ + if (is_date_type_internal(part_atttype)) + { + int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; + + /* Convert interval from CSTRING to internal form */ + interval_binary = DirectFunctionCall3(interval_in, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(interval_typmod)); + if (interval_type) + *interval_type = INTERVALOID; + } + /* Otherwise cast it to the partitioned column's type */ + else + { + HeapTuple htup; + Oid typein_proc = InvalidOid; + + htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); + if (HeapTupleIsValid(htup)) + { + typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; + ReleaseSysCache(htup); + } + else + elog(ERROR, "Cannot find input function for type %u", part_atttype); + + /* + * Convert interval from CSTRING to 'prel->atttype'. + * + * Note: We pass 3 arguments in case + * 'typein_proc' also takes Oid & typmod. + */ + interval_binary = OidFunctionCall3(typein_proc, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(part_atttype), + Int32GetDatum(-1)); + if (interval_type) + *interval_type = part_atttype; + } + + return interval_binary; +} + +/* + * Append\prepend partitions if there's no partition to store 'value'. + * + * Used by create_partitions_for_value_internal(). + * + * NB: 'value' type is not needed since we've already taken + * it into account while searching for the 'cmp_proc'. 
+ */ +static Oid +spawn_partitions_val(Oid parent_relid, /* parent's Oid */ + Datum range_bound_min, /* parent's MIN boundary */ + Datum range_bound_max, /* parent's MAX boundary */ + Oid range_bound_type, /* type of boundary's value */ + Datum interval_binary, /* interval in binary form */ + Oid interval_type, /* INTERVALOID or prel->atttype */ + Datum value, /* value to be INSERTed */ + Oid value_type) /* type of value */ +{ + bool should_append; /* append or prepend? */ + + Operator move_bound_op; /* descriptor */ + Oid move_bound_optype; /* operator's ret type */ + + FmgrInfo cmp_value_bound_finfo, /* exec 'value (>=|<) bound' */ + move_bound_finfo; /* exec 'bound + interval' */ + + Datum cur_leading_bound, /* boundaries of a new partition */ + cur_following_bound; + + Oid last_partition = InvalidOid; + + + fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, value_type, range_bound_type); + + /* value >= MAX_BOUNDARY */ + if (check_ge(&cmp_value_bound_finfo, value, range_bound_max)) + { + should_append = true; + cur_leading_bound = range_bound_max; + } + + /* value < MIN_BOUNDARY */ + else if (check_lt(&cmp_value_bound_finfo, value, range_bound_min)) + { + should_append = false; + cur_leading_bound = range_bound_min; + } + + /* There's a gap, halt and emit ERROR */ + else elog(ERROR, "cannot spawn a partition inside a gap"); + + /* Get "move bound operator" descriptor */ + move_bound_op = get_binary_operator(should_append ? 
"+" : "-", + range_bound_type, + interval_type); + /* Get operator's ret type */ + move_bound_optype = get_operator_ret_type(move_bound_op); + + /* Get operator's underlying function */ + fmgr_info(oprfuncid(move_bound_op), &move_bound_finfo); + + /* Don't forget to release system cache */ + ReleaseSysCache(move_bound_op); + + /* Perform some casts if types don't match */ + if (move_bound_optype != range_bound_type) + { + cur_leading_bound = perform_type_cast(cur_leading_bound, + range_bound_type, + move_bound_optype, + NULL); /* might emit ERROR */ + + /* Update 'range_bound_type' */ + range_bound_type = move_bound_optype; + + /* Fetch new comparison function */ + fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, + value_type, + range_bound_type); + } + + /* Execute comparison function cmp(value, cur_leading_bound) */ + while (should_append ? + check_ge(&cmp_value_bound_finfo, value, cur_leading_bound) : + check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) + { + Datum args[2]; + + /* Assign the 'following' boundary to current 'leading' value */ + cur_following_bound = cur_leading_bound; + + /* Move leading bound by interval (exec 'leading (+|-) INTERVAL') */ + cur_leading_bound = FunctionCall2(&move_bound_finfo, + cur_leading_bound, + interval_binary); + + args[0] = should_append ? cur_following_bound : cur_leading_bound; + args[1] = should_append ? cur_leading_bound : cur_following_bound; + + last_partition = create_single_range_partition_internal(parent_relid, + args[0], args[1], + range_bound_type, + NULL, NULL); + +#ifdef USE_ASSERT_CHECKING + elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", + (should_append ? 
"Appending" : "Prepending"), + DebugPrintDatum(cur_following_bound, range_bound_type), + DebugPrintDatum(cur_leading_bound, range_bound_type), + MyProcPid); +#endif + } + + return last_partition; +} + /* Choose a good name for a partition */ static char * choose_partition_name(Oid parent_relid, Oid parent_nsp) diff --git a/src/partition_creation.h b/src/partition_creation.h index b2e9b17a..f89ff1ca 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -1,16 +1,28 @@ +/*------------------------------------------------------------------------- + * + * partition_creation.h + * Various functions for partition creation. + * + * Copyright (c) 2016, Postgres Professional + * + *------------------------------------------------------------------------- + */ + #include "relation_info.h" #include "postgres.h" #include "nodes/parsenodes.h" -Oid -create_single_range_partition_internal(Oid parent_relid, - Datum start_value, - Datum end_value, - Oid value_type, - RangeVar *partition_rv, - char *tablespace); +Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); + +Oid create_single_range_partition_internal(Oid parent_relid, + Datum start_value, + Datum end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace); Constraint * build_range_check_constraint(Oid child_relid, char *attname, diff --git a/src/partition_filter.c b/src/partition_filter.c index 83a571ce..fd5ba001 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -10,6 +10,7 @@ #include "init.h" #include "nodes_common.h" +#include "partition_creation.h" #include "partition_filter.h" #include "planner_tree_modification.h" #include "utils.h" diff --git a/src/pathman.h b/src/pathman.h index 0e3011b4..fb0b6c0e 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -160,13 +160,6 @@ typedef struct /* Check that WalkerContext contains ExprContext (plan execution stage) */ #define 
WcxtHasExprContext(wcxt) ( (wcxt)->econtext ) -/* - * Functions for partition creation, use create_partitions(). - */ -Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); - void select_range_partitions(const Datum value, FmgrInfo *cmp_func, const RangeEntry *ranges, @@ -177,4 +170,26 @@ void select_range_partitions(const Datum value, /* Examine expression in order to select partitions. */ WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); + +/* + * Compare two Datums using the given comarison function. + * + * flinfo is a pointer to FmgrInfo, arg1 & arg2 are Datums. + */ +#define check_lt(finfo, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) < 0 ) + +#define check_le(finfo, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) <= 0 ) + +#define check_eq(finfo, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) == 0 ) + +#define check_ge(finfo, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) >= 0 ) + +#define check_gt(finfo, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) > 0 ) + + #endif /* PATHMAN_H */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index cb8ebbd1..a38e6873 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -15,6 +15,7 @@ */ #include "init.h" +#include "partition_creation.h" #include "pathman_workers.h" #include "relation_info.h" #include "utils.h" diff --git a/src/pathman_workers.h b/src/pathman_workers.h index a6b06dd6..974f3087 100644 --- a/src/pathman_workers.h +++ b/src/pathman_workers.h @@ -188,4 +188,9 @@ UnpackDatumFromByteArray(Datum *datum, Size datum_size, bool typbyval, return ((uint8 *) byte_array) + datum_size; } +/* + * Create partition to store 'value' using specific BGW. 
+ */ +Oid create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type); + #endif diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 869e9c73..9181b3bc 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -11,41 +11,26 @@ #include "pg_compat.h" -#include "pathman.h" #include "init.h" #include "hooks.h" -#include "partition_creation.h" +#include "pathman.h" #include "partition_filter.h" #include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" -#include "utils.h" -#include "xact_handling.h" #include "postgres.h" -#include "access/heapam.h" -#include "access/htup_details.h" -#include "access/transam.h" -#include "access/xact.h" -#include "catalog/pg_cast.h" -#include "catalog/pg_type.h" -#include "executor/spi.h" #include "foreign/fdwapi.h" -#include "fmgr.h" #include "miscadmin.h" #include "optimizer/clauses.h" #include "optimizer/plancat.h" #include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" -#include "utils/builtins.h" #include "utils/datum.h" #include "utils/lsyscache.h" -#include "utils/memutils.h" #include "utils/rel.h" -#include "utils/syscache.h" #include "utils/selfuncs.h" -#include "utils/snapmgr.h" #include "utils/typcache.h" @@ -61,21 +46,6 @@ Oid pathman_config_params_relid = InvalidOid; void _PG_init(void); -/* "Partition creation"-related functions */ -static Datum extract_binary_interval_from_text(Datum interval_text, - Oid part_atttype, - Oid *interval_type); - -static Oid spawn_partitions_val(Oid parent_relid, - Datum range_bound_min, - Datum range_bound_max, - Oid range_bound_type, - Datum interval_binary, - Oid interval_type, - Datum value, - Oid value_type); - - /* Expression tree handlers */ static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); @@ -132,27 +102,6 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, Relids required_outer); -/* - * Compare two Datums using the given comarison 
function. - * - * flinfo is a pointer to FmgrInfo, arg1 & arg2 are Datums. - */ -#define check_lt(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) < 0 ) - -#define check_le(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) <= 0 ) - -#define check_eq(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) == 0 ) - -#define check_ge(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) >= 0 ) - -#define check_gt(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) > 0 ) - - /* We can transform Param into Const provided that 'econtext' is available */ #define IsConstValue(wcxt, node) \ ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) @@ -454,349 +403,6 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, return childRTindex; } -/* - * Append\prepend partitions if there's no partition to store 'value'. - * - * Used by create_partitions_internal(). - * - * NB: 'value' type is not needed since we've already taken - * it into account while searching for the 'cmp_proc'. - */ -static Oid -spawn_partitions_val(Oid parent_relid, /* parent's Oid */ - Datum range_bound_min, /* parent's MIN boundary */ - Datum range_bound_max, /* parent's MAX boundary */ - Oid range_bound_type, /* type of boundary's value */ - Datum interval_binary, /* interval in binary form */ - Oid interval_type, /* INTERVALOID or prel->atttype */ - Datum value, /* value to be INSERTed */ - Oid value_type) /* type of value */ -{ - bool should_append; /* append or prepend? 
*/ - - Operator move_bound_op; /* descriptor */ - Oid move_bound_optype; /* operator's ret type */ - - FmgrInfo cmp_value_bound_finfo, /* exec 'value (>=|<) bound' */ - move_bound_finfo; /* exec 'bound + interval' */ - - Datum cur_leading_bound, /* boundaries of a new partition */ - cur_following_bound; - - Oid last_partition = InvalidOid; - - - fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, value_type, range_bound_type); - - /* value >= MAX_BOUNDARY */ - if (check_ge(&cmp_value_bound_finfo, value, range_bound_max)) - { - should_append = true; - cur_leading_bound = range_bound_max; - } - - /* value < MIN_BOUNDARY */ - else if (check_lt(&cmp_value_bound_finfo, value, range_bound_min)) - { - should_append = false; - cur_leading_bound = range_bound_min; - } - - /* There's a gap, halt and emit ERROR */ - else elog(ERROR, "cannot spawn a partition inside a gap"); - - /* Get "move bound operator" descriptor */ - move_bound_op = get_binary_operator(should_append ? "+" : "-", - range_bound_type, - interval_type); - /* Get operator's ret type */ - move_bound_optype = get_operator_ret_type(move_bound_op); - - /* Get operator's underlying function */ - fmgr_info(oprfuncid(move_bound_op), &move_bound_finfo); - - /* Don't forget to release system cache */ - ReleaseSysCache(move_bound_op); - - /* Perform some casts if types don't match */ - if (move_bound_optype != range_bound_type) - { - cur_leading_bound = perform_type_cast(cur_leading_bound, - range_bound_type, - move_bound_optype, - NULL); /* might emit ERROR */ - - /* Update 'range_bound_type' */ - range_bound_type = move_bound_optype; - - /* Fetch new comparison function */ - fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, - value_type, - range_bound_type); - } - - /* Execute comparison function cmp(value, cur_leading_bound) */ - while (should_append ? 
- check_ge(&cmp_value_bound_finfo, value, cur_leading_bound) : - check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) - { - Datum args[2]; - - /* Assign the 'following' boundary to current 'leading' value */ - cur_following_bound = cur_leading_bound; - - /* Move leading bound by interval (exec 'leading (+|-) INTERVAL') */ - cur_leading_bound = FunctionCall2(&move_bound_finfo, - cur_leading_bound, - interval_binary); - - args[0] = should_append ? cur_following_bound : cur_leading_bound; - args[1] = should_append ? cur_leading_bound : cur_following_bound; - - last_partition = create_single_range_partition_internal(parent_relid, - args[0], args[1], - range_bound_type, - NULL, NULL); - -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", - (should_append ? "Appending" : "Prepending"), - DebugPrintDatum(cur_following_bound, range_bound_type), - DebugPrintDatum(cur_leading_bound, range_bound_type), - MyProcPid); -#endif - } - - return last_partition; -} - -/* - * Convert interval from TEXT to binary form using partitioned column's type. 
- */ -static Datum -extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ - Oid part_atttype, /* partitioned column's type */ - Oid *interval_type) /* returned value */ -{ - Datum interval_binary; - const char *interval_cstring; - - interval_cstring = TextDatumGetCString(interval_text); - - /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ - if (is_date_type_internal(part_atttype)) - { - int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; - - /* Convert interval from CSTRING to internal form */ - interval_binary = DirectFunctionCall3(interval_in, - CStringGetDatum(interval_cstring), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(interval_typmod)); - if (interval_type) - *interval_type = INTERVALOID; - } - /* Otherwise cast it to the partitioned column's type */ - else - { - HeapTuple htup; - Oid typein_proc = InvalidOid; - - htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); - if (HeapTupleIsValid(htup)) - { - typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; - ReleaseSysCache(htup); - } - else - elog(ERROR, "Cannot find input function for type %u", part_atttype); - - /* - * Convert interval from CSTRING to 'prel->atttype'. - * - * Note: We pass 3 arguments in case - * 'typein_proc' also takes Oid & typmod. - */ - interval_binary = OidFunctionCall3(typein_proc, - CStringGetDatum(interval_cstring), - ObjectIdGetDatum(part_atttype), - Int32GetDatum(-1)); - if (interval_type) - *interval_type = part_atttype; - } - - return interval_binary; -} - -/* - * Append partitions (if needed) and return Oid of the partition to contain value. - * - * NB: This function should not be called directly, use create_partitions() instead. 
- */ -Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) -{ - MemoryContext old_mcxt = CurrentMemoryContext; - Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ - - PG_TRY(); - { - const PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? */ - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL)) - { - Oid base_bound_type; /* base type of prel->atttype */ - Oid base_value_type; /* base type of value_type */ - - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); - shout_if_prel_is_invalid(relid, prel, PT_RANGE); - - /* Fetch base types of prel->atttype & value_type */ - base_bound_type = getBaseType(prel->atttype); - base_value_type = getBaseType(value_type); - - /* Search for a suitable partition if we didn't hold it */ - Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); - if (lock_result == LOCKACQUIRE_OK) - { - Oid *parts; - int nparts; - - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - /* Shout if there's more than one */ - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - - /* It seems that we got a partition! 
*/ - else if (nparts == 1) - { - /* Unlock the parent (we're not going to spawn) */ - xact_unlock_partitioned_rel(relid); - - /* Simply return the suitable partition */ - partid = parts[0]; - } - - /* Don't forget to free */ - pfree(parts); - } - - /* Else spawn a new one (we hold a lock on the parent) */ - if (partid == InvalidOid) - { - Datum bound_min, /* absolute MIN */ - bound_max; /* absolute MAX */ - - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of one partition */ - interval_text; - - /* Read max & min range values from PartRelationInfo */ - bound_min = PrelGetRangesArray(prel)[0].min; - bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; - - /* Copy datums on order to protect them from cache invalidation */ - bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); - bound_max = datumCopy(bound_max, prel->attbyval, prel->attlen); - - /* Retrieve interval as TEXT from tuple */ - interval_text = values[Anum_pathman_config_range_interval - 1]; - - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - base_bound_type, - &interval_type); - - /* At last, spawn partitions to store the value */ - partid = spawn_partitions_val(PrelParentRelid(prel), - bound_min, bound_max, base_bound_type, - interval_binary, interval_type, - value, base_value_type); - } - } - else - elog(ERROR, "pg_pathman's config does not contain relation \"%s\"", - get_rel_name_or_relid(relid)); - } - PG_CATCH(); - { - ErrorData *edata; - - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); - FlushErrorState(); - - elog(LOG, "create_partitions_internal(): %s [%u]", - edata->message, MyProcPid); - - FreeErrorData(edata); - - /* Reset 'partid' in case of error */ - partid = InvalidOid; - } - PG_END_TRY(); - - return partid; -} - -/* - * Create RANGE partitions (if needed) using either BGW or current backend. 
- * - * Returns Oid of the partition to store 'value'. - */ -Oid -create_partitions_for_value(Oid relid, Datum value, Oid value_type) -{ - TransactionId rel_xmin; - Oid last_partition = InvalidOid; - - /* Check that table is partitioned and fetch xmin */ - if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) - { - bool part_in_prev_xact = - TransactionIdPrecedes(rel_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(rel_xmin, FrozenTransactionId); - - /* - * If table has been partitioned in some previous xact AND - * we don't hold any conflicting locks, run BGWorker. - */ - if (part_in_prev_xact && !xact_bgw_conflicting_lock_exists(relid)) - { - elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); - last_partition = create_partitions_for_value_bg_worker(relid, - value, - value_type); - } - /* Else it'd be better for the current backend to create partitions */ - else - { - elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); - last_partition = create_partitions_for_value_internal(relid, - value, - value_type); - } - } - else - elog(ERROR, "relation \"%s\" is not partitioned by pg_pathman", - get_rel_name_or_relid(relid)); - - /* Check that 'last_partition' is valid */ - if (last_partition == InvalidOid) - elog(ERROR, "could not create new partitions for relation \"%s\"", - get_rel_name_or_relid(relid)); - - return last_partition; -} - /* * Given RangeEntry array and 'value', return selected * RANGE partitions inside the WrapperNode. 
From 4827d9f28ae569e2b87c49db641bd6ee09482de5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 16 Nov 2016 21:34:44 +0300 Subject: [PATCH 0065/1124] create partition in parent's tablespace by default --- src/partition_creation.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 8d742e47..e220a325 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -24,6 +24,7 @@ #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" +#include "commands/tablespace.h" #include "miscadmin.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" @@ -549,6 +550,10 @@ create_single_partition_internal(Oid parent_relid, partition_rv = makeRangeVar(parent_nsp_name, part_name, -1); } + /* If no 'tablespace' is provided, get parent's tablespace */ + if (!tablespace) + tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); + /* Initialize TableLikeClause structure */ NodeSetTag(&like_clause, T_TableLikeClause); like_clause.relation = copyObject(parent_rv); From 6c863e3d5f2a80a127e701a6d3d3b8979fc574ca Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 17 Nov 2016 12:32:50 +0300 Subject: [PATCH 0066/1124] testgres test for foreign hash partitions added --- tests/partitioning_test.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py index c71c9b9b..f651f383 100644 --- a/tests/partitioning_test.py +++ b/tests/partitioning_test.py @@ -354,6 +354,14 @@ def test_foreign_table(self): master.start() master.psql('postgres', 'create extension pg_pathman') master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data 
into foreign partition via parent + # - drop partitions master.psql( 'postgres', '''create table abc(id serial, name text); @@ -406,6 +414,34 @@ def test_foreign_table(self): # Testing drop partitions (including foreign partitions) master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql( + 'postgres', + '''create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2)''') + fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') + + master.safe_psql( + 'postgres', + '''import foreign schema public limit to (f_hash_test) + from server fserv into public''' + ) + master.safe_psql( + 'postgres', + 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') + master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + '1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' + ) + master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') + if __name__ == "__main__": unittest.main() From 0e54eef941377fb8b3947bb8aef16c534ae4b027 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 17 Nov 2016 15:10:55 +0300 Subject: [PATCH 0067/1124] Add test case for recursive CTE --- expected/pathman_basic.out | 30 +++++++++++++++++++++++++++++- sql/pathman_basic.sql | 9 +++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 966b24af..e3c68c9c 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2001,7 +2001,35 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 Filter: (c1 < 2500) (12 rows) +/* Test recursive CTE */ +create table test.recursive_cte_test_tbl(id int not 
null, name text not null); +select * from create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||id from generate_series(1,100) f(id); +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 1) from generate_series(1,100) f(id); +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 2) from generate_series(1,100) f(id); +select * from test.recursive_cte_test_tbl where id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +with recursive test as (select min(name) as name from test.recursive_cte_test_tbl where id = 5 union all select (select min(name) from test.recursive_cte_test_tbl where id = 5 and name > test.name) from test where name is not null) select * from test; + name +------- + name5 + name6 + name7 + +(4 rows) + DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 48 other objects +NOTICE: drop cascades to 51 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 036240c4..e3940cb3 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -517,6 +517,15 @@ SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; +/* Test recursive CTE */ +create table test.recursive_cte_test_tbl(id int not null, name text not null); +select * from create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||id from generate_series(1,100) f(id); +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 1) from generate_series(1,100) f(id); +insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 2) from 
generate_series(1,100) f(id); +select * from test.recursive_cte_test_tbl where id = 5; +with recursive test as (select min(name) as name from test.recursive_cte_test_tbl where id = 5 union all select (select min(name) from test.recursive_cte_test_tbl where id = 5 and name > test.name) from test where name is not null) select * from test; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From ee9ccbc57696df4f295d40c03cb867d37ce07891 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 17 Nov 2016 17:26:18 +0300 Subject: [PATCH 0068/1124] improve type handling in function spawn_partitions_val() (issue #65) --- expected/pathman_basic.out | 47 ++++++++++++++++++++++++- sql/pathman_basic.sql | 20 +++++++++++ src/partition_creation.c | 72 ++++++++++++++++++++++++++++---------- src/utils.c | 6 ++++ 4 files changed, 126 insertions(+), 19 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fb6e7f03..70d763e5 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -320,6 +320,51 @@ SELECT count(*) FROM test.insert_into_select_copy; DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; NOTICE: drop cascades to 5 other objects +/* Test INSERT hooking with DATE type */ +CREATE TABLE test.insert_date_test(val DATE NOT NULL); +SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', + date '20161001', date '20170101', interval '1 month'); +NOTICE: sequence "insert_date_test_seq" does not exist, skipping + create_partitions_from_range +------------------------------ + 4 +(1 row) + +INSERT INTO test.insert_date_test VALUES ('20161201'); /* just insert the date */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; + count +------- + 4 +(1 row) + +INSERT INTO test.insert_date_test VALUES ('20170311'); /* append new partitions */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 
'test.insert_date_test'::REGCLASS; + count +------- + 6 +(1 row) + +INSERT INTO test.insert_date_test VALUES ('20160812'); /* prepend new partitions */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; + count +------- + 8 +(1 row) + +SELECT min(val) FROM test.insert_date_test; /* check first date */ + min +------------ + 08-12-2016 +(1 row) + +SELECT max(val) FROM test.insert_date_test; /* check last date */ + max +------------ + 03-11-2017 +(1 row) + +DROP TABLE test.insert_date_test CASCADE; +NOTICE: drop cascades to 8 other objects /* Test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); @@ -1982,6 +2027,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 (12 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 48 other objects +NOTICE: drop cascades to 49 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 4768acf0..a9f7b440 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -101,6 +101,26 @@ SELECT count(*) FROM test.insert_into_select_copy; DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +/* Test INSERT hooking with DATE type */ +CREATE TABLE test.insert_date_test(val DATE NOT NULL); +SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', + date '20161001', date '20170101', interval '1 month'); + +INSERT INTO test.insert_date_test VALUES ('20161201'); /* just insert the date */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; + +INSERT INTO test.insert_date_test VALUES ('20170311'); /* append new partitions */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; + +INSERT INTO 
test.insert_date_test VALUES ('20160812'); /* prepend new partitions */ +SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; + +SELECT min(val) FROM test.insert_date_test; /* check first date */ +SELECT max(val) FROM test.insert_date_test; /* check last date */ + +DROP TABLE test.insert_date_test CASCADE; + + /* Test special case: ONLY statement with not-ONLY for partitioned table */ CREATE TABLE test.from_only_test(val INT NOT NULL); INSERT INTO test.from_only_test SELECT generate_series(1, 20); diff --git a/src/partition_creation.c b/src/partition_creation.c index e220a325..47863d85 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -41,6 +41,10 @@ static Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); +static void extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, + Oid *move_bound_op_func, + Oid *move_bound_op_ret_type); + static Oid spawn_partitions_val(Oid parent_relid, Datum range_bound_min, Datum range_bound_max, @@ -353,6 +357,29 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ return interval_binary; } +/* + * Fetch binary operator by name and return it's function and ret type. + */ +static void +extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, + Oid *move_bound_op_func, /* returned value #1 */ + Oid *move_bound_op_ret_type) /* returned value #2 */ +{ + Operator op; + + /* Get "move bound operator" descriptor */ + op = get_binary_operator(opname, type1, type2); + if (!op) + elog(ERROR, "missing %s operator for types %s and %s", + opname, format_type_be(type1), format_type_be(type2)); + + *move_bound_op_func = oprfuncid(op); + *move_bound_op_ret_type = get_operator_ret_type(op); + + /* Don't forget to release system cache */ + ReleaseSysCache(op); +} + /* * Append\prepend partitions if there's no partition to store 'value'. 
* @@ -373,8 +400,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ { bool should_append; /* append or prepend? */ - Operator move_bound_op; /* descriptor */ - Oid move_bound_optype; /* operator's ret type */ + Oid move_bound_op_func, /* operator's function */ + move_bound_op_ret_type; /* operator's ret type */ FmgrInfo cmp_value_bound_finfo, /* exec 'value (>=|<) bound' */ move_bound_finfo; /* exec 'bound + interval' */ @@ -404,36 +431,45 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* There's a gap, halt and emit ERROR */ else elog(ERROR, "cannot spawn a partition inside a gap"); - /* Get "move bound operator" descriptor */ - move_bound_op = get_binary_operator(should_append ? "+" : "-", - range_bound_type, - interval_type); - /* Get operator's ret type */ - move_bound_optype = get_operator_ret_type(move_bound_op); - - /* Get operator's underlying function */ - fmgr_info(oprfuncid(move_bound_op), &move_bound_finfo); - - /* Don't forget to release system cache */ - ReleaseSysCache(move_bound_op); + /* Fetch operator's underlying function and ret type */ + extract_op_func_and_ret_type(should_append ? "+" : "-", + range_bound_type, + interval_type, + &move_bound_op_func, + &move_bound_op_ret_type); - /* Perform some casts if types don't match */ - if (move_bound_optype != range_bound_type) + /* Perform casts if types don't match (e.g. 
date + interval = timestamp) */ + if (move_bound_op_ret_type != range_bound_type) { + /* Cast 'cur_leading_bound' to 'move_bound_op_ret_type' */ cur_leading_bound = perform_type_cast(cur_leading_bound, range_bound_type, - move_bound_optype, + move_bound_op_ret_type, NULL); /* might emit ERROR */ /* Update 'range_bound_type' */ - range_bound_type = move_bound_optype; + range_bound_type = move_bound_op_ret_type; /* Fetch new comparison function */ fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, value_type, range_bound_type); + + /* Since type has changed, fetch another operator */ + extract_op_func_and_ret_type(should_append ? "+" : "-", + range_bound_type, + interval_type, + &move_bound_op_func, + &move_bound_op_ret_type); + + /* What, again? Don't want to deal with this nightmare */ + if (move_bound_op_ret_type != range_bound_type) + elog(ERROR, "error in spawn_partitions_val()"); } + /* Get operator's underlying function */ + fmgr_info(move_bound_op_func, &move_bound_finfo); + /* Execute comparison function cmp(value, cur_leading_bound) */ while (should_append ? 
check_ge(&cmp_value_bound_finfo, value, cur_leading_bound) : diff --git a/src/utils.c b/src/utils.c index c4a2d66d..279e01db 100644 --- a/src/utils.c +++ b/src/utils.c @@ -66,6 +66,12 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) Oid cmp_proc_oid; TypeCacheEntry *tce; + if (IsBinaryCoercible(type1, type2)) + type1 = type2; + + else if (IsBinaryCoercible(type2, type1)) + type2 = type1; + tce = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); cmp_proc_oid = get_opfamily_proc(tce->btree_opf, From a8ae1887dffd5ae5b0666679efbbf6ab863c8646 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 17 Nov 2016 17:52:22 +0300 Subject: [PATCH 0069/1124] hide NOTICE messages using GUC magic --- src/partition_creation.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 47863d85..7701f7e8 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -668,6 +668,15 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) ObjectAddress table_addr; Datum toast_options; static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + int guc_level; + + /* Create new GUC level... */ + guc_level = NewGUCNestLevel(); + + /* ... 
and set client_min_messages = WARNING */ + (void) set_config_option("client_min_messages", "WARNING", + PGC_USERSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); /* Create new partition owned by parent's posessor */ table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL); @@ -693,6 +702,9 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) /* Now create the toast table if needed */ NewRelationCreateToastTable(table_addr.objectId, toast_options); + /* Restore original GUC values */ + AtEOXact_GUC(true, guc_level); + /* Return the address */ return table_addr; } From 7e1de7bcd9cafe50e7d5e58243e80f723421529d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 17 Nov 2016 19:47:54 +0300 Subject: [PATCH 0070/1124] UPDATE and DELETE rows in parent if 'enable_parent' is true --- expected/pathman_basic.out | 134 +++++++++++++++++++++++++++++++- sql/pathman_basic.sql | 51 ++++++++++++ src/planner_tree_modification.c | 3 + 3 files changed, 187 insertions(+), 1 deletion(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 70d763e5..3432b94c 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1108,6 +1108,138 @@ SELECT * FROM ttt; Filter: (value = 2) (5 rows) +/* + * Test CTE query (DELETE) - by @parihaaraka + */ +CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +create table test.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +/* create 2 partitions */ +SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); +NOTICE: sequence "cte_del_xacts_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) 
+ +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_pkey on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((tmp.tid = t_2.id) AND (tmp.pdate = t_2.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_2_pkey on cte_del_xacts_2 t_2 +(24 rows) + +SELECT pathman.drop_partitions('test.cte_del_xacts'); /* now drop partitions */ +NOTICE: function test.cte_del_xacts_upd_trig_func() does not exist, skipping +NOTICE: 50 rows copied from test.cte_del_xacts_1 +NOTICE: 50 rows copied from test.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! 
*/ +SELECT pathman.set_enable_parent('test.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_pkey on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t_1 +(17 rows) + +/* parent disabled! 
*/ +SELECT pathman.set_enable_parent('test.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +------------------------------------------------------------------------------ + Delete on cte_del_xacts_1 t + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t +(9 rows) + +DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to table test.cte_del_xacts_1 /* * Test split and merge */ @@ -2027,6 +2159,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 (12 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 49 other objects +NOTICE: drop cascades to 50 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a9f7b440..6f4cab93 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -275,6 +275,57 @@ EXPLAIN (COSTS OFF) WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) SELECT * FROM ttt; + +/* + * Test CTE query (DELETE) - by @parihaaraka + */ +CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; + +create table test.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +/* create 2 partitions */ +SELECT 
pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +SELECT pathman.drop_partitions('test.cte_del_xacts'); /* now drop partitions */ + +/* create 1 partition */ +SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '1 year'::interval); + +/* parent enabled! */ +SELECT pathman.set_enable_parent('test.cte_del_xacts', true); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +/* parent disabled! */ +SELECT pathman.set_enable_parent('test.cte_del_xacts', false); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test.cte_del_xacts_specdata) +DELETE FROM test.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; + + /* * Test split and merge */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 73ead1fc..f6d2ea2b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -306,6 +306,9 @@ handle_modification_query(Query *parse) /* Exit if it's not partitioned */ if (!prel) return; + /* Exit if we must include parent */ + if (prel->enable_parent) return; + /* Parse syntax tree and extract partition ranges */ ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); From 216b4da29ad8d5c2436fcb366e5c8d7694997e0a Mon Sep 17 
00:00:00 2001 From: Dmitry Ivanov Date: Fri, 18 Nov 2016 13:43:03 +0300 Subject: [PATCH 0071/1124] add description for the latest test --- expected/pathman_basic.out | 2 +- sql/pathman_basic.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 3432b94c..31b44d4f 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1109,7 +1109,7 @@ SELECT * FROM ttt; (5 rows) /* - * Test CTE query (DELETE) - by @parihaaraka + * Test CTE query (DELETE) - by @parihaaraka (add varno to WalkerContext) */ CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6f4cab93..8b8228f6 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -277,7 +277,7 @@ SELECT * FROM ttt; /* - * Test CTE query (DELETE) - by @parihaaraka + * Test CTE query (DELETE) - by @parihaaraka (add varno to WalkerContext) */ CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; From fc9d807abe72aa9f2697dd28534090344e19cb21 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 18 Nov 2016 14:32:16 +0300 Subject: [PATCH 0072/1124] add CTE test involving pl/pgsql function --- expected/pathman_basic.out | 33 ++++++++++++++++++++++++++++++++- sql/pathman_basic.sql | 26 +++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 31b44d4f..4c7f98a6 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1109,7 +1109,7 @@ SELECT * FROM ttt; (5 rows) /* - * Test CTE query (DELETE) - by @parihaaraka (add varno to 
WalkerContext) + * Test CTE query - by @parihaaraka (add varno to WalkerContext) */ CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; @@ -1120,6 +1120,7 @@ create table test.cte_del_xacts_specdata state_code SMALLINT NOT NULL DEFAULT 8, regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL ); +INSERT INTO test.cte_del_xacts_specdata VALUES(1, 1, 1, current_timestamp); /* for subquery test */ /* create 2 partitions */ SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); NOTICE: sequence "cte_del_xacts_seq" does not exist, skipping @@ -1238,6 +1239,36 @@ WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t (9 rows) +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test.cte_del_xacts_specdata + WHERE state_code != test.cte_del_xacts_stab('test')) +SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test.cte_del_xacts_specdata + WHERE state_code != test.cte_del_xacts_stab('test')) +SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test.cte_del_xacts_stab(TEXT); DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; NOTICE: drop cascades to table test.cte_del_xacts_1 /* diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 8b8228f6..f5d6fa42 100644 --- a/sql/pathman_basic.sql +++ 
b/sql/pathman_basic.sql @@ -277,7 +277,7 @@ SELECT * FROM ttt; /* - * Test CTE query (DELETE) - by @parihaaraka (add varno to WalkerContext) + * Test CTE query - by @parihaaraka (add varno to WalkerContext) */ CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; @@ -289,6 +289,7 @@ create table test.cte_del_xacts_specdata state_code SMALLINT NOT NULL DEFAULT 8, regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL ); +INSERT INTO test.cte_del_xacts_specdata VALUES(1, 1, 1, current_timestamp); /* for subquery test */ /* create 2 partitions */ SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); @@ -323,6 +324,29 @@ WITH tmp AS ( DELETE FROM test.cte_del_xacts t USING tmp WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; + +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test.cte_del_xacts_specdata + WHERE state_code != test.cte_del_xacts_stab('test')) +SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test.cte_del_xacts_specdata + WHERE state_code != test.cte_del_xacts_stab('test')) +SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +DROP FUNCTION test.cte_del_xacts_stab(TEXT); DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; From 862874bf85dd1db97938eb2a2818d4559b9e45cd Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 21 Nov 2016 16:34:23 +0300 Subject: [PATCH 0073/1124] tests for hash_replace_partition() added --- expected/pathman_basic.out | 22 ++++++++++++++-------- sql/pathman_basic.sql | 7 
++++++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c4e6d2b5..a3e5921c 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -920,13 +920,19 @@ Inherits: test.hash_rel INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; DROP TABLE test.hash_rel_0; -EXPLAIN SELECT * FROM test.hash_rel; - QUERY PLAN ---------------------------------------------------------------------------------------------- - Append (cost=10000000000.00..30000000032.44 rows=2044 width=12) - -> Seq Scan on hash_rel_extern (cost=10000000000.00..10000000030.40 rows=2040 width=12) - -> Seq Scan on hash_rel_1 (cost=10000000000.00..10000000001.02 rows=2 width=12) - -> Seq Scan on hash_rel_2 (cost=10000000000.00..10000000001.02 rows=2 width=12) +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: partition must have the exact same structure as parent +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 (4 rows) /* @@ -1508,7 +1514,7 @@ SELECT count(*) FROM bool_test WHERE b = false; DROP TABLE bool_test CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 14 other objects DROP EXTENSION pg_pathman CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index c622d971..daf1e05a 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -243,7 +243,12 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') \d+ test.hash_rel_extern INSERT INTO test.hash_rel SELECT 
* FROM test.hash_rel_0; DROP TABLE test.hash_rel_0; -EXPLAIN SELECT * FROM test.hash_rel; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; /* * Clean up From d3e490a007276e098c419e3d825b7e70c30423c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Nov 2016 17:36:53 +0300 Subject: [PATCH 0074/1124] handle inlined SQL functions in pathman_post_parse_analysis_hook() --- src/hooks.c | 24 ++++++++++++++++++++++-- src/planner_tree_modification.c | 8 ++++++++ src/planner_tree_modification.h | 1 + 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 22cf8a0a..06e5e26b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -516,9 +516,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (query->commandType == CMD_UTILITY && (xact_is_transaction_stmt(query->utilityStmt) || xact_is_set_transaction_stmt(query->utilityStmt))) - { return; - } /* Finish delayed invalidation jobs */ if (IsPathmanReady()) @@ -532,6 +530,28 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } + + /* Process inlined SQL functions (we've already entered planning stage) */ + if (IsPathmanReady() && get_refcount_parenthood_statuses() > 0) + { + /* Check that pg_pathman is the last extension loaded */ + if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) + { + char *spl_value; /* value of "shared_preload_libraries" GUC */ + + spl_value = GetConfigOptionByName("shared_preload_libraries", NULL, false); + + ereport(ERROR, + (errmsg("extension conflict has been detected"), + errdetail("shared_preload_libraries = \"%s\"", spl_value), + errhint("pg_pathman should be the last extension listed in " + 
"\"shared_preload_libraries\" GUC in order to " + "prevent possible conflicts with other extensions"))); + } + + /* Modify query tree if needed */ + pathman_transform_query(query); + } } /* diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index f6d2ea2b..3ea63524 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -598,6 +598,14 @@ incr_refcount_parenthood_statuses(void) per_table_parenthood_mapping_refcount++; } +/* Return current value of usage counter */ +uint32 +get_refcount_parenthood_statuses(void) +{ + /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ + return per_table_parenthood_mapping_refcount; +} + /* Reset all cached statuses if needed (query end) */ void decr_refcount_parenthood_statuses(bool entirely) diff --git a/src/planner_tree_modification.h b/src/planner_tree_modification.h index 6572de9f..80485cc2 100644 --- a/src/planner_tree_modification.h +++ b/src/planner_tree_modification.h @@ -45,6 +45,7 @@ void assign_rel_parenthood_status(uint32 query_id, Oid relid, rel_parenthood_status new_status); rel_parenthood_status get_rel_parenthood_status(uint32 query_id, Oid relid); void incr_refcount_parenthood_statuses(void); +uint32 get_refcount_parenthood_statuses(void); void decr_refcount_parenthood_statuses(bool entirely); From 860c57895b843b40bd5676b2155ef9cf4a439534 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Nov 2016 17:47:33 +0300 Subject: [PATCH 0075/1124] fixes for PG 9.5 in pathman_post_parse_analysis_hook() --- src/hooks.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 06e5e26b..53acc35a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -539,7 +539,11 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) { char *spl_value; /* value of "shared_preload_libraries" GUC */ +#if PG_VERSION_NUM >= 90600 spl_value = GetConfigOptionByName("shared_preload_libraries", NULL, false); +#else + spl_value = 
GetConfigOptionByName("shared_preload_libraries", NULL); +#endif ereport(ERROR, (errmsg("extension conflict has been detected"), From 010bf7ed0bb3bf670961849c531b711eb3e91642 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Nov 2016 18:31:18 +0300 Subject: [PATCH 0076/1124] test inlined SQL functions --- expected/pathman_basic.out | 34 ++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 16 ++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4c7f98a6..58802050 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1080,6 +1080,40 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 (20 rows) +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(4 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects /* * Test CTE query */ diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f5d6fa42..304063ad 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -264,6 +264,22 @@ JOIN test.range_rel j2 on j2.id = j1.id JOIN test.num_range_rel j3 on j3.id = j1.id WHERE j1.dt < '2015-03-01' AND j2.dt >= 
'2015-02-01' ORDER BY j2.dt; +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; + /* * Test CTE query */ From dc86c76ddba65843af637f8702fbd0ecc50814b6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Nov 2016 20:11:53 +0300 Subject: [PATCH 0077/1124] call fill_type_cmp_fmgr_info() in handle_binary_opexpr() only for RANGE part. cases, improve fill_type_cmp_fmgr_info() --- expected/pathman_basic.out | 24 ++++++++++++++++++++++++ sql/pathman_basic.sql | 13 ++++++++++++- src/pg_pathman.c | 11 ++++++----- src/utils.c | 24 ++++++++++++++++-------- 4 files changed, 58 insertions(+), 14 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 58802050..465ba610 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1114,6 +1114,30 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); DROP FUNCTION test.sql_inline_func(int); DROP TABLE test.sql_inline CASCADE; NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects /* 
* Test CTE query */ diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 304063ad..8d175c15 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -280,6 +280,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); DROP FUNCTION test.sql_inline_func(int); DROP TABLE test.sql_inline CASCADE; +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); + +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); +SELECT * FROM test.hash_varchar WHERE val = 'a'; +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + +DROP TABLE test.hash_varchar CASCADE; + /* * Test CTE query */ @@ -291,7 +303,6 @@ EXPLAIN (COSTS OFF) WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) SELECT * FROM ttt; - /* * Test CTE query - by @parihaaraka (add varno to WalkerContext) */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9181b3bc..3deedf25 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -704,7 +704,6 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, { int strategy; TypeCacheEntry *tce; - FmgrInfo cmp_func; Oid vartype; const OpExpr *expr = (const OpExpr *) result->orig; const PartRelationInfo *prel = context->prel; @@ -730,10 +729,6 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, if (strategy == 0) goto binary_opexpr_return; - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(c->consttype), - getBaseType(prel->atttype)); - switch (prel->parttype) { case PT_HASH: @@ -754,6 +749,12 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, case PT_RANGE: { + FmgrInfo cmp_func; + + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(c->consttype), + getBaseType(prel->atttype)); + select_range_partitions(c->constvalue, &cmp_func, PrelGetRangesArray(context->prel), diff --git a/src/utils.c b/src/utils.c index 279e01db..3bd28040 100644 --- a/src/utils.c +++ b/src/utils.c @@ 
-64,7 +64,8 @@ void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) { Oid cmp_proc_oid; - TypeCacheEntry *tce; + TypeCacheEntry *tce_1, + *tce_2; if (IsBinaryCoercible(type1, type2)) type1 = type2; @@ -72,20 +73,27 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) else if (IsBinaryCoercible(type2, type1)) type2 = type1; - tce = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); + tce_1 = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); + tce_2 = lookup_type_cache(type2, TYPECACHE_BTREE_OPFAMILY); - cmp_proc_oid = get_opfamily_proc(tce->btree_opf, - type1, - type2, + if (tce_1->btree_opf != tce_2->btree_opf) + goto fill_type_cmp_fmgr_info_error; + + cmp_proc_oid = get_opfamily_proc(tce_1->btree_opf, + tce_1->btree_opintype, + tce_2->btree_opintype, BTORDER_PROC); if (cmp_proc_oid == InvalidOid) - elog(ERROR, "missing comparison function for types %s & %s", - format_type_be(type1), format_type_be(type2)); + goto fill_type_cmp_fmgr_info_error; fmgr_info(cmp_proc_oid, finfo); - return; + return; /* exit safely */ + +fill_type_cmp_fmgr_info_error: + elog(ERROR, "missing comparison function for types %s & %s", + format_type_be(type1), format_type_be(type2)); } List * From 7d813646f2cd68dd4b916dbab50d2084e6e31227 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 22 Nov 2016 12:52:15 +0300 Subject: [PATCH 0078/1124] more comments for fill_type_cmp_fmgr_info() --- src/utils.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/utils.c b/src/utils.c index 3bd28040..be2534c4 100644 --- a/src/utils.c +++ b/src/utils.c @@ -67,6 +67,7 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) TypeCacheEntry *tce_1, *tce_2; + /* Check type compatibility */ if (IsBinaryCoercible(type1, type2)) type1 = type2; @@ -76,6 +77,7 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) tce_1 = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); tce_2 = lookup_type_cache(type2, TYPECACHE_BTREE_OPFAMILY); + 
/* Both types should belong to the same opfamily */ if (tce_1->btree_opf != tce_2->btree_opf) goto fill_type_cmp_fmgr_info_error; @@ -84,13 +86,16 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) tce_2->btree_opintype, BTORDER_PROC); - if (cmp_proc_oid == InvalidOid) + /* No such function, emit ERROR */ + if (!OidIsValid(cmp_proc_oid)) goto fill_type_cmp_fmgr_info_error; + /* Fill FmgrInfo struct */ fmgr_info(cmp_proc_oid, finfo); - return; /* exit safely */ + return; /* everything is OK */ +/* Handle errors (no such function) */ fill_type_cmp_fmgr_info_error: elog(ERROR, "missing comparison function for types %s & %s", format_type_be(type1), format_type_be(type2)); From fa6f4c159499cd00d606ecf3b1f30cd7fab813b2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 22 Nov 2016 18:06:19 +0300 Subject: [PATCH 0079/1124] clean code, take lock in PathmanRenameConstraint() --- src/hooks.c | 2 +- src/init.c | 14 +++++-- src/init.h | 16 +++----- src/pl_funcs.c | 4 +- src/utility_stmt_hooking.c | 75 +++++++++++++++++++------------------- src/utility_stmt_hooking.h | 2 +- 6 files changed, 57 insertions(+), 56 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index dbee736f..e28b0c6c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -653,7 +653,7 @@ pathman_process_utility_hook(Node *parsetree, * Rename check constraint of a table if it is a partition managed * by pg_pathman */ - PathmanDoRenameConstraint((RenameStmt *) parsetree); + PathmanRenameConstraint((RenameStmt *) parsetree); } } diff --git a/src/init.c b/src/init.c index 2be77763..4f1f9e7e 100644 --- a/src/init.c +++ b/src/init.c @@ -593,12 +593,18 @@ find_inheritance_children_array(Oid parentrelId, /* * Generate check constraint name for a partition. * - * This function does not perform sanity checks at all. + * These functions does not perform sanity checks at all. 
*/ char * -build_check_constraint_name_internal(Oid relid, AttrNumber attno) +build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno) { - return build_check_constraint_name_by_relname(get_rel_name(relid), attno); + return build_check_constraint_name_relname_internal(get_rel_name(relid), attno); +} + +char * +build_check_constraint_name_relname_internal(char *relname, AttrNumber attno) +{ + return psprintf("pathman_%s_%u_check", relname, attno); } /* @@ -807,7 +813,7 @@ get_partition_constraint_expr(Oid partition, AttrNumber part_attno) bool conbin_isnull; Expr *expr; /* expression tree for constraint */ - conname = build_check_constraint_name_internal(partition, part_attno); + conname = build_check_constraint_name_relid_internal(partition, part_attno); conid = get_relation_constraint_oid(partition, conname, true); if (conid == InvalidOid) { diff --git a/src/init.h b/src/init.h index 1179c027..d4c4850a 100644 --- a/src/init.h +++ b/src/init.h @@ -86,15 +86,6 @@ extern PathmanInitState pg_pathman_init_state; pg_pathman_init_state.initialization_needed = true; \ } while (0) -/* - * Generate check constraint name for given relname - */ -static inline char * -build_check_constraint_name_by_relname(char *relname, AttrNumber attno) -{ - return psprintf("pathman_%s_%u_check", relname, attno); -} - /* * Save and restore PathmanInitState. 
@@ -132,8 +123,11 @@ find_children_status find_inheritance_children_array(Oid parentrelId, uint32 *children_size, Oid **children); -char *build_check_constraint_name_internal(Oid relid, - AttrNumber attno); +char *build_check_constraint_name_relid_internal(Oid relid, + AttrNumber attno); + +char *build_check_constraint_name_relname_internal(char *relname, + AttrNumber attno); bool pathman_config_contains_relation(Oid relid, Datum *values, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 4a454a35..9d68e5e0 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -521,7 +521,7 @@ build_check_constraint_name_attnum(PG_FUNCTION_ARGS) elog(ERROR, "Cannot build check constraint name: " "invalid attribute number %i", attnum); - result = build_check_constraint_name_internal(relid, attnum); + result = build_check_constraint_name_relid_internal(relid, attnum); PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } @@ -541,7 +541,7 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) elog(ERROR, "relation \"%s\" has no column \"%s\"", get_rel_name_or_relid(relid), text_to_cstring(attname)); - result = build_check_constraint_name_internal(relid, attnum); + result = build_check_constraint_name_relid_internal(relid, attnum); PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 65cbd729..ba8a727c 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -626,44 +626,45 @@ prepare_rri_fdw_for_copy(EState *estate, } /* - * Rename check constraint of table if it is a partition + * Rename check constraint of table if it's a partition */ void -PathmanDoRenameConstraint(const RenameStmt *stmt) +PathmanRenameConstraint(const RenameStmt *stmt) { - Oid partition = RangeVarGetRelid(stmt->relation, NoLock, true); - Oid parent = get_rel_parent(partition); - - if (partition != InvalidOid && parent != InvalidOid) - { - char *old_constraint_name, - *new_constraint_name; - const 
PartRelationInfo *prel = get_pathman_relation_info(parent); - - if (prel) - { - RangeVar *rngVar; - RenameStmt *s; - - /* Generate old constraint name */ - old_constraint_name = build_check_constraint_name_by_relname( - get_rel_name(partition), - prel->attnum); - - /* Generate new constraint name */ - new_constraint_name = build_check_constraint_name_by_relname( - stmt->newname, - prel->attnum); - - /* Build check constraint RENAME statement */ - s = makeNode(RenameStmt); - s->renameType = OBJECT_TABCONSTRAINT; - s->relation = stmt->relation; - s->subname = old_constraint_name; - s->newname = new_constraint_name; - s->missing_ok = false; - - RenameConstraint(s); - } - } + Oid partition_relid, + parent_relid; + char *old_constraint_name, + *new_constraint_name; + RenameStmt *rename_stmt; + const PartRelationInfo *prel; + + partition_relid = RangeVarGetRelid(stmt->relation, AccessShareLock, false); + parent_relid = get_rel_parent(partition_relid); + + /* Skip if there's no parent */ + if (!OidIsValid(parent_relid)) return; + + /* Fetch partitioning data */ + prel = get_pathman_relation_info(parent_relid); + + /* Skip if this table is not partitioned */ + if (!prel) return; + + /* Generate old constraint name */ + old_constraint_name = build_check_constraint_name_relid_internal(partition_relid, + prel->attnum); + + /* Generate new constraint name */ + new_constraint_name = build_check_constraint_name_relname_internal(stmt->newname, + prel->attnum); + + /* Build check constraint RENAME statement */ + rename_stmt = makeNode(RenameStmt); + rename_stmt->renameType = OBJECT_TABCONSTRAINT; + rename_stmt->relation = stmt->relation; + rename_stmt->subname = old_constraint_name; + rename_stmt->newname = new_constraint_name; + rename_stmt->missing_ok = false; + + RenameConstraint(rename_stmt); } diff --git a/src/utility_stmt_hooking.h b/src/utility_stmt_hooking.h index b207581b..3dd87822 100644 --- a/src/utility_stmt_hooking.h +++ b/src/utility_stmt_hooking.h @@ -19,6 +19,6 @@ 
bool is_pathman_related_copy(Node *parsetree); void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); -void PathmanDoRenameConstraint(const RenameStmt *stmt); +void PathmanRenameConstraint(const RenameStmt *stmt); #endif From a68274260d2530c257da9d794741f15919fdc529 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 23 Nov 2016 13:56:24 +0300 Subject: [PATCH 0080/1124] half open ranges --- expected/pathman_calamity.out | 16 ----- sql/pathman_calamity.sql | 7 --- src/init.c | 106 ++++++++++++++++++++++------------ src/partition_creation.c | 46 ++++++++++++++- src/partition_creation.h | 4 +- src/pg_pathman.c | 6 +- src/pl_range_funcs.c | 3 + src/rangeset.h | 2 - src/relation_info.h | 3 +- 9 files changed, 126 insertions(+), 67 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ebea830a..aaab85df 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -238,22 +238,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena -> Seq Scan on part_ok_3 (5 rows) -ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check -CHECK (val < 10); /* wrong constraint */ -SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: Wrong constraint format for RANGE partition "wrong_partition" -EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ - Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 -(5 rows) - -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; ALTER TABLE calamity.wrong_partition ADD CONSTRAINT pathman_wrong_partition_1_check CHECK (val = 1 OR val = 2); /* wrong constraint */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index f7f01699..a93cfea4 100644 --- a/sql/pathman_calamity.sql +++ 
b/sql/pathman_calamity.sql @@ -85,13 +85,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ -ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check -CHECK (val < 10); /* wrong constraint */ -SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; - ALTER TABLE calamity.wrong_partition ADD CONSTRAINT pathman_wrong_partition_1_check CHECK (val = 1 OR val = 2); /* wrong constraint */ diff --git a/src/init.c b/src/init.c index a022c959..ed4d2741 100644 --- a/src/init.c +++ b/src/init.c @@ -75,8 +75,8 @@ static int cmp_range_entries(const void *p1, const void *p2, void *arg); static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, - Datum *min, - Datum *max); + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null); static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, @@ -375,14 +375,18 @@ fill_prel_with_partitions(const Oid *partitions, case PT_RANGE: { - Datum range_min, range_max; + Datum lower, upper; + bool lower_null, upper_null; if (validate_range_constraint(con_expr, prel, - &range_min, &range_max)) + &lower, &upper, + &lower_null, &upper_null)) { - prel->ranges[i].child_oid = partitions[i]; - prel->ranges[i].min = range_min; - prel->ranges[i].max = range_max; + prel->ranges[i].child_oid = partitions[i]; + prel->ranges[i].min = lower; + prel->ranges[i].max = upper; + prel->ranges[i].infinite_min = lower_null; + prel->ranges[i].infinite_max = upper_null; } else { @@ -864,61 +868,91 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) Oid cmp_proc_oid = *(Oid *) arg; + /* If range is 
half open */ + if (v1->infinite_min) + if (v2->infinite_min) + return Int32GetDatum(0); + return Int32GetDatum(-1); + + /* Else if range is closed */ return OidFunctionCall2(cmp_proc_oid, v1->min, v2->min); } /* - * Validates range constraint. It MUST have this exact format: + * Validates range constraint. It MUST have one of the following formats: * * VARIABLE >= CONST AND VARIABLE < CONST + * VARIABLE >= CONST + * VARIABLE < CONST * - * Writes 'min' & 'max' values on success. + * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. */ static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, - Datum *min, - Datum *max) + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null) { const TypeCacheEntry *tce; - const BoolExpr *boolexpr = (const BoolExpr *) expr; const OpExpr *opexpr; int strategy; - if (!expr) - return false; +/* Validates a single expression of kind VAR >= CONST or VAR < CONST */ +#define validate_range_expr(expr) \ + { \ + Datum val; \ + opexpr = (OpExpr *) (expr); \ + strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf); \ + \ + /* Get const value */ \ + if (!read_opexpr_const(opexpr, prel, &val)) \ + return false; \ + \ + /* Set min or max depending on operator */ \ + switch (strategy) \ + { \ + case BTGreaterEqualStrategyNumber: \ + *lower_null = false; \ + *lower = val; \ + break; \ + case BTLessStrategyNumber: \ + *upper_null = false; \ + *upper = val; \ + break; \ + default: \ + return false; \ + } \ + } - /* it should be an AND operator on top */ - if (!and_clause((Node *) expr)) + if (!expr) return false; - + *lower_null = *upper_null = false; tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); - /* check that left operand is >= operator */ - opexpr = (OpExpr *) linitial(boolexpr->args); - strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf); - - if (strategy == BTGreaterEqualStrategyNumber) + /* It could be either AND operator on top or just 
an OpExpr */ + if (and_clause((Node *) expr)) { - if (!read_opexpr_const(opexpr, prel, min)) - return false; - } - else - return false; + const BoolExpr *boolexpr = (const BoolExpr *) expr; + ListCell *lc; + + foreach (lc, boolexpr->args) + { + Node *arg = lfirst(lc); - /* check that right operand is < operator */ - opexpr = (OpExpr *) lsecond(boolexpr->args); - strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf); + if(!IsA(arg, OpExpr)) + return false; - if (strategy == BTLessStrategyNumber) + validate_range_expr(arg); + } + return true; + } + else if(IsA(expr, OpExpr)) { - if (!read_opexpr_const(opexpr, prel, max)) - return false; + validate_range_expr(expr); + return true; } - else - return false; - return true; + return false; } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index 7701f7e8..668411de 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -834,11 +834,29 @@ build_range_check_constraint(Oid child_relid, return range_constr; } +// static int16_t +// cmp_boundaries(FmgrInfo cmp_func, Datum s1, Datum s2, bool s1_infinite, bool s2_infinite) +// { +// if (s1_infinite && s2_infinite) +// elog(ERROR, +// "two half open ranges are overlap"); + +// if (s1_infinite) +// return -1; + +// if (s2_infinite) +// return 1; + +// return DatumGetInt16(FunctionCall2(&cmp_func, start_value, ranges[i].max)); +// } + /* Check if range overlaps with any partitions */ bool check_range_available(Oid parent_relid, Datum start_value, Datum end_value, + bool infinite_start, + bool infinite_end, Oid value_type, bool raise_error) { @@ -864,8 +882,32 @@ check_range_available(Oid parent_relid, ranges = PrelGetRangesArray(prel); for (i = 0; i < PrelChildrenCount(prel); i++) { - int c1 = FunctionCall2(&cmp_func, start_value, ranges[i].max), - c2 = FunctionCall2(&cmp_func, end_value, ranges[i].min); + // int c1 = cmp_boundaries(cmp_func, start_value, ranges[i].max, infinite_start, ranges[i].infinite_max); + // int c2 = 
cmp_boundaries(cmp_func, end_value, ranges[i].min, infinite_end, ranges[i].infinite_min); + + /* If both ranges are half open then they are obviously overlap */ + // if (infinite_start && ranges[i].infinite_max) + // return false; + // if (infinite_end && ranges[i].infinite_min) + // return false; + + // int c1 = FunctionCall2(&cmp_func, start_value, ranges[i].max), + // c2 = FunctionCall2(&cmp_func, end_value, ranges[i].min); + int c1, c2; + + /* + * If the range we're checking starts with minus infinity or current + * range is ends in plus infinity then the left boundary of the first + * one is on the left + */ + c1 = (infinite_start || ranges[i].infinite_max) ? + -1 : FunctionCall2(&cmp_func, start_value, ranges[i].max); + /* + * Similary check the right boundary of first range is on the right + * of the beginning of second one + */ + c2 = (infinite_end || ranges[i].infinite_min) ? + -1 : FunctionCall2(&cmp_func, end_value, ranges[i].max); /* There's someone! */ if (c1 < 0 && c2 > 0) diff --git a/src/partition_creation.h b/src/partition_creation.h index f89ff1ca..5e4345da 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -35,9 +35,11 @@ Node * build_raw_range_check_tree(char *attname, Datum end_value, Oid value_type); -bool check_range_available(Oid partition_relid, +bool check_range_available(Oid parent_relid, Datum start_value, Datum end_value, + bool infinite_start, + bool infinite_end, Oid value_type, bool raise_error); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9181b3bc..be8037cd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -446,8 +446,10 @@ select_range_partitions(const Datum value, Assert(cmp_func); /* Corner cases */ - cmp_min = FunctionCall2(cmp_func, value, ranges[startidx].min), - cmp_max = FunctionCall2(cmp_func, value, ranges[endidx].max); + cmp_min = ranges[startidx].infinite_min ? + 1 : DatumGetInt32(FunctionCall2(cmp_func, value, ranges[startidx].min)); + cmp_max = ranges[endidx].infinite_max ? 
+ -1 : DatumGetInt32(FunctionCall2(cmp_func, value, ranges[endidx].max)); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0dba13ca..16a2b784 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -166,12 +166,15 @@ check_range_available_pl(PG_FUNCTION_ARGS) Datum start_value = PG_GETARG_DATUM(1), end_value = PG_GETARG_DATUM(2); + bool start_null = PG_ARGISNULL(1), + end_null = PG_ARGISNULL(2); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); /* Raise ERROR if range overlaps with any partition */ check_range_available(parent_relid, start_value, end_value, + start_null, end_null, value_type, true); diff --git a/src/rangeset.h b/src/rangeset.h index dd65ef1c..0e2efbe6 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -28,7 +28,6 @@ typedef struct { uint32 upper; /* lossy + upper_bound */ } IndexRange; - /* Convenience macros for make_irange(...) 
*/ #define IR_LOSSY true #define IR_COMPLETE false @@ -43,7 +42,6 @@ typedef struct { #define irange_lower(irange) ( (uint32) (irange.lower & IRANGE_BONDARY_MASK) ) #define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BONDARY_MASK) ) - #define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) #define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) #define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) diff --git a/src/relation_info.h b/src/relation_info.h index b6796976..9c5866f9 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -33,9 +33,10 @@ typedef enum typedef struct { Oid child_oid; - Datum min, max; + bool infinite_min, + infinite_max; } RangeEntry; /* From fd46919c5f5b2487309329f7ecba7d86d29bd84b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 23 Nov 2016 14:56:57 +0300 Subject: [PATCH 0081/1124] fix config loading --- src/init.c | 2 ++ src/partition_creation.c | 37 +++++-------------------------------- 2 files changed, 7 insertions(+), 32 deletions(-) diff --git a/src/init.c b/src/init.c index ed4d2741..d88a9e3a 100644 --- a/src/init.c +++ b/src/init.c @@ -870,9 +870,11 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) /* If range is half open */ if (v1->infinite_min) + { if (v2->infinite_min) return Int32GetDatum(0); return Int32GetDatum(-1); + } /* Else if range is closed */ return OidFunctionCall2(cmp_proc_oid, v1->min, v2->min); diff --git a/src/partition_creation.c b/src/partition_creation.c index 668411de..b099093a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -834,22 +834,6 @@ build_range_check_constraint(Oid child_relid, return range_constr; } -// static int16_t -// cmp_boundaries(FmgrInfo cmp_func, Datum s1, Datum s2, bool s1_infinite, bool s2_infinite) -// { -// if (s1_infinite && s2_infinite) -// elog(ERROR, -// "two half open ranges are overlap"); - -// if (s1_infinite) -// return -1; - -// if (s2_infinite) -// return 1; - -// return 
DatumGetInt16(FunctionCall2(&cmp_func, start_value, ranges[i].max)); -// } - /* Check if range overlaps with any partitions */ bool check_range_available(Oid parent_relid, @@ -882,32 +866,21 @@ check_range_available(Oid parent_relid, ranges = PrelGetRangesArray(prel); for (i = 0; i < PrelChildrenCount(prel); i++) { - // int c1 = cmp_boundaries(cmp_func, start_value, ranges[i].max, infinite_start, ranges[i].infinite_max); - // int c2 = cmp_boundaries(cmp_func, end_value, ranges[i].min, infinite_end, ranges[i].infinite_min); - - /* If both ranges are half open then they are obviously overlap */ - // if (infinite_start && ranges[i].infinite_max) - // return false; - // if (infinite_end && ranges[i].infinite_min) - // return false; - - // int c1 = FunctionCall2(&cmp_func, start_value, ranges[i].max), - // c2 = FunctionCall2(&cmp_func, end_value, ranges[i].min); int c1, c2; /* * If the range we're checking starts with minus infinity or current - * range is ends in plus infinity then the left boundary of the first - * one is on the left + * range ends in plus infinity then the left boundary of the first + * range is on the left. Otherwise compare specific values */ c1 = (infinite_start || ranges[i].infinite_max) ? -1 : FunctionCall2(&cmp_func, start_value, ranges[i].max); /* - * Similary check the right boundary of first range is on the right - * of the beginning of second one + * Similary check that right boundary of the range we're checking is on + * the right of the beginning of the current one */ c2 = (infinite_end || ranges[i].infinite_min) ? - -1 : FunctionCall2(&cmp_func, end_value, ranges[i].max); + -1 : FunctionCall2(&cmp_func, end_value, ranges[i].min); /* There's someone! 
*/ if (c1 < 0 && c2 > 0) From 01ef629726c1a1780c07a6447ae35508739064d5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 23 Nov 2016 17:38:23 +0300 Subject: [PATCH 0082/1124] further refactoring and improvements (new function is_pathman_related_table_rename(), remove redundant function get_rel_parent() --- expected/pathman_utility_stmt_hooking.out | 10 +- sql/pathman_utility_stmt_hooking.sql | 11 +- src/hooks.c | 24 ++-- src/init.c | 2 +- src/init.h | 2 +- src/utility_stmt_hooking.c | 136 ++++++++++++++-------- src/utility_stmt_hooking.h | 13 ++- src/utils.c | 29 ----- src/utils.h | 1 - 9 files changed, 120 insertions(+), 108 deletions(-) diff --git a/expected/pathman_utility_stmt_hooking.out b/expected/pathman_utility_stmt_hooking.out index 5fb95f2e..94dbee45 100644 --- a/expected/pathman_utility_stmt_hooking.out +++ b/expected/pathman_utility_stmt_hooking.out @@ -220,16 +220,16 @@ Inherits: rename.test CREATE OR REPLACE FUNCTION add_constraint(rel regclass, att text) RETURNS VOID AS $$ declare - constraint_name text := build_check_constraint_name(rel, 'a'); + constraint_name text := build_check_constraint_name(rel, 'a'); BEGIN - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', - rel, constraint_name); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); END $$ LANGUAGE plpgsql; /* - * Check that it doesn't affect regular inherited tables that aren't managed - * by pg_pathman + * Check that it doesn't affect regular inherited + * tables that aren't managed by pg_pathman */ CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); diff --git a/sql/pathman_utility_stmt_hooking.sql b/sql/pathman_utility_stmt_hooking.sql index de06c26f..89e9225c 100644 --- a/sql/pathman_utility_stmt_hooking.sql +++ b/sql/pathman_utility_stmt_hooking.sql @@ -93,6 +93,7 @@ SELECT * FROM copy_stmt_hooking.test ORDER BY val; DROP SCHEMA copy_stmt_hooking 
CASCADE; + /* * Test auto check constraint renaming */ @@ -108,17 +109,17 @@ ALTER TABLE rename.test_0 RENAME TO test_one; CREATE OR REPLACE FUNCTION add_constraint(rel regclass, att text) RETURNS VOID AS $$ declare - constraint_name text := build_check_constraint_name(rel, 'a'); + constraint_name text := build_check_constraint_name(rel, 'a'); BEGIN - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', - rel, constraint_name); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); END $$ LANGUAGE plpgsql; /* - * Check that it doesn't affect regular inherited tables that aren't managed - * by pg_pathman + * Check that it doesn't affect regular inherited + * tables that aren't managed by pg_pathman */ CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); diff --git a/src/hooks.c b/src/hooks.c index e28b0c6c..9026eb58 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -35,10 +35,6 @@ shmem_startup_hook_type shmem_startup_hook_next = NULL; ProcessUtility_hook_type process_utility_hook_next = NULL; -#define is_table_rename_statement(s) \ - IsA((s), RenameStmt) && ((RenameStmt *)(s))->renameType == OBJECT_TABLE - - /* Take care of joins */ void pathman_join_pathlist_hook(PlannerInfo *root, @@ -631,9 +627,12 @@ pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag) { - /* Override standard COPY statement if needed */ if (IsPathmanReady()) { + Oid partition_relid; + AttrNumber partitioned_col; + + /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) { uint64 processed; @@ -647,14 +646,13 @@ pathman_process_utility_hook(Node *parsetree, return; /* don't call standard_ProcessUtility() or hooks */ } - if (is_table_rename_statement(parsetree)) - { - /* - * Rename check constraint of a table if it is a partition managed - * by pg_pathman - */ - PathmanRenameConstraint((RenameStmt *) 
parsetree); - } + /* Override standard RENAME statement if needed */ + if (is_pathman_related_table_rename(parsetree, + &partition_relid, + &partitioned_col)) + PathmanRenameConstraint(partition_relid, + partitioned_col, + (const RenameStmt *) parsetree); } /* Call hooks set by other extensions if needed */ diff --git a/src/init.c b/src/init.c index 4f1f9e7e..1b31a2e7 100644 --- a/src/init.c +++ b/src/init.c @@ -602,7 +602,7 @@ build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno) } char * -build_check_constraint_name_relname_internal(char *relname, AttrNumber attno) +build_check_constraint_name_relname_internal(const char *relname, AttrNumber attno) { return psprintf("pathman_%s_%u_check", relname, attno); } diff --git a/src/init.h b/src/init.h index d4c4850a..474dde2f 100644 --- a/src/init.h +++ b/src/init.h @@ -126,7 +126,7 @@ find_children_status find_inheritance_children_array(Oid parentrelId, char *build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno); -char *build_check_constraint_name_relname_internal(char *relname, +char *build_check_constraint_name_relname_internal(const char *relname, AttrNumber attno); bool pathman_config_contains_relation(Oid relid, diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index ba8a727c..26d43b5c 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -11,8 +11,8 @@ * ------------------------------------------------------------------------ */ -#include "utility_stmt_hooking.h" #include "init.h" +#include "utility_stmt_hooking.h" #include "partition_filter.h" #include "relation_info.h" @@ -20,18 +20,14 @@ #include "access/sysattr.h" #include "access/xact.h" #include "catalog/namespace.h" -#include "catalog/pg_attribute.h" #include "commands/copy.h" #include "commands/trigger.h" #include "commands/tablecmds.h" -#include "executor/executor.h" #include "foreign/fdwapi.h" #include "miscadmin.h" -#include "nodes/makefuncs.h" #include "utils/builtins.h" 
#include "utils/lsyscache.h" #include "utils/memutils.h" -#include "utils/rel.h" #include "utils/rls.h" #include "libpq/libpq.h" @@ -74,7 +70,7 @@ bool is_pathman_related_copy(Node *parsetree) { CopyStmt *copy_stmt = (CopyStmt *) parsetree; - Oid partitioned_table; + Oid parent_relid; Assert(IsPathmanReady()); @@ -93,14 +89,14 @@ is_pathman_related_copy(Node *parsetree) return false; /* Get partition's Oid while locking it */ - partitioned_table = RangeVarGetRelid(copy_stmt->relation, - (copy_stmt->is_from ? - RowExclusiveLock : - AccessShareLock), - false); + parent_relid = RangeVarGetRelid(copy_stmt->relation, + (copy_stmt->is_from ? + RowExclusiveLock : + AccessShareLock), + false); /* Check that relation is partitioned */ - if (get_pathman_relation_info(partitioned_table)) + if (get_pathman_relation_info(parent_relid)) { ListCell *lc; @@ -121,7 +117,7 @@ is_pathman_related_copy(Node *parsetree) elog(ERROR, "COPY is not supported for partitioned tables on Windows"); #else elog(DEBUG1, "Overriding default behavior for COPY [%u]", - partitioned_table); + parent_relid); #endif return true; @@ -130,6 +126,57 @@ is_pathman_related_copy(Node *parsetree) return false; } +/* + * Is pg_pathman supposed to handle this table rename stmt? + */ +bool +is_pathman_related_table_rename(Node *parsetree, + Oid *partition_relid_out, /* ret value */ + AttrNumber *partitioned_col_out) /* ret value */ +{ + RenameStmt *rename_stmt = (RenameStmt *) parsetree; + Oid partition_relid, + parent_relid; + const PartRelationInfo *prel; + PartParentSearch parent_search; + + Assert(IsPathmanReady()); + + /* Set default values */ + if (partition_relid_out) *partition_relid_out = InvalidOid; + if (partitioned_col_out) *partitioned_col_out = InvalidAttrNumber; + + if (!IsA(parsetree, RenameStmt)) + return false; + + /* Are we going to rename some table? 
*/ + if (rename_stmt->renameType != OBJECT_TABLE) + return false; + + /* Assume it's a partition, fetch its Oid */ + partition_relid = RangeVarGetRelid(rename_stmt->relation, + AccessShareLock, + false); + + /* Try fetching parent of this table */ + parent_relid = get_parent_of_partition(partition_relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + return false; + + /* Is parent partitioned? */ + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + { + /* Return 'partition_relid' and 'prel->attnum' */ + if (partition_relid_out) *partition_relid_out = partition_relid; + if (partitioned_col_out) *partitioned_col_out = prel->attnum; + + return true; + } + + return false; +} + + /* * CopyGetAttnums - build an integer list of attnums to be copied * @@ -238,6 +285,7 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) "psql's \\copy command also works for anyone."))); } + /* Check that we have a relation */ if (stmt->relation) { TupleDesc tupDesc; @@ -363,13 +411,9 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) rel = NULL; } } - else - { - Assert(stmt->query); - query = stmt->query; - rel = NULL; - } + /* This should never happen (see is_pathman_related_copy()) */ + else elog(ERROR, "error in function \"%s\"", CppAsString(PathmanDoCopy)); /* COPY ... FROM ... */ if (is_from) @@ -626,45 +670,35 @@ prepare_rri_fdw_for_copy(EState *estate, } /* - * Rename check constraint of table if it's a partition + * Rename RANGE\HASH check constraint of a partition on table rename event. 
*/ void -PathmanRenameConstraint(const RenameStmt *stmt) +PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ + AttrNumber partitioned_col, /* partitioned column */ + const RenameStmt *part_rename_stmt) /* partition rename stmt */ { - Oid partition_relid, - parent_relid; - char *old_constraint_name, - *new_constraint_name; - RenameStmt *rename_stmt; - const PartRelationInfo *prel; - - partition_relid = RangeVarGetRelid(stmt->relation, AccessShareLock, false); - parent_relid = get_rel_parent(partition_relid); - - /* Skip if there's no parent */ - if (!OidIsValid(parent_relid)) return; - - /* Fetch partitioning data */ - prel = get_pathman_relation_info(parent_relid); - - /* Skip if this table is not partitioned */ - if (!prel) return; + char *old_constraint_name, + *new_constraint_name; + RenameStmt rename_stmt; /* Generate old constraint name */ - old_constraint_name = build_check_constraint_name_relid_internal(partition_relid, - prel->attnum); + old_constraint_name = + build_check_constraint_name_relid_internal(partition_relid, + partitioned_col); /* Generate new constraint name */ - new_constraint_name = build_check_constraint_name_relname_internal(stmt->newname, - prel->attnum); + new_constraint_name = + build_check_constraint_name_relname_internal(part_rename_stmt->newname, + partitioned_col); /* Build check constraint RENAME statement */ - rename_stmt = makeNode(RenameStmt); - rename_stmt->renameType = OBJECT_TABCONSTRAINT; - rename_stmt->relation = stmt->relation; - rename_stmt->subname = old_constraint_name; - rename_stmt->newname = new_constraint_name; - rename_stmt->missing_ok = false; - - RenameConstraint(rename_stmt); + memset((void *) &rename_stmt, 0, sizeof(RenameStmt)); + NodeSetTag(&rename_stmt, T_RenameStmt); + rename_stmt.renameType = OBJECT_TABCONSTRAINT; + rename_stmt.relation = part_rename_stmt->relation; + rename_stmt.subname = old_constraint_name; + rename_stmt.newname = new_constraint_name; + rename_stmt.missing_ok = 
false; + + RenameConstraint(&rename_stmt); } diff --git a/src/utility_stmt_hooking.h b/src/utility_stmt_hooking.h index 3dd87822..333ab492 100644 --- a/src/utility_stmt_hooking.h +++ b/src/utility_stmt_hooking.h @@ -1,7 +1,8 @@ /* ------------------------------------------------------------------------ * * utility_stmt_hooking.h - * Transaction-specific locks and other functions + * Override COPY TO/FROM and ALTER TABLE ... RENAME statements + * for partitioned tables * * Copyright (c) 2016, Postgres Professional * @@ -17,8 +18,16 @@ #include "nodes/nodes.h" +/* Various traits */ bool is_pathman_related_copy(Node *parsetree); +bool is_pathman_related_table_rename(Node *parsetree, + Oid *partition_relid_out, + AttrNumber *partitioned_col_out); + +/* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); -void PathmanRenameConstraint(const RenameStmt *stmt); +void PathmanRenameConstraint(Oid partition_relid, + AttrNumber partitioned_col, + const RenameStmt *partition_rename_stmt); #endif diff --git a/src/utils.c b/src/utils.c index 4d103e71..217bf186 100644 --- a/src/utils.c +++ b/src/utils.c @@ -259,35 +259,6 @@ get_rel_owner(Oid relid) return InvalidOid; } -/* - * Lookup for a parent table - */ -Oid -get_rel_parent(Oid relid) -{ - ScanKeyData key[1]; - Relation relation; - HeapTuple inheritsTuple; - Oid inhparent = InvalidOid; - SysScanDesc scan; - - relation = heap_open(InheritsRelationId, AccessShareLock); - ScanKeyInit(&key[0], - Anum_pg_inherits_inhrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relid)); - scan = systable_beginscan(relation, InvalidOid, false, - NULL, 1, key); - - if ((inheritsTuple = systable_getnext(scan)) != NULL) - inhparent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; - - systable_endscan(scan); - heap_close(relation, AccessShareLock); - - return inhparent; -} - /* * Checks that callback function meets specific requirements. 
* It must have the only JSONB argument and BOOL return type. diff --git a/src/utils.h b/src/utils.h index 6ed950f7..5946dba1 100644 --- a/src/utils.h +++ b/src/utils.h @@ -37,7 +37,6 @@ List * list_reverse(List *l); char get_rel_persistence(Oid relid); #endif Oid get_rel_owner(Oid relid); -Oid get_rel_parent(Oid relid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); From c3e8ed87b9423e2bae1baf0826ee9a5a258cd973 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 14:11:42 +0300 Subject: [PATCH 0083/1124] reimplement HASH partition creation machinery in C language --- expected/pathman_calamity.out | 12 +- hash.sql | 53 ++---- sql/pathman_calamity.sql | 5 + src/partition_creation.c | 312 +++++++++++++++++++++++++++++----- src/partition_creation.h | 27 +++ src/pl_funcs.c | 23 +-- src/pl_hash_funcs.c | 34 ++++ src/utils.c | 30 +++- src/utils.h | 17 +- 9 files changed, 404 insertions(+), 109 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ebea830a..31a578ec 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -293,6 +293,16 @@ SHOW pg_pathman.enable; on (1 row) +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 8 other objects +NOTICE: drop cascades to 11 other objects DROP EXTENSION pg_pathman; diff --git a/hash.sql b/hash.sql index 4d8781f9..f7947e4b 100644 --- a/hash.sql +++ b/hash.sql @@ -51,43 +51,10 @@ BEGIN INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) VALUES (parent_relid, attribute, 1); - /* Create partitions and update pg_pathman 
configuration */ - FOR partnum IN 0..partitions_count-1 - LOOP - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_relname || '_' || partnum)); - - EXECUTE format( - 'CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s) TABLESPACE %s', - v_child_relname, - parent_relid::TEXT, - @extschema@.get_rel_tablespace_name(parent_relid)); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s - CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - attribute), - v_hashfunc::TEXT, - attribute, - partitions_count, - partnum); - - PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback); - END LOOP; + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -202,7 +169,7 @@ BEGIN old_fields, att_fmt, new_fields, child_relname_format, @extschema@.get_type_hash_func(atttype)::TEXT); - /* Create trigger on every partition */ + /* Create trigger on each partition */ FOR num IN 0..partitions_count-1 LOOP EXECUTE format(trigger, @@ -215,6 +182,16 @@ BEGIN END $$ LANGUAGE plpgsql; +/* + * Just create HASH partitions, called by create_hash_partitions(). 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C STRICT; + /* * Returns hash function OID for specified type */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index f7f01699..c510a4ff 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -109,6 +109,11 @@ ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_c /* check GUC variable */ SHOW pg_pathman.enable; +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/partition_creation.c b/src/partition_creation.c index 7701f7e8..da355454 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -35,6 +35,7 @@ #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "utils/typcache.h" static Datum extract_binary_interval_from_text(Datum interval_text, @@ -54,18 +55,28 @@ static Oid spawn_partitions_val(Oid parent_relid, Datum value, Oid value_type); +static void create_single_partition_common(Oid partition_relid, + Constraint *check_constraint, + init_callback_params *callback_params); + static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, char **partitioned_column); -static char *choose_partition_name(Oid parent_relid, Oid parent_nsp); +static char *choose_range_partition_name(Oid parent_relid, Oid parent_nsp); +static char *choose_hash_partition_name(Oid parent_relid, uint32 part_idx); static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void 
copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static Constraint *make_constraint_common(char *name, Node *raw_expr); + +static Value make_string_value_struct(char *str); +static Value make_int_value_struct(int int_val); + /* * --------------------------------------- @@ -83,11 +94,22 @@ create_single_range_partition_internal(Oid parent_relid, char *tablespace) { Oid partition_relid; - Relation child_relation; Constraint *check_constr; char *partitioned_column; init_callback_params callback_params; + /* Generate a name if asked to */ + if (!partition_rv) + { + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name; + + partition_name = choose_range_partition_name(parent_relid, parent_nsp); + + partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); + } + /* Create a partition & get 'partitioned_column' */ partition_relid = create_single_partition_internal(parent_relid, partition_rv, @@ -101,25 +123,95 @@ create_single_range_partition_internal(Oid parent_relid, end_value, value_type); + /* Cook args for init_callback */ + MakeInitCallbackRangeParams(&callback_params, InvalidOid, + parent_relid, partition_relid, + start_value, end_value, value_type); + + /* Add constraint & execute init_callback */ + create_single_partition_common(partition_relid, + check_constr, + &callback_params); + + /* Return the Oid */ + return partition_relid; +} + +/* Create one HASH partition */ +Oid +create_single_hash_partition_internal(Oid parent_relid, + uint32 part_idx, + uint32 part_count, + Oid value_type, + RangeVar *partition_rv, + char *tablespace) +{ + Oid partition_relid; + Constraint *check_constr; + char *partitioned_column; + init_callback_params callback_params; + + /* Generate a name if asked to */ + if (!partition_rv) + { + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name; + + partition_name = 
choose_hash_partition_name(parent_relid, part_idx); + + partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); + } + + /* Create a partition & get 'partitioned_column' */ + partition_relid = create_single_partition_internal(parent_relid, + partition_rv, + tablespace, + &partitioned_column); + + /* Build check constraint for HASH partition */ + check_constr = build_hash_check_constraint(partition_relid, + partitioned_column, + part_idx, + part_count, + value_type); + + /* Cook args for init_callback */ + MakeInitCallbackHashParams(&callback_params, InvalidOid, + parent_relid, partition_relid); + + /* Add constraint & execute init_callback */ + create_single_partition_common(partition_relid, + check_constr, + &callback_params); + + /* Return the Oid */ + return partition_relid; +} + +/* Add constraint & execute init_callback */ +void +create_single_partition_common(Oid partition_relid, + Constraint *check_constraint, + init_callback_params *callback_params) +{ + Relation child_relation; + /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); AddRelationNewConstraints(child_relation, NIL, - list_make1(check_constr), + list_make1(check_constraint), false, true, true); heap_close(child_relation, NoLock); + /* Make constraint visible */ CommandCounterIncrement(); /* Finally invoke 'init_callback' */ - MakeInitCallbackRangeParams(&callback_params, InvalidOid, - parent_relid, partition_relid, - start_value, end_value, value_type); - invoke_part_callback(&callback_params); + invoke_part_callback(callback_params); + /* Make possible changes visible */ CommandCounterIncrement(); - - /* Return the Oid */ - return partition_relid; } /* @@ -505,9 +597,9 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ return last_partition; } -/* Choose a good name for a partition */ +/* Choose a good name for a RANGE partition */ static char * -choose_partition_name(Oid parent_relid, Oid parent_nsp) 
+choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { Datum part_num; Oid part_seq_relid; @@ -519,6 +611,13 @@ choose_partition_name(Oid parent_relid, Oid parent_nsp) return psprintf("%s_%u", get_rel_name(parent_relid), DatumGetInt32(part_num)); } +/* Choose a good name for a HASH partition */ +static char * +choose_hash_partition_name(Oid parent_relid, uint32 part_idx) +{ + return psprintf("%s_%u", get_rel_name(parent_relid), part_idx); +} + /* Create a partition-like table (no constraints yet) */ static Oid create_single_partition_internal(Oid parent_relid, @@ -574,17 +673,7 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - /* Generate a name if asked to */ - if (!partition_rv) - { - char *part_name; - - /* Make up a name for the partition */ - part_name = choose_partition_name(parent_relid, parent_nsp); - - /* Make RangeVar for the partition */ - partition_rv = makeRangeVar(parent_nsp_name, part_name, -1); - } + Assert(partition_rv); /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) @@ -768,11 +857,13 @@ build_raw_range_check_tree(char *attname, col_ref->location = -1; /* Left boundary */ - left_const->val = *makeString(datum_to_cstring(start_value, value_type)); + left_const->val = make_string_value_struct(datum_to_cstring(start_value, + value_type)); left_const->location = -1; /* Right boundary */ - right_const->val = *makeString(datum_to_cstring(end_value, value_type)); + right_const->val = make_string_value_struct(datum_to_cstring(end_value, + value_type)); right_const->location = -1; /* Left comparison (VAR >= start_value) */ @@ -804,7 +895,7 @@ build_range_check_constraint(Oid child_relid, Datum end_value, Oid value_type) { - Constraint *range_constr; + Constraint *hash_constr; char *range_constr_name; AttrNumber attnum; @@ -813,25 +904,13 @@ build_range_check_constraint(Oid child_relid, range_constr_name = 
build_check_constraint_name_internal(child_relid, attnum); /* Initialize basic properties of a CHECK constraint */ - range_constr = makeNode(Constraint); - range_constr->conname = range_constr_name; - range_constr->deferrable = false; - range_constr->initdeferred = false; - range_constr->location = -1; - range_constr->contype = CONSTR_CHECK; - range_constr->is_no_inherit = true; - - /* Validate existing data using this constraint */ - range_constr->skip_validation = false; - range_constr->initially_valid = true; - - /* Finally we should build an expression tree */ - range_constr->raw_expr = build_raw_range_check_tree(attname, - start_value, - end_value, - value_type); + hash_constr = make_constraint_common(range_constr_name, + build_raw_range_check_tree(attname, + start_value, + end_value, + value_type)); /* Everything seems to be fine */ - return range_constr; + return hash_constr; } /* Check if range overlaps with any partitions */ @@ -883,6 +962,151 @@ check_range_available(Oid parent_relid, return true; } +/* Build HASH check constraint expression tree */ +Node * +build_raw_hash_check_tree(char *attname, + uint32 part_idx, + uint32 part_count, + Oid value_type) +{ + A_Expr *eq_oper = makeNode(A_Expr); + FuncCall *part_idx_call = makeNode(FuncCall), + *hash_call = makeNode(FuncCall); + ColumnRef *hashed_column = makeNode(ColumnRef); + A_Const *part_idx_c = makeNode(A_Const), + *part_count_c = makeNode(A_Const); + + List *get_hash_part_idx_proc; + + Oid hash_proc; + TypeCacheEntry *tce; + + tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); + hash_proc = tce->hash_proc; + + /* Partitioned column */ + hashed_column->fields = list_make1(makeString(attname)); + hashed_column->location = -1; + + /* Total amount of partitions */ + part_count_c->val = make_int_value_struct(part_count); + part_count_c->location = -1; + + /* Index of this partition (hash % total amount) */ + part_idx_c->val = make_int_value_struct(part_idx); + part_idx_c->location = -1; + + /* 
Call hash_proc() */ + hash_call->funcname = list_make1(makeString(get_func_name(hash_proc))); + hash_call->args = list_make1(hashed_column); + hash_call->agg_order = NIL; + hash_call->agg_filter = NULL; + hash_call->agg_within_group = false; + hash_call->agg_star = false; + hash_call->agg_distinct = false; + hash_call->func_variadic = false; + hash_call->over = NULL; + hash_call->location = -1; + + /* Build schema-qualified name of function get_hash_part_idx() */ + get_hash_part_idx_proc = + list_make2(makeString(get_namespace_name(get_pathman_schema())), + makeString("get_hash_part_idx")); + + /* Call get_hash_part_idx() */ + part_idx_call->funcname = get_hash_part_idx_proc; + part_idx_call->args = list_make2(hash_call, part_count_c); + part_idx_call->agg_order = NIL; + part_idx_call->agg_filter = NULL; + part_idx_call->agg_within_group = false; + part_idx_call->agg_star = false; + part_idx_call->agg_distinct = false; + part_idx_call->func_variadic = false; + part_idx_call->over = NULL; + part_idx_call->location = -1; + + /* Construct equality operator */ + eq_oper->kind = AEXPR_OP; + eq_oper->name = list_make1(makeString("=")); + eq_oper->lexpr = (Node *) part_idx_call; + eq_oper->rexpr = (Node *) part_idx_c; + eq_oper->location = -1; + + return (Node *) eq_oper; +} + +/* Build complete HASH check constraint */ +Constraint * +build_hash_check_constraint(Oid child_relid, + char *attname, + uint32 part_idx, + uint32 part_count, + Oid value_type) +{ + Constraint *hash_constr; + char *hash_constr_name; + AttrNumber attnum; + + /* Build a correct name for this constraint */ + attnum = get_attnum(child_relid, attname); + hash_constr_name = build_check_constraint_name_internal(child_relid, attnum); + + /* Initialize basic properties of a CHECK constraint */ + hash_constr = make_constraint_common(hash_constr_name, + build_raw_hash_check_tree(attname, + part_idx, + part_count, + value_type)); + /* Everything seems to be fine */ + return hash_constr; +} + +static 
Constraint * +make_constraint_common(char *name, Node *raw_expr) +{ + Constraint *constraint; + + /* Initialize basic properties of a CHECK constraint */ + constraint = makeNode(Constraint); + constraint->conname = name; + constraint->deferrable = false; + constraint->initdeferred = false; + constraint->location = -1; + constraint->contype = CONSTR_CHECK; + constraint->is_no_inherit = true; + + /* Validate existing data using this constraint */ + constraint->skip_validation = false; + constraint->initially_valid = true; + + /* Finally we should build an expression tree */ + constraint->raw_expr = raw_expr; + + return constraint; +} + +static Value +make_string_value_struct(char *str) +{ + Value val; + + val.type = T_String; + val.val.str = str; + + return val; +} + +static Value +make_int_value_struct(int int_val) +{ + Value val; + + val.type = T_Integer; + val.val.ival = int_val; + + return val; +} + /* * --------------------- diff --git a/src/partition_creation.h b/src/partition_creation.h index f89ff1ca..23d0ce48 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -14,9 +14,12 @@ #include "nodes/parsenodes.h" +/* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); + +/* Create one RANGE partition */ Oid create_single_range_partition_internal(Oid parent_relid, Datum start_value, Datum end_value, @@ -24,6 +27,16 @@ Oid create_single_range_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace); +/* Create one HASH partition */ +Oid create_single_hash_partition_internal(Oid parent_relid, + uint32 part_idx, + uint32 part_count, + Oid value_type, + RangeVar *partition_rv, + char *tablespace); + + +/* RANGE constraints */ Constraint * build_range_check_constraint(Oid child_relid, char *attname, Datum start_value, @@ -42,6 +55,18 @@ bool check_range_available(Oid partition_relid, 
bool raise_error); +/* HASH constraints */ +Constraint * build_hash_check_constraint(Oid child_relid, + char *attname, + uint32 part_idx, + uint32 part_count, + Oid value_type); + +Node * build_raw_hash_check_tree(char *attname, + uint32 part_idx, + uint32 part_count, Oid value_type); + + /* Partitioning callback type */ typedef enum { @@ -77,6 +102,7 @@ typedef struct } params; } init_callback_params; + #define MakeInitCallbackRangeParams(params_p, cb, parent, child, start, end, type) \ do \ { \ @@ -104,4 +130,5 @@ typedef struct (params_p)->partition_relid = (child); \ } while (0) + void invoke_part_callback(init_callback_params *cb_params); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 892e62ce..8bdc4674 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -201,27 +201,10 @@ get_base_type_pl(PG_FUNCTION_ARGS) Datum get_attribute_type_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - Oid result; - HeapTuple tp; - - /* NOTE: for now it's the most efficient way */ - tp = SearchSysCacheAttName(relid, text_to_cstring(attname)); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = att_tup->atttypid; - ReleaseSysCache(tp); - - PG_RETURN_OID(result); - } - else - elog(ERROR, "Cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - text_to_cstring(attname), get_rel_name_or_relid(relid)); + Oid relid = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); - PG_RETURN_NULL(); /* keep compiler happy */ + PG_RETURN_OID(get_attribute_type(relid, text_to_cstring(attname), false)); } /* diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 6dc0916f..a60e4432 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -9,16 +9,50 @@ */ #include "pathman.h" +#include "partition_creation.h" +#include "relation_info.h" +#include "utils/builtins.h" #include "utils/typcache.h" /* Function declarations */ +PG_FUNCTION_INFO_V1( 
create_hash_partitions_internal ); PG_FUNCTION_INFO_V1( get_type_hash_func ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); +/* + * Create HASH partitions implementation (written in C). + */ +Datum +create_hash_partitions_internal(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Datum partitioned_col_name = PG_GETARG_DATUM(1); + Oid partitioned_col_type; + uint32 part_count = PG_GETARG_INT32(2), + i; + + /* Check that there's no partitions yet */ + if (get_pathman_relation_info(parent_relid)) + elog(ERROR, "cannot add new HASH partitions"); + + partitioned_col_type = get_attribute_type(parent_relid, + TextDatumGetCString(partitioned_col_name), + false); + + for (i = 0; i < part_count; i++) + { + /* Create a partition (copy FKs, invoke callbacks etc) */ + create_single_hash_partition_internal(parent_relid, i, part_count, + partitioned_col_type, NULL, NULL); + } + + PG_RETURN_VOID(); +} + /* * Returns hash function's OID for a specified type. */ diff --git a/src/utils.c b/src/utils.c index be2534c4..fe2eba9f 100644 --- a/src/utils.c +++ b/src/utils.c @@ -261,7 +261,7 @@ get_rel_persistence(Oid relid) #endif /* - * Returns relation owner + * Get relation owner. */ Oid get_rel_owner(Oid relid) @@ -283,6 +283,34 @@ get_rel_owner(Oid relid) return InvalidOid; } +/* + * Get type of column by its name. + */ +Oid +get_attribute_type(Oid relid, const char *attname, bool missing_ok) +{ + Oid result; + HeapTuple tp; + + /* NOTE: for now it's the most efficient way */ + tp = SearchSysCacheAttName(relid, attname); + if (HeapTupleIsValid(tp)) + { + Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); + result = att_tup->atttypid; + ReleaseSysCache(tp); + + return result; + } + + if (!missing_ok) + elog(ERROR, "cannot find type name for attribute \"%s\" " + "of relation \"%s\"", + attname, get_rel_name_or_relid(relid)); + + return InvalidOid; +} + /* * Checks that callback function meets specific requirements. 
* It must have the only JSONB argument and BOOL return type. diff --git a/src/utils.h b/src/utils.h index 9052cfae..ac146649 100644 --- a/src/utils.h +++ b/src/utils.h @@ -34,20 +34,27 @@ bool check_security_policy_internal(Oid relid, Oid role); Oid get_pathman_schema(void); List * list_reverse(List *l); +/* + * Useful functions for relations. + */ +Oid get_rel_owner(Oid relid); +char * get_rel_name_or_relid(Oid relid); +Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); #if PG_VERSION_NUM < 90600 char get_rel_persistence(Oid relid); #endif -Oid get_rel_owner(Oid relid); - -Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); /* - * Handy execution-stage functions. + * Operator-related stuff. */ -char * get_rel_name_or_relid(Oid relid); Operator get_binary_operator(char *opname, Oid arg1, Oid arg2); Oid get_operator_ret_type(Operator op); void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); + +/* + * Print values and cast types. 
+ */ char * datum_to_cstring(Datum datum, Oid typid); +Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); #endif From f2e6be0bbe0fc7c35d335f0b34ee0288d2875c29 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 14:22:57 +0300 Subject: [PATCH 0084/1124] fix unresolved function name in partition_creation.c --- src/partition_creation.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e84c1c2a..4d6897ef 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1050,7 +1050,8 @@ build_hash_check_constraint(Oid child_relid, /* Build a correct name for this constraint */ attnum = get_attnum(child_relid, attname); - hash_constr_name = build_check_constraint_name_internal(child_relid, attnum); + hash_constr_name = build_check_constraint_name_relid_internal(child_relid, + attnum); /* Initialize basic properties of a CHECK constraint */ hash_constr = make_constraint_common(hash_constr_name, From 3056b6d96e1c5a7501d1255bb6f2bb04d0f016df Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 14:23:52 +0300 Subject: [PATCH 0085/1124] RANGE & HASH constraints should be inherited by default (constraint->is_no_inherit = false) --- src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 4d6897ef..d82c17e6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1075,7 +1075,7 @@ make_constraint_common(char *name, Node *raw_expr) constraint->initdeferred = false; constraint->location = -1; constraint->contype = CONSTR_CHECK; - constraint->is_no_inherit = true; + constraint->is_no_inherit = false; /* Validate existing data using this constraint */ constraint->skip_validation = false; From f6548bae03948a14514f6fd0be02917bdbecce39 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 15:08:42 +0300 Subject: [PATCH 
0086/1124] make function disable_pathman_for() also remove row in pathman_config_params --- expected/pathman_calamity.out | 35 ++++++++++++++++++++++++++++++++++- init.sql | 4 ++++ sql/pathman_calamity.sql | 8 ++++++++ 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 31a578ec..9652835f 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -303,6 +303,39 @@ SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ +NOTICE: function calamity.to_be_disabled_upd_trig_func() does not exist, skipping + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 11 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/init.sql b/init.sql index 179f8aff..f2847edf 100644 --- a/init.sql +++ b/init.sql @@ -340,7 +340,11 @@ $$ BEGIN PERFORM @extschema@.validate_relname(parent_relid); + /* Delete rows from both config tables */ DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM 
@extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ PERFORM @extschema@.drop_triggers(parent_relid); /* Notify backend about changes */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index c510a4ff..466ef343 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -114,6 +114,14 @@ CREATE TABLE calamity.hash_two_times(val serial); SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; From da062450342c3fa26e7b5521ea5b595e139dc752 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 15:12:25 +0300 Subject: [PATCH 0087/1124] add missing include to pl_hash_funcs.c --- src/pl_hash_funcs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index a60e4432..6d2e8f74 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -11,6 +11,7 @@ #include "pathman.h" #include "partition_creation.h" #include "relation_info.h" +#include "utils.h" #include "utils/builtins.h" #include "utils/typcache.h" From 000130d6d1c485cdbdcda2ff2570a6cf58351cfe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 15:52:58 +0300 Subject: [PATCH 0088/1124] more tests for create_hash_partitions_internal() --- expected/pathman_calamity.out | 2 ++ 
sql/pathman_calamity.sql | 1 + 2 files changed, 3 insertions(+) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9652835f..03db9b8f 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -295,6 +295,8 @@ SHOW pg_pathman.enable; /* check function create_hash_partitions_internal() (called for the 2nd time) */ CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); create_hash_partitions ------------------------ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 466ef343..460ac1a2 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -111,6 +111,7 @@ SHOW pg_pathman.enable; /* check function create_hash_partitions_internal() (called for the 2nd time) */ CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); From 33d2cf731c6b6ef7c765fd42bba11ddf9953ed0f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 18:15:31 +0300 Subject: [PATCH 0089/1124] reimplement functions validate_relname(), get_number_of_partitions() in C language, refactoring & code cleanup --- expected/pathman_calamity.out | 24 +- hash.sql | 3 +- init.sql | 118 ++- range.sql | 14 +- sql/pathman_calamity.sql | 7 +- src/partition_creation.c | 2 +- src/pl_funcs.c | 49 +- src/relation_info.c | 3 +- tests/python/partitioning_test.py | 1290 ++++++++++++++--------------- 9 files changed, 774 insertions(+), 736 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 03db9b8f..e82e295d 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out 
@@ -14,12 +14,28 @@ set client_min_messages = NOTICE; CREATE TABLE calamity.part_test(val serial); /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); - validate_relname --------------------- - calamity.part_test + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t (1 row) -/* SELECT validate_relname(NULL); -- FIXME: %s */ /* check function get_parent_of_partition() */ SELECT get_parent_of_partition('calamity.part_test'); ERROR: "part_test" is not a partition diff --git a/hash.sql b/hash.sql index f7947e4b..633542b4 100644 --- a/hash.sql +++ b/hash.sql @@ -147,8 +147,7 @@ BEGIN att_val_fmt, att_fmt; - partitions_count := COUNT(*) FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid::oid; + partitions_count := @extschema@.get_number_of_partitions(parent_relid); /* Build trigger & trigger function's names */ funcname := @extschema@.build_update_trigger_func_name(parent_relid); diff --git a/init.sql b/init.sql index f2847edf..f9d9b5eb 100644 --- a/init.sql +++ b/init.sql @@ -104,15 +104,6 @@ SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); -CREATE OR REPLACE FUNCTION @extschema@.partitions_count(relation REGCLASS) -RETURNS INT AS -$$ -BEGIN - RETURN count(*) FROM pg_inherits WHERE inhparent = relation; -END -$$ -LANGUAGE plpgsql STRICT; - /* * Add a row describing the optional parameter to pathman_config_params. 
*/ @@ -185,7 +176,8 @@ RETURNS TABLE ( partattr TEXT, range_min TEXT, range_max TEXT) -AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; /* * View for show_partition_list(). @@ -206,7 +198,8 @@ RETURNS TABLE ( relid REGCLASS, processed INT, status TEXT) -AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; /* * View for show_concurrent_part_tasks(). @@ -353,28 +346,6 @@ END $$ LANGUAGE plpgsql STRICT; -/* - * Validates relation name. It must be schema qualified. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( - cls REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - relname TEXT; - -BEGIN - relname = @extschema@.get_schema_qualified_name(cls); - - IF relname IS NULL THEN - RAISE EXCEPTION 'relation %s does not exist', cls; - END IF; - - RETURN relname; -END -$$ -LANGUAGE plpgsql; - /* * Aggregates several common relation checks before partitioning. * Suitable for every partitioning type. @@ -444,25 +415,6 @@ END $$ LANGUAGE plpgsql STRICT; -/* - * Returns the schema-qualified name of table. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_schema_qualified_name( - cls REGCLASS, - delimiter TEXT DEFAULT '.', - suffix TEXT DEFAULT '') -RETURNS TEXT AS -$$ -BEGIN - RETURN (SELECT quote_ident(relnamespace::regnamespace::text) || - delimiter || - quote_ident(relname || suffix) - FROM pg_catalog.pg_class - WHERE oid = cls::oid); -END -$$ -LANGUAGE plpgsql STRICT; - /* * Check if two relations have equal structures. */ @@ -662,17 +614,27 @@ RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' LANGUAGE C STRICT; +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' +LANGUAGE C STRICT; + /* * Get parent of pg_pathman's partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) +CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( + partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; /* * Extract basic type of a domain. */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type(REGTYPE) +CREATE OR REPLACE FUNCTION @extschema@.get_base_type( + typid REGTYPE) RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; @@ -680,23 +642,34 @@ LANGUAGE C STRICT; * Returns attribute type name for relation. */ CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( - REGCLASS, TEXT) + relid REGCLASS, + attname TEXT) RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' LANGUAGE C STRICT; /* * Return tablespace name for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'get_rel_tablespace_name' +CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' LANGUAGE C STRICT; +/* + * Check that relation exists. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_relname( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'validate_relname' +LANGUAGE C; + /* * Checks if attribute is nullable */ CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - REGCLASS, TEXT) + relid REGCLASS, + attname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' LANGUAGE C STRICT; @@ -713,12 +686,14 @@ LANGUAGE C STRICT; * Build check constraint name for a specified relation's column. 
*/ CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, INT2) + partition_relid REGCLASS, + partitioned_col INT2) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, TEXT) + partition_relid REGCLASS, + partitioned_col TEXT) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' LANGUAGE C STRICT; @@ -726,12 +701,12 @@ LANGUAGE C STRICT; * Build update trigger and its underlying function's names. */ CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - REGCLASS) + relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - REGCLASS) + relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' LANGUAGE C STRICT; @@ -746,7 +721,8 @@ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; -CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache(relid OID) +CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache( + OID) RETURNS VOID AS 'pg_pathman' LANGUAGE C STRICT; @@ -755,18 +731,18 @@ LANGUAGE C STRICT; * Lock partitioned relation to restrict concurrent * modification of partitioning scheme. */ - CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' - LANGUAGE C STRICT; +CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' +LANGUAGE C STRICT; /* * Lock relation to restrict concurrent modification of data. 
*/ - CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' - LANGUAGE C STRICT; +CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' +LANGUAGE C STRICT; /* diff --git a/range.sql b/range.sql index 1fb2ec79..445bbbad 100644 --- a/range.sql +++ b/range.sql @@ -158,7 +158,7 @@ BEGIN parent_relid, start_value, start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); + @extschema@.get_tablespace(parent_relid); start_value := start_value + p_interval; END LOOP; @@ -270,7 +270,7 @@ BEGIN parent_relid, start_value, start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); + tablespace := @extschema@.get_tablespace(parent_relid)); start_value := start_value + p_interval; END LOOP; @@ -343,7 +343,7 @@ BEGIN parent_relid, start_value, start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); + tablespace := @extschema@.get_tablespace(parent_relid)); start_value := start_value + p_interval; part_count := part_count + 1; @@ -416,7 +416,7 @@ BEGIN parent_relid, start_value, start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); + @extschema@.get_tablespace(parent_relid); start_value := start_value + p_interval; part_count := part_count + 1; @@ -733,7 +733,7 @@ DECLARE v_atttype REGTYPE; BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot append to empty partitions set'; END IF; @@ -843,7 +843,7 @@ DECLARE v_atttype REGTYPE; BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot prepend to empty partitions set'; END IF; @@ -907,7 +907,7 @@ BEGIN END IF; /* check range overlap 
*/ - IF @extschema@.partitions_count(parent_relid) > 0 THEN + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 460ac1a2..e2e080df 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -16,7 +16,12 @@ CREATE TABLE calamity.part_test(val serial); /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); -/* SELECT validate_relname(NULL); -- FIXME: %s */ +SELECT validate_relname(1::REGCLASS); +SELECT validate_relname(NULL); + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); +SELECT get_number_of_partitions(NULL) IS NULL; /* check function get_parent_of_partition() */ SELECT get_parent_of_partition('calamity.part_test'); diff --git a/src/partition_creation.c b/src/partition_creation.c index d82c17e6..60d2efc5 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -556,7 +556,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* What, again? 
Don't want to deal with this nightmare */ if (move_bound_op_ret_type != range_bound_type) - elog(ERROR, "error in spawn_partitions_val()"); + elog(ERROR, "error in function " CppAsString(spawn_partitions_val)); } /* Get operator's underlying function */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 770be9ed..1103b239 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -36,10 +36,11 @@ PG_FUNCTION_INFO_V1( on_partitions_created ); PG_FUNCTION_INFO_V1( on_partitions_updated ); PG_FUNCTION_INFO_V1( on_partitions_removed ); +PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_attribute_type_pl ); -PG_FUNCTION_INFO_V1( get_rel_tablespace_name ); +PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_partition_list_internal ); @@ -48,6 +49,7 @@ PG_FUNCTION_INFO_V1( build_update_trigger_name ); PG_FUNCTION_INFO_V1( build_check_constraint_name_attnum ); PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); +PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); @@ -158,6 +160,22 @@ on_partitions_removed(PG_FUNCTION_ARGS) * ------------------------ */ +/* + * Get number of relation's partitions managed by pg_pathman. + */ +Datum +get_number_of_partitions_pl(PG_FUNCTION_ARGS) +{ + Oid parent = PG_GETARG_OID(0); + const PartRelationInfo *prel; + + /* If we couldn't find PartRelationInfo, return 0 */ + if ((prel = get_pathman_relation_info(parent)) == NULL) + PG_RETURN_INT32(0); + + PG_RETURN_INT32(PrelChildrenCount(prel)); +} + /* * Get parent of a specified partition. 
*/ @@ -211,7 +229,7 @@ get_attribute_type_pl(PG_FUNCTION_ARGS) * Return tablespace name for specified relation */ Datum -get_rel_tablespace_name(PG_FUNCTION_ARGS) +get_tablespace_pl(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); Oid tablespace_id; @@ -419,6 +437,29 @@ show_partition_list_internal(PG_FUNCTION_ARGS) * -------- */ +/* Check that relation exists. Usually we pass regclass as text, hence the name */ +Datum +validate_relname(PG_FUNCTION_ARGS) +{ + Oid relid; + + /* We don't accept NULL */ + if (PG_ARGISNULL(0)) + ereport(ERROR, (errmsg("relation should not be NULL"), + errdetail("function " CppAsString(validate_relname) + " received NULL argument"))); + + /* Fetch relation's Oid */ + relid = PG_GETARG_OID(0); + + if (!check_relation_exists(relid)) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", relid), + errdetail("triggered in function " + CppAsString(validate_relname)))); + + PG_RETURN_VOID(); +} + Datum is_date_type(PG_FUNCTION_ARGS) { @@ -784,8 +825,8 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) break; default: - elog(ERROR, "error in function \"%s\"", - CppAsString(invoke_on_partition_created_callback)); + elog(ERROR, "error in function " + CppAsString(invoke_on_partition_created_callback)); } /* Now it's time to call it! 
*/ diff --git a/src/relation_info.c b/src/relation_info.c index 58fb71b7..29d1e6a0 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -178,7 +178,8 @@ refresh_pathman_relation_info(Oid relid, /* Error: unknown result code */ default: - elog(ERROR, "error in " CppAsString(find_inheritance_children_array)); + elog(ERROR, "error in function " + CppAsString(find_inheritance_children_array)); } /* diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index a03437bc..6d66410f 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,7 +1,7 @@ # coding: utf-8 """ concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries + Tests concurrent partitioning worker with simultaneous update queries Copyright (c) 2015-2016, Postgres Professional """ @@ -14,654 +14,654 @@ def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper + """To run tests with FDW support set environment variable TEST_FDW=1""" + def wrapper(*args, **kwargs): + if os.environ.get('FDW_DISABLED') != '1': + func(*args, **kwargs) + else: + print('Warning: FDW features tests are disabled, skipping...') + return wrapper class PartitioningTests(unittest.TestCase): - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - 
node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - master.poll_query_until( - 'postgres', - 'SELECT pg_current_xlog_location() <= replay_location ' - 'FROM pg_stat_replication WHERE application_name = \'%s\'' - % replica.name) - - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - - def test_concurrent(self): - """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - try: - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and 
some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done - # its work - flags = [Flag(False) for i in xrange(3)] - - # All 
threads synchronizes though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. 
Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - '6\n' - ) - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_rel_tablespace_name(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name 
(needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - '25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - '25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - version = int(node.psql("postgres", "show server_version_num")[1]) - if version < 90600: - return - - # 
Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Helper function for json equality - def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - 
"Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * 
from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 4: wait for con2 - 
t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() + def setUp(self): + self.setup_cmd = [ + 'create table abc(id serial, t text)', + 'insert into abc select generate_series(1, 300000)', + 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', + ] + + def tearDown(self): + stop_all() + + def start_new_pathman_cluster(self, name='test', allows_streaming=False): + node = get_new_node(name) + node.init(allows_streaming=allows_streaming) + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + return node + + def init_test_data(self, node): + """Initialize pg_pathman extension and test data""" + for cmd in self.setup_cmd: + node.safe_psql('postgres', cmd) + + def catchup_replica(self, master, replica): + """Wait until replica synchronizes with master""" + master.poll_query_until( + 'postgres', + 'SELECT pg_current_xlog_location() <= replay_location ' + 'FROM pg_stat_replication WHERE application_name = \'%s\'' + % replica.name) + + def printlog(self, logfile): + with open(logfile, 'r') as log: + for line in log.readlines(): + print(line) + + def test_concurrent(self): + """Tests concurrent partitioning""" + try: + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql( + 'postgres', + 'select partition_table_concurrently(\'abc\')') + + while True: + # update some rows to check for deadlocks + node.safe_psql( + 
'postgres', + ''' + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + ''') + + count = node.execute( + 'postgres', + 'select count(*) from pathman_concurrent_part_tasks') + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() + except Exception, e: + self.printlog(node.logs_dir + '/postgresql.log') + raise e + + def test_replication(self): + """Tests how pg_pathman works with replication""" + node = get_new_node('master') + replica = get_new_node('repl') + + try: + # initialize master server + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 300000 + ) + + # check that direct UPDATE in pathman_config_params invalidates + # cache + 
node.psql( + 'postgres', + 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 0 + ) + except Exception, e: + self.printlog(node.logs_dir + '/postgresql.log') + self.printlog(replica.logs_dir + '/postgresql.log') + raise e + + def test_locks(self): + """Test that a session trying to create new partitions waits for other + sessions if they doing the same""" + + import threading + import time + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done + # its work + flags = [Flag(False) for i in xrange(3)] + + # All threads synchronizes though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ We expect that this query will wait until another session + commits or rolls back""" + node.safe_psql('postgres', query) + with lock: + flag.set(True) + + # Initialize master server + node = get_new_node('master') + + try: + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' + ) + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions 
and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, + args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' + ), + '6\n' + ) + except Exception, e: + self.printlog(node.logs_dir + '/postgresql.log') + raise e + + def test_tablespace(self): + """Check tablespace support""" + + def check_tablespace(node, tablename, tablespace): + res = node.execute( + 'postgres', + 'select get_tablespace(\'{}\')'.format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + node = get_new_node('master') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql( + 'postgres', + 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql( + 'postgres', + 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql( + 'postgres', + 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + + @if_fdw_enabled + def test_foreign_table(self): + """Test foreign tables""" + + # Start master server + master = get_new_node('test') + master.init() + master.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + master.psql( + 'postgres', + '''create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2)''') + + # Current user name 
(needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + # Start foreign server + fserv = get_new_node('fserv') + fserv.init().start() + fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') + fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') + + # Create foreign table and attach it to partitioned table + master.safe_psql( + 'postgres', + '''create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) + ) + master.safe_psql( + 'postgres', + '''create user mapping for {0} + server fserv + options (user '{0}')'''.format(username) + ) + master.safe_psql( + 'postgres', + '''import foreign schema public limit to (ftable) + from server fserv into public''' + ) + master.safe_psql( + 'postgres', + 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + '25|foreign\n' + ) + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable order by id'), + '25|foreign\n26|part\n' + ) + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + def test_parallel_nodes(self): + """Test parallel queries under partitions""" + + import json + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + version = int(node.psql("postgres", "show server_version_num")[1]) + if version < 90600: + return + + # 
Prepare test database + node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') + node.psql('postgres', 'alter table range_partitioned alter column i set not null') + node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') + node.psql('postgres', 'vacuum analyze range_partitioned') + + node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') + node.psql('postgres', 'alter table hash_partitioned alter column i set not null') + node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') + node.psql('postgres', 'vacuum analyze hash_partitioned') + + node.psql('postgres', """ + create or replace function query_plan(query text) returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Helper function for json equality + def ordered(obj): + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + 
"Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute('select * 
from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_creation_insert(self): + """Test concurrent partition creation on INSERT""" + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + con0.begin() + con0.execute('create table ins_test(val int not null)') + con0.execute('insert into ins_test select generate_series(1, 50)') + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 4: wait for con2 + 
t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + # Stop instance and finish work + node.stop() + node.cleanup() if __name__ == "__main__": - unittest.main() + unittest.main() From 984f2f45bd2662029fa0aac8aad1676fb9c6f382 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 25 Nov 2016 19:40:58 +0300 Subject: [PATCH 0090/1124] slightly improved error messages --- expected/pathman_basic.out | 6 +++--- init.sql | 2 +- range.sql | 12 +++++------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 42058067..240496b7 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -10,7 +10,7 @@ INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -ERROR: partitioning key 'value' must be NOT NULL +ERROR: partitioning key "value" must be NOT NULL ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); create_hash_partitions @@ -131,10 +131,10 @@ CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: partitioning key 'dt' must be NOT NULL +ERROR: partitioning key "dt" must be NOT NULL ALTER 
TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: not enough partitions to fit all values of 'dt' +ERROR: not enough partitions to fit all values of "dt" SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions diff --git a/init.sql b/init.sql index f9d9b5eb..c933ce9b 100644 --- a/init.sql +++ b/init.sql @@ -376,7 +376,7 @@ BEGIN END IF; IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key ''%'' must be NOT NULL', p_attribute; + RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; END IF; /* Check if there are foreign keys that reference the relation */ diff --git a/range.sql b/range.sql index 445bbbad..54d6f342 100644 --- a/range.sql +++ b/range.sql @@ -45,19 +45,17 @@ BEGIN /* Check if column has NULL values */ IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION '''%'' column contains NULL values', attribute; + RAISE EXCEPTION 'column "%" contains NULL values', attribute; END IF; /* Check lower boundary */ IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than minimum value of ''%''', - attribute; + RAISE EXCEPTION 'start value is less than min value of "%"', attribute; END IF; /* Check upper boundary */ IF end_value <= v_max THEN - RAISE EXCEPTION 'not enough partitions to fit all values of ''%''', - attribute; + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; END IF; END $$ LANGUAGE plpgsql; @@ -225,7 +223,7 @@ BEGIN END IF; IF v_max IS NULL THEN - RAISE EXCEPTION '''%'' column has NULL values', attribute; + RAISE EXCEPTION 'column "%" has NULL values', attribute; END IF; p_count := 0; @@ -577,7 +575,7 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE 
EXCEPTION 'specified partitions aren''t RANGE partitions'; + RAISE EXCEPTION 'specified partitions are not RANGE partitions'; END IF; v_atttype := @extschema@.get_attribute_type(partition1, v_attname); From 833707c4f0b492b25194c3f7d10d435e23e24bac Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 25 Nov 2016 20:01:46 +0300 Subject: [PATCH 0091/1124] refactoring to make half open partitions work (not finished yet) --- range.sql | 41 ++++++-- src/init.c | 49 +++++++--- src/partition_creation.c | 162 ++++++++++++++++++++++--------- src/partition_creation.h | 24 +++-- src/pg_pathman.c | 16 +-- src/pl_funcs.c | 25 +++-- src/pl_range_funcs.c | 203 ++++++++++++++++++++++++++++++--------- src/relation_info.h | 55 ++++++++--- 8 files changed, 419 insertions(+), 156 deletions(-) diff --git a/range.sql b/range.sql index 1fb2ec79..8d758759 100644 --- a/range.sql +++ b/range.sql @@ -505,7 +505,8 @@ BEGIN partition_name); /* Copy data */ - v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); + v_cond := @extschema@.build_range_condition(v_new_partition::regclass, + v_attname, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', partition::TEXT, @@ -513,7 +514,8 @@ BEGIN v_new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); + v_cond := @extschema@.build_range_condition(partition::regclass, + v_attname, p_range[1], split_value); v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', @@ -612,6 +614,8 @@ DECLARE v_attname TEXT; v_atttype REGTYPE; v_check_name TEXT; + v_lower_bound dummy%TYPE; + v_upper_bound dummy%TYPE; BEGIN SELECT attname FROM @extschema@.pathman_config @@ -642,13 +646,28 @@ BEGIN partition1::TEXT, v_check_name); + /* Determine left bound */ + IF p_range[1] IS NULL OR p_range[3] IS NULL THEN + 
v_lower_bound := NULL; + ELSE + v_lower_bound := least(p_range[1], p_range[3]); + END IF; + + /* Determine right bound */ + IF p_range[2] IS NULL OR p_range[4] IS NULL THEN + v_upper_bound := NULL; + ELSE + v_upper_bound := greatest(p_range[2], p_range[4]); + END IF; + /* and create a new one */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition1::TEXT, v_check_name, - @extschema@.build_range_condition(v_attname, - least(p_range[1], p_range[3]), - greatest(p_range[2], p_range[4]))); + @extschema@.build_range_condition(partition1, + v_attname, + v_lower_bound, + v_upper_bound)); /* Copy data from second partition to the first one */ EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) @@ -745,6 +764,10 @@ BEGIN USING parent_relid INTO p_range; + IF p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition is half open'; + END IF; + IF @extschema@.is_date_type(p_atttype) THEN v_part_name := @extschema@.create_single_range_partition( parent_relid, @@ -855,6 +878,10 @@ BEGIN USING parent_relid INTO p_range; + IF p_range[1] IS NULL THEN + RAISE EXCEPTION 'Cannot prepend partition because first partition is half open'; + END IF; + IF @extschema@.is_date_type(p_atttype) THEN v_part_name := @extschema@.create_single_range_partition( parent_relid, @@ -1045,7 +1072,8 @@ BEGIN EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition::TEXT, @extschema@.build_check_constraint_name(partition, v_attname), - @extschema@.build_range_condition(v_attname, + @extschema@.build_range_condition(partition, + v_attname, start_value, end_value)); @@ -1230,6 +1258,7 @@ SET client_min_messages = WARNING; * Construct CHECK constraint condition for a range partition. 
*/ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + p_relid REGCLASS, p_attname TEXT, start_value ANYELEMENT, end_value ANYELEMENT) diff --git a/src/init.c b/src/init.c index d88a9e3a..31766838 100644 --- a/src/init.c +++ b/src/init.c @@ -383,10 +383,13 @@ fill_prel_with_partitions(const Oid *partitions, &lower_null, &upper_null)) { prel->ranges[i].child_oid = partitions[i]; - prel->ranges[i].min = lower; - prel->ranges[i].max = upper; - prel->ranges[i].infinite_min = lower_null; - prel->ranges[i].infinite_max = upper_null; + // prel->ranges[i].min = lower; + // prel->ranges[i].max = upper; + // prel->ranges[i].infinite_min = lower_null; + // prel->ranges[i].infinite_max = upper_null; + (&prel->ranges[i].min)->value = lower; + MakeInfinitable(&prel->ranges[i].min, lower, lower_null); + MakeInfinitable(&prel->ranges[i].max, upper, upper_null); } else { @@ -428,13 +431,21 @@ fill_prel_with_partitions(const Oid *partitions, old_mcxt = MemoryContextSwitchTo(TopMemoryContext); for (i = 0; i < PrelChildrenCount(prel); i++) { - prel->ranges[i].max = datumCopy(prel->ranges[i].max, - prel->attbyval, - prel->attlen); - - prel->ranges[i].min = datumCopy(prel->ranges[i].min, - prel->attbyval, - prel->attlen); + // prel->ranges[i].max = datumCopy(prel->ranges[i].max, + // prel->attbyval, + // prel->attlen); + CopyInfinitable(&(prel->ranges[i].max), + &(prel->ranges[i].max), + prel->attbyval, + prel->attlen); + + // prel->ranges[i].min = datumCopy(prel->ranges[i].min, + // prel->attbyval, + // prel->attlen); + CopyInfinitable(&prel->ranges[i].min, + &prel->ranges[i].min, + prel->attbyval, + prel->attlen); } MemoryContextSwitchTo(old_mcxt); @@ -869,15 +880,21 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) Oid cmp_proc_oid = *(Oid *) arg; /* If range is half open */ - if (v1->infinite_min) + if (IsInfinite(&v1->min)) { - if (v2->infinite_min) - return Int32GetDatum(0); + // if (IsInfinite(&v2->min)) + // return Int32GetDatum(0); return 
Int32GetDatum(-1); } + if (IsInfinite(&v2->min)) + { + return Int32GetDatum(1); + } /* Else if range is closed */ - return OidFunctionCall2(cmp_proc_oid, v1->min, v2->min); + return OidFunctionCall2(cmp_proc_oid, + InfinitableGetValue(&v1->min), + InfinitableGetValue(&v2->min)); } /* @@ -928,7 +945,7 @@ validate_range_constraint(const Expr *expr, if (!expr) return false; - *lower_null = *upper_null = false; + *lower_null = *upper_null = true; tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); /* It could be either AND operator on top or just an OpExpr */ diff --git a/src/partition_creation.c b/src/partition_creation.c index b099093a..1d7263ac 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -76,8 +76,8 @@ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); /* Create one RANGE partition [start_value, end_value) */ Oid create_single_range_partition_internal(Oid parent_relid, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type, RangeVar *partition_rv, char *tablespace) @@ -113,7 +113,7 @@ create_single_range_partition_internal(Oid parent_relid, /* Finally invoke 'init_callback' */ MakeInitCallbackRangeParams(&callback_params, InvalidOid, parent_relid, partition_relid, - start_value, end_value, value_type); + *start_value, *end_value, value_type); invoke_part_callback(&callback_params); CommandCounterIncrement(); @@ -243,16 +243,41 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) /* Else spawn a new one (we hold a lock on the parent) */ if (partid == InvalidOid) { + RangeEntry *ranges = PrelGetRangesArray(prel); Datum bound_min, /* absolute MIN */ bound_max; /* absolute MAX */ + // Infinitable bound_min, /* lower bound of all partitions */ + // bound_max; /* upper bound of all partitions */ + // Infinitable start, + // end; Oid interval_type = InvalidOid; Datum interval_binary, /* assigned 'width' of one partition */ 
interval_text; + // bound_min = ranges[0].min; + // bound_max = ranges[PrelLastChild(prel)].max; + + // start.value = !IsInfinite(&bound_min) ? + // datumCopy(InfinitableGetValue(&bound_min), + // prel->attbyval, + // prel->attlen) : + // (Datum) 0; + // start.is_infinite = IsInfinite(&bound_min); + + // end.value = !IsInfinite(&bound_max) ? + // datumCopy(InfinitableGetValue(&bound_max), + // prel->attbyval, + // prel->attlen) : + // (Datum) 0; + // end.is_infinite = IsInfinite(&bound_max); + /* Read max & min range values from PartRelationInfo */ - bound_min = PrelGetRangesArray(prel)[0].min; - bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; + /* TODO */ + // bound_min = PrelGetRangesArray(prel)[0].min; + // bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; + bound_min = InfinitableGetValue(&ranges[0].min); + bound_max = InfinitableGetValue(&ranges[PrelLastChild(prel)].max); /* Copy datums on order to protect them from cache invalidation */ bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); @@ -476,6 +501,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) { Datum args[2]; + Infinitable bounds[2]; /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -488,8 +514,11 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ args[0] = should_append ? cur_following_bound : cur_leading_bound; args[1] = should_append ? 
cur_leading_bound : cur_following_bound; + MakeInfinitable(&bounds[0], args[0], false); + MakeInfinitable(&bounds[1], args[1], false); + last_partition = create_single_range_partition_internal(parent_relid, - args[0], args[1], + &bounds[0], &bounds[1], range_bound_type, NULL, NULL); @@ -752,8 +781,8 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Build RANGE check constraint expression tree */ Node * build_raw_range_check_tree(char *attname, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type) { BoolExpr *and_oper = makeNode(BoolExpr); @@ -767,31 +796,44 @@ build_raw_range_check_tree(char *attname, col_ref->fields = list_make1(makeString(attname)); col_ref->location = -1; - /* Left boundary */ - left_const->val = *makeString(datum_to_cstring(start_value, value_type)); - left_const->location = -1; - - /* Right boundary */ - right_const->val = *makeString(datum_to_cstring(end_value, value_type)); - right_const->location = -1; + and_oper->boolop = AND_EXPR; + and_oper->args = NIL; + and_oper->location = -1; /* Left comparison (VAR >= start_value) */ - left_arg->name = list_make1(makeString(">=")); - left_arg->kind = AEXPR_OP; - left_arg->lexpr = (Node *) col_ref; - left_arg->rexpr = (Node *) left_const; - left_arg->location = -1; + if (!IsInfinite(start_value)) + { + /* Left boundary */ + left_const->val = *makeString( + datum_to_cstring(InfinitableGetValue(start_value), value_type)); + left_const->location = -1; + + left_arg->name = list_make1(makeString(">=")); + left_arg->kind = AEXPR_OP; + left_arg->lexpr = (Node *) col_ref; + left_arg->rexpr = (Node *) left_const; + left_arg->location = -1; + and_oper->args = lappend(and_oper->args, left_arg); + } /* Right comparision (VAR < end_value) */ - right_arg->name = list_make1(makeString("<")); - right_arg->kind = AEXPR_OP; - right_arg->lexpr = (Node *) col_ref; - right_arg->rexpr = (Node *) right_const; - right_arg->location = -1; + if 
(!IsInfinite(end_value)) + { + /* Right boundary */ + right_const->val = *makeString( + datum_to_cstring(InfinitableGetValue(end_value), value_type)); + right_const->location = -1; + + right_arg->name = list_make1(makeString("<")); + right_arg->kind = AEXPR_OP; + right_arg->lexpr = (Node *) col_ref; + right_arg->rexpr = (Node *) right_const; + right_arg->location = -1; + and_oper->args = lappend(and_oper->args, right_arg); + } - and_oper->boolop = AND_EXPR; - and_oper->args = list_make2(left_arg, right_arg); - and_oper->location = -1; + if (and_oper->args == NIL) + elog(ERROR, "Cannot create infinite range constraint"); return (Node *) and_oper; } @@ -800,8 +842,8 @@ build_raw_range_check_tree(char *attname, Constraint * build_range_check_constraint(Oid child_relid, char *attname, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type) { Constraint *range_constr; @@ -837,10 +879,8 @@ build_range_check_constraint(Oid child_relid, /* Check if range overlaps with any partitions */ bool check_range_available(Oid parent_relid, - Datum start_value, - Datum end_value, - bool infinite_start, - bool infinite_end, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type, bool raise_error) { @@ -873,23 +913,30 @@ check_range_available(Oid parent_relid, * range ends in plus infinity then the left boundary of the first * range is on the left. Otherwise compare specific values */ - c1 = (infinite_start || ranges[i].infinite_max) ? - -1 : FunctionCall2(&cmp_func, start_value, ranges[i].max); + c1 = (IsInfinite(start_value) || IsInfinite(&ranges[i].max)) ? + -1 : + FunctionCall2(&cmp_func, + InfinitableGetValue(start_value), + InfinitableGetValue(&ranges[i].max)); /* * Similary check that right boundary of the range we're checking is on * the right of the beginning of the current one */ - c2 = (infinite_end || ranges[i].infinite_min) ? 
- -1 : FunctionCall2(&cmp_func, end_value, ranges[i].min); + c2 = (IsInfinite(end_value) || IsInfinite(&ranges[i].min)) ? + 1 : + FunctionCall2(&cmp_func, + InfinitableGetValue(end_value), + InfinitableGetValue(&ranges[i].min)); /* There's someone! */ if (c1 < 0 && c2 > 0) { if (raise_error) + /* TODO: print infinity */ elog(ERROR, "specified range [%s, %s) overlaps " "with existing partitions", - datum_to_cstring(start_value, value_type), - datum_to_cstring(end_value, value_type)); + datum_to_cstring(InfinitableGetValue(start_value), value_type), + datum_to_cstring(InfinitableGetValue(end_value), value_type)); else return false; } @@ -917,6 +964,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) pushJsonbValue(&jsonb_state, val_type, (value)); \ } while (0) +#define JSB_INIT_NULL_VAL(value, val_type) \ + do { \ + (value)->type = jbvNull; \ + pushJsonbValue(&jsonb_state, val_type, (value)); \ + } while (0) + Oid parent_oid = cb_params->parent_relid; Oid partition_oid = cb_params->partition_relid; @@ -949,13 +1002,13 @@ invoke_init_callback_internal(init_callback_params *cb_params) { char *start_value, *end_value; - Datum sv_datum = cb_params->params.range_params.start_value, - ev_datum = cb_params->params.range_params.end_value; + Infinitable sv_datum = cb_params->params.range_params.start_value, + ev_datum = cb_params->params.range_params.end_value; Oid type = cb_params->params.range_params.value_type; /* Convert min & max to CSTRING */ - start_value = datum_to_cstring(sv_datum, type); - end_value = datum_to_cstring(ev_datum, type); + // start_value = datum_to_cstring(sv_datum, type); + // end_value = datum_to_cstring(ev_datum, type); pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); @@ -965,10 +1018,27 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); JSB_INIT_VAL(&key, WJB_KEY, "parttype"); JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); + + /* 
Lower bound */ JSB_INIT_VAL(&key, WJB_KEY, "range_min"); - JSB_INIT_VAL(&val, WJB_VALUE, start_value); + if (!IsInfinite(&sv_datum)) + { + start_value = datum_to_cstring(InfinitableGetValue(&sv_datum), type); + JSB_INIT_VAL(&val, WJB_VALUE, start_value); + } + else + JSB_INIT_NULL_VAL(&val, WJB_VALUE); + + /* Upper bound */ JSB_INIT_VAL(&key, WJB_KEY, "range_max"); - JSB_INIT_VAL(&val, WJB_VALUE, end_value); + if (!IsInfinite(&ev_datum)) + { + end_value = datum_to_cstring(InfinitableGetValue(&ev_datum), type); + JSB_INIT_VAL(&val, WJB_VALUE, end_value); + } + else + JSB_INIT_NULL_VAL(&val, WJB_VALUE); + // JSB_INIT_VAL(&val, WJB_VALUE, end_value); result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); } diff --git a/src/partition_creation.h b/src/partition_creation.h index 5e4345da..cab922f5 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -18,28 +18,26 @@ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); Oid create_single_range_partition_internal(Oid parent_relid, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type, RangeVar *partition_rv, char *tablespace); Constraint * build_range_check_constraint(Oid child_relid, char *attname, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type); Node * build_raw_range_check_tree(char *attname, - Datum start_value, - Datum end_value, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type); bool check_range_available(Oid parent_relid, - Datum start_value, - Datum end_value, - bool infinite_start, - bool infinite_end, + const Infinitable *start_value, + const Infinitable *end_value, Oid value_type, bool raise_error); @@ -71,9 +69,9 @@ typedef struct struct { - Datum start_value, - end_value; - Oid value_type; + Infinitable start_value, 
+ end_value; + Oid value_type; } range_params; } params; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index be8037cd..8d0f52a2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -446,10 +446,10 @@ select_range_partitions(const Datum value, Assert(cmp_func); /* Corner cases */ - cmp_min = ranges[startidx].infinite_min ? - 1 : DatumGetInt32(FunctionCall2(cmp_func, value, ranges[startidx].min)); - cmp_max = ranges[endidx].infinite_max ? - -1 : DatumGetInt32(FunctionCall2(cmp_func, value, ranges[endidx].max)); + cmp_min = IsInfinite(&ranges[startidx].min) ? + 1 : DatumGetInt32(FunctionCall2(cmp_func, value, InfinitableGetValue(&ranges[startidx].min))); + cmp_max = IsInfinite(&ranges[endidx].max) ? + -1 : DatumGetInt32(FunctionCall2(cmp_func, value, InfinitableGetValue(&ranges[endidx].max))); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || @@ -496,8 +496,12 @@ select_range_partitions(const Datum value, current_re = &ranges[i]; - cmp_min = FunctionCall2(cmp_func, value, current_re->min); - cmp_max = FunctionCall2(cmp_func, value, current_re->max); + // cmp_min = FunctionCall2(cmp_func, value, current_re->min); + // cmp_max = FunctionCall2(cmp_func, value, current_re->max); + cmp_min = IsInfinite(¤t_re->min) ? + 1 : FunctionCall2(cmp_func, value, InfinitableGetValue(¤t_re->min)); + cmp_max = IsInfinite(¤t_re->max) ? 
+ -1 : FunctionCall2(cmp_func, value, InfinitableGetValue(¤t_re->max)); is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 892e62ce..2c50ff25 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -397,9 +397,10 @@ show_partition_list_internal(PG_FUNCTION_ARGS) re = &PrelGetRangesArray(prel)[usercxt->child_number]; - rmin = CStringGetTextDatum(datum_to_cstring(re->min, + /* TODO: infinite */ + rmin = CStringGetTextDatum(datum_to_cstring(InfinitableGetValue(&re->min), prel->atttype)); - rmax = CStringGetTextDatum(datum_to_cstring(re->max, + rmax = CStringGetTextDatum(datum_to_cstring(InfinitableGetValue(&re->max), prel->atttype)); values[Anum_pathman_pl_partition - 1] = re->child_oid; @@ -778,24 +779,32 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) case 5: { - Datum sv_datum, - ev_datum; + // Datum sv_datum, + // ev_datum; + Infinitable start, + end; Oid value_type; if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_END)) elog(ERROR, "both bounds must be provided for RANGE partition"); /* Fetch start & end values for RANGE + their type */ - sv_datum = PG_GETARG_DATUM(ARG_RANGE_START); - ev_datum = PG_GETARG_DATUM(ARG_RANGE_END); + // sv_datum = PG_GETARG_DATUM(ARG_RANGE_START); + // ev_datum = PG_GETARG_DATUM(ARG_RANGE_END); + MakeInfinitable(&start, + PG_GETARG_DATUM(ARG_RANGE_START), + PG_ARGISNULL(ARG_RANGE_START)); + MakeInfinitable(&end, + PG_GETARG_DATUM(ARG_RANGE_END), + PG_ARGISNULL(ARG_RANGE_END)); value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); MakeInitCallbackRangeParams(&callback_params, callback_oid, parent_oid, partition_oid, - sv_datum, - ev_datum, + start, + end, value_type); } break; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 16a2b784..b3a093c1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -15,11 +15,22 @@ #include 
"utils.h" #include "catalog/namespace.h" +#include "parser/parse_relation.h" +#include "parser/parse_expr.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/ruleutils.h" +static char *deparse_constraint(Oid relid, Node *expr); +static ArrayType *construct_infinitable_array(Infinitable **elems, + uint32_t nelems, + Oid elmtype, + int elmlen, + bool elmbyval, + char elmalign); + /* Function declarations */ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); @@ -48,8 +59,12 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) Oid parent_relid; /* RANGE boundaries + value type */ - Datum start_value, - end_value; + // Datum start_value, + // end_value; + // bool infinite_start, + // infinite_end; + Infinitable start, + end; Oid value_type; /* Optional: name & tablespace */ @@ -64,19 +79,23 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) if (PG_ARGISNULL(0)) elog(ERROR, "'parent_relid' should not be NULL"); - /* Handle 'start_value' */ - if (PG_ARGISNULL(1)) - elog(ERROR, "'start_value' should not be NULL"); + // /* Handle 'start_value' */ + // if (PG_ARGISNULL(1)) + // elog(ERROR, "'start_value' should not be NULL"); - /* Handle 'end_value' */ - if (PG_ARGISNULL(2)) - elog(ERROR, "'end_value' should not be NULL"); + // /* Handle 'end_value' */ + // if (PG_ARGISNULL(2)) + // elog(ERROR, "'end_value' should not be NULL"); /* Fetch mandatory args */ parent_relid = PG_GETARG_OID(0); - start_value = PG_GETARG_DATUM(1); - end_value = PG_GETARG_DATUM(2); + // start_value = PG_GETARG_DATUM(1); + // end_value = PG_GETARG_DATUM(2); + // infinite_start = PG_ARGISNULL(1); + // infinite_end = PG_ARGISNULL(2); value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + MakeInfinitable(&start, PG_GETARG_DATUM(1), PG_ARGISNULL(1)); + MakeInfinitable(&end, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); /* Fetch 'partition_name' */ if (!PG_ARGISNULL(3)) @@ -99,8 +118,8 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Create a new 
RANGE partition and return its Oid */ partition_relid = create_single_range_partition_internal(parent_relid, - start_value, - end_value, + &start, + &end, value_type, partition_name_rv, tablespace); @@ -164,17 +183,21 @@ check_range_available_pl(PG_FUNCTION_ARGS) { Oid parent_relid = PG_GETARG_OID(0); - Datum start_value = PG_GETARG_DATUM(1), - end_value = PG_GETARG_DATUM(2); - bool start_null = PG_ARGISNULL(1), - end_null = PG_ARGISNULL(2); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + // Datum start_value = PG_GETARG_DATUM(1), + // end_value = PG_GETARG_DATUM(2); + // bool start_null = PG_ARGISNULL(1), + // end_null = PG_ARGISNULL(2); + Infinitable start_value, + end_value; + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + MakeInfinitable(&start_value, PG_GETARG_DATUM(1), PG_ARGISNULL(1)); + MakeInfinitable(&end_value, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); /* Raise ERROR if range overlaps with any partition */ check_range_available(parent_relid, - start_value, - end_value, - start_null, end_null, + &start_value, + &end_value, value_type, true); @@ -224,11 +247,26 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) if (ranges[i].child_oid == partition_relid) { ArrayType *arr; - Datum elems[2] = { ranges[i].min, ranges[i].max }; + // Datum elems[2] = { InfinitableGetValue(&ranges[i].min), + // InfinitableGetValue(&ranges[i].max) }; + // bool nulls[2] = { IsInfinite(&ranges[i].min), + // IsInfinite(&ranges[i].max) }; + // int dims[1] = { 2 }; + // int lbs[1] = { 1 }; - arr = construct_array(elems, 2, prel->atttype, - prel->attlen, prel->attbyval, - prel->attalign); + // arr = construct_md_array(elems, nulls, 1, dims, lbs, + // prel->atttype, prel->attlen, + // prel->attbyval, prel->attalign); + + // arr = construct_array(elems, 2, prel->atttype, + // prel->attlen, prel->attbyval, + // prel->attalign); + Infinitable *elems[2] = { &ranges[i].min, &ranges[i].max }; + + + arr = construct_infinitable_array(elems, 2, + prel->atttype, prel->attlen, + 
prel->attbyval, prel->attalign); PG_RETURN_ARRAYTYPE_P(arr); } @@ -253,7 +291,8 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) { Oid parent_relid = InvalidOid; int partition_idx = 0; - Datum elems[2]; + // Datum elems[2]; + Infinitable *elems[2]; RangeEntry *ranges; const PartRelationInfo *prel; @@ -287,14 +326,15 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); - elems[0] = ranges[partition_idx].min; - elems[1] = ranges[partition_idx].max; + elems[0] = &ranges[partition_idx].min; + elems[1] = &ranges[partition_idx].max; - PG_RETURN_ARRAYTYPE_P(construct_array(elems, 2, - prel->atttype, - prel->attlen, - prel->attbyval, - prel->attalign)); + PG_RETURN_ARRAYTYPE_P( + construct_infinitable_array(elems, 2, + prel->atttype, + prel->attlen, + prel->attbyval, + prel->attalign)); } @@ -308,30 +348,60 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Datum build_range_condition(PG_FUNCTION_ARGS) { - text *attname = PG_GETARG_TEXT_P(0); + Oid relid = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); - Datum min_bound = PG_GETARG_DATUM(1), - max_bound = PG_GETARG_DATUM(2); + Infinitable min, + max; + Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + Constraint *con; + char *result; - Oid min_bound_type = get_fn_expr_argtype(fcinfo->flinfo, 1), - max_bound_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + MakeInfinitable(&min, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); + MakeInfinitable(&max, PG_GETARG_DATUM(3), PG_ARGISNULL(3)); - char *result; + con = build_range_check_constraint(relid, text_to_cstring(attname), + &min, &max, + bounds_type); - /* This is not going to trigger (not now, at least), just for the safety */ - if (min_bound_type != max_bound_type) - elog(ERROR, "cannot build range condition: " - "boundaries should be of the same type"); - - /* Create range condition CSTRING */ - result = psprintf("%1$s >= '%2$s' AND %1$s < '%3$s'", - text_to_cstring(attname), - datum_to_cstring(min_bound, min_bound_type), - datum_to_cstring(max_bound, 
max_bound_type)); + result = deparse_constraint(relid, con->raw_expr); PG_RETURN_TEXT_P(cstring_to_text(result)); } +/* + * Transform constraint into cstring + */ +static char * +deparse_constraint(Oid relid, Node *expr) +{ + Relation rel; + RangeTblEntry *rte; + Node *cooked_expr; + ParseState *pstate; + List *context; + char *result; + + context = deparse_context_for(get_rel_name(relid), relid); + + rel = heap_open(relid, NoLock); + + /* Initialize parse state */ + pstate = make_parsestate(NULL); + rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + addRTEtoQuery(pstate, rte, true, true, true); + + /* Transform constraint into executable expression (i.e. cook it) */ + cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); + + /* Transform expression into string */ + result = deparse_expression(cooked_expr, context, false, false); + + heap_close(rel, NoLock); + + return result; +} + Datum build_sequence_name(PG_FUNCTION_ARGS) { @@ -347,3 +417,40 @@ build_sequence_name(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } + +/* + * Build 1d array of Infinitable elements + * + * The main difference from construct_array() is that it will substitute + * infinite values with NULL's + */ +static ArrayType * +construct_infinitable_array(Infinitable **elems, + uint32_t nelems, + Oid elmtype, + int elmlen, + bool elmbyval, + char elmalign) +{ + ArrayType *arr; + Datum *data; + bool *nulls; + int dims[1] = { nelems }; + int lbs[1] = { 1 }; + int i; + + data = palloc(sizeof(Datum) * nelems); + nulls = palloc(sizeof(bool) * nelems); + + for (i = 0; i < nelems; i++) + { + data[i] = InfinitableGetValue(elems[i]); + nulls[i] = IsInfinite(elems[i]); + } + + arr = construct_md_array(data, nulls, 1, dims, lbs, + elmtype, elmlen, + elmbyval, elmalign); + + return arr; +} diff --git a/src/relation_info.h b/src/relation_info.h index 9c5866f9..80385496 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -17,6 +17,32 @@ #include 
"storage/lock.h" +/* Infinitable datum values */ +typedef struct +{ + Datum value; + bool is_infinite; +} Infinitable; + +#define MakeInfinitable(inf, _value, _is_infinite) \ + do \ + { \ + (inf)->value = (_value); \ + (inf)->is_infinite = (_is_infinite); \ + } while (0) + +#define IsInfinite(i) ((i)->is_infinite) +#define InfinitableGetValue(i) ((i)->value) +#define CopyInfinitable(i_to, i_from, by_val, len) \ + do \ + { \ + (i_to)->value = !IsInfinite(i_from) ? \ + datumCopy((i_from)->value, (by_val), (len)) : \ + (Datum) 0; \ + (i_to)->is_infinite = IsInfinite(i_from); \ + } while (0) + + /* * Partitioning type. */ @@ -33,10 +59,10 @@ typedef enum typedef struct { Oid child_oid; - Datum min, + Infinitable min, max; - bool infinite_min, - infinite_max; + // bool infinite_min, + // infinite_max; } RangeEntry; /* @@ -159,19 +185,19 @@ FreeChildrenArray(PartRelationInfo *prel) Assert(PrelIsValid(prel)); /* Remove relevant PartParentInfos */ - if ((prel)->children) + if (prel->children) { for (i = 0; i < PrelChildrenCount(prel); i++) { - Oid child = (prel)->children[i]; + Oid child = prel->children[i]; /* If it's *always been* relid's partition, free cache */ if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) forget_parent_of_partition(child, NULL); } - pfree((prel)->children); - (prel)->children = NULL; + pfree(prel->children); + prel->children = NULL; } } @@ -183,20 +209,23 @@ FreeRangesArray(PartRelationInfo *prel) Assert(PrelIsValid(prel)); /* Remove RangeEntries array */ - if ((prel)->ranges) + if (prel->ranges) { /* Remove persistent entries if not byVal */ - if (!(prel)->attbyval) + if (!prel->attbyval) { for (i = 0; i < PrelChildrenCount(prel); i++) { - pfree(DatumGetPointer((prel)->ranges[i].min)); - pfree(DatumGetPointer((prel)->ranges[i].max)); + if (!IsInfinite(&prel->ranges[i].min)) + pfree(DatumGetPointer(InfinitableGetValue(&prel->ranges[i].min))); + + if (!IsInfinite(&prel->ranges[i].max)) + 
pfree(DatumGetPointer(InfinitableGetValue(&prel->ranges[i].max))); } } - pfree((prel)->ranges); - (prel)->ranges = NULL; + pfree(prel->ranges); + prel->ranges = NULL; } } From 9cbefb553e986e739c05ec071199f66406fe12e7 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 28 Nov 2016 18:28:07 +0300 Subject: [PATCH 0092/1124] rename Infinitable to Bound, added plus and minus infinity, added a Bound comparison function --- src/init.c | 62 ++++++++++++++++++---------------- src/partition_creation.c | 66 ++++++++++++++++++------------------ src/partition_creation.h | 18 +++++----- src/pg_pathman.c | 8 ++--- src/pl_funcs.c | 32 +++++++++++------- src/pl_range_funcs.c | 57 +++++++++++++++---------------- src/relation_info.h | 73 +++++++++++++++++++++++++++------------- 7 files changed, 176 insertions(+), 140 deletions(-) diff --git a/src/init.c b/src/init.c index 31766838..2d5e92e8 100644 --- a/src/init.c +++ b/src/init.c @@ -383,13 +383,12 @@ fill_prel_with_partitions(const Oid *partitions, &lower_null, &upper_null)) { prel->ranges[i].child_oid = partitions[i]; - // prel->ranges[i].min = lower; - // prel->ranges[i].max = upper; - // prel->ranges[i].infinite_min = lower_null; - // prel->ranges[i].infinite_max = upper_null; - (&prel->ranges[i].min)->value = lower; - MakeInfinitable(&prel->ranges[i].min, lower, lower_null); - MakeInfinitable(&prel->ranges[i].max, upper, upper_null); + MakeBound(&prel->ranges[i].min, + lower, + lower_null ? MINUS_INFINITY : FINITE); + MakeBound(&prel->ranges[i].max, + upper, + upper_null ? 
PLUS_INFINITY : FINITE); } else { @@ -417,11 +416,15 @@ fill_prel_with_partitions(const Oid *partitions, if (prel->parttype == PT_RANGE) { MemoryContext old_mcxt; + FmgrInfo flinfo; + + /* Prepare function info */ + fmgr_info(prel->cmp_proc, &flinfo); /* Sort partitions by RangeEntry->min asc */ qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), sizeof(RangeEntry), cmp_range_entries, - (void *) &prel->cmp_proc); + (void *) &flinfo); /* Initialize 'prel->children' array */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -434,7 +437,7 @@ fill_prel_with_partitions(const Oid *partitions, // prel->ranges[i].max = datumCopy(prel->ranges[i].max, // prel->attbyval, // prel->attlen); - CopyInfinitable(&(prel->ranges[i].max), + CopyBound(&(prel->ranges[i].max), &(prel->ranges[i].max), prel->attbyval, prel->attlen); @@ -442,7 +445,7 @@ fill_prel_with_partitions(const Oid *partitions, // prel->ranges[i].min = datumCopy(prel->ranges[i].min, // prel->attbyval, // prel->attlen); - CopyInfinitable(&prel->ranges[i].min, + CopyBound(&prel->ranges[i].min, &prel->ranges[i].min, prel->attbyval, prel->attlen); @@ -876,25 +879,26 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) { const RangeEntry *v1 = (const RangeEntry *) p1; const RangeEntry *v2 = (const RangeEntry *) p2; - - Oid cmp_proc_oid = *(Oid *) arg; - - /* If range is half open */ - if (IsInfinite(&v1->min)) - { - // if (IsInfinite(&v2->min)) - // return Int32GetDatum(0); - return Int32GetDatum(-1); - } - if (IsInfinite(&v2->min)) - { - return Int32GetDatum(1); - } - - /* Else if range is closed */ - return OidFunctionCall2(cmp_proc_oid, - InfinitableGetValue(&v1->min), - InfinitableGetValue(&v2->min)); + FmgrInfo *flinfo = (FmgrInfo *) arg; + + return cmp_bounds(flinfo, &v1->min, &v2->min); + + // /* If range is half open */ + // if (IsInfinite(&v1->min)) + // { + // // if (IsInfinite(&v2->min)) + // // return Int32GetDatum(0); + // return Int32GetDatum(-1); + // } + // if (IsInfinite(&v2->min)) + // { 
+ // return Int32GetDatum(1); + // } + + // /* Else if range is closed */ + // return OidFunctionCall2(cmp_proc_oid, + // BoundGetValue(&v1->min), + // BoundGetValue(&v2->min)); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index 1d7263ac..ee7eb930 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -76,8 +76,8 @@ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); /* Create one RANGE partition [start_value, end_value) */ Oid create_single_range_partition_internal(Oid parent_relid, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type, RangeVar *partition_rv, char *tablespace) @@ -276,8 +276,8 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) /* TODO */ // bound_min = PrelGetRangesArray(prel)[0].min; // bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; - bound_min = InfinitableGetValue(&ranges[0].min); - bound_max = InfinitableGetValue(&ranges[PrelLastChild(prel)].max); + bound_min = BoundGetValue(&ranges[0].min); + bound_max = BoundGetValue(&ranges[PrelLastChild(prel)].max); /* Copy datums on order to protect them from cache invalidation */ bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); @@ -501,7 +501,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) { Datum args[2]; - Infinitable bounds[2]; + Bound bounds[2]; /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -514,8 +514,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ args[0] = should_append ? cur_following_bound : cur_leading_bound; args[1] = should_append ? 
cur_leading_bound : cur_following_bound; - MakeInfinitable(&bounds[0], args[0], false); - MakeInfinitable(&bounds[1], args[1], false); + MakeBound(&bounds[0], args[0], FINITE); + MakeBound(&bounds[1], args[1], FINITE); last_partition = create_single_range_partition_internal(parent_relid, &bounds[0], &bounds[1], @@ -781,8 +781,8 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Build RANGE check constraint expression tree */ Node * build_raw_range_check_tree(char *attname, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type) { BoolExpr *and_oper = makeNode(BoolExpr); @@ -805,7 +805,7 @@ build_raw_range_check_tree(char *attname, { /* Left boundary */ left_const->val = *makeString( - datum_to_cstring(InfinitableGetValue(start_value), value_type)); + datum_to_cstring(BoundGetValue(start_value), value_type)); left_const->location = -1; left_arg->name = list_make1(makeString(">=")); @@ -821,7 +821,7 @@ build_raw_range_check_tree(char *attname, { /* Right boundary */ right_const->val = *makeString( - datum_to_cstring(InfinitableGetValue(end_value), value_type)); + datum_to_cstring(BoundGetValue(end_value), value_type)); right_const->location = -1; right_arg->name = list_make1(makeString("<")); @@ -842,8 +842,8 @@ build_raw_range_check_tree(char *attname, Constraint * build_range_check_constraint(Oid child_relid, char *attname, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type) { Constraint *range_constr; @@ -879,8 +879,8 @@ build_range_check_constraint(Oid child_relid, /* Check if range overlaps with any partitions */ bool check_range_available(Oid parent_relid, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start, + const Bound *end, Oid value_type, bool raise_error) { @@ -913,30 +913,32 @@ check_range_available(Oid parent_relid, * range ends in plus infinity 
then the left boundary of the first * range is on the left. Otherwise compare specific values */ - c1 = (IsInfinite(start_value) || IsInfinite(&ranges[i].max)) ? - -1 : - FunctionCall2(&cmp_func, - InfinitableGetValue(start_value), - InfinitableGetValue(&ranges[i].max)); + // c1 = (IsInfinite(start) || IsInfinite(&ranges[i].max)) ? + // -1 : + // FunctionCall2(&cmp_func, + // BoundGetValue(start), + // BoundGetValue(&ranges[i].max)); /* * Similary check that right boundary of the range we're checking is on * the right of the beginning of the current one */ - c2 = (IsInfinite(end_value) || IsInfinite(&ranges[i].min)) ? - 1 : - FunctionCall2(&cmp_func, - InfinitableGetValue(end_value), - InfinitableGetValue(&ranges[i].min)); + // c2 = (IsInfinite(end) || IsInfinite(&ranges[i].min)) ? + // 1 : + // FunctionCall2(&cmp_func, + // BoundGetValue(end), + // BoundGetValue(&ranges[i].min)); + + c1 = cmp_bounds(&cmp_func, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, end, &ranges[i].min); /* There's someone! */ if (c1 < 0 && c2 > 0) { if (raise_error) - /* TODO: print infinity */ elog(ERROR, "specified range [%s, %s) overlaps " "with existing partitions", - datum_to_cstring(InfinitableGetValue(start_value), value_type), - datum_to_cstring(InfinitableGetValue(end_value), value_type)); + !IsInfinite(start) ? datum_to_cstring(BoundGetValue(start), value_type) : "NULL", + !IsInfinite(end) ? 
datum_to_cstring(BoundGetValue(end), value_type) : "NULL"); else return false; } @@ -1002,8 +1004,8 @@ invoke_init_callback_internal(init_callback_params *cb_params) { char *start_value, *end_value; - Infinitable sv_datum = cb_params->params.range_params.start_value, - ev_datum = cb_params->params.range_params.end_value; + Bound sv_datum = cb_params->params.range_params.start_value, + ev_datum = cb_params->params.range_params.end_value; Oid type = cb_params->params.range_params.value_type; /* Convert min & max to CSTRING */ @@ -1023,7 +1025,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "range_min"); if (!IsInfinite(&sv_datum)) { - start_value = datum_to_cstring(InfinitableGetValue(&sv_datum), type); + start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); JSB_INIT_VAL(&val, WJB_VALUE, start_value); } else @@ -1033,7 +1035,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "range_max"); if (!IsInfinite(&ev_datum)) { - end_value = datum_to_cstring(InfinitableGetValue(&ev_datum), type); + end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); JSB_INIT_VAL(&val, WJB_VALUE, end_value); } else diff --git a/src/partition_creation.h b/src/partition_creation.h index cab922f5..eb61dd7f 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -18,26 +18,26 @@ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); Oid create_single_range_partition_internal(Oid parent_relid, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type, RangeVar *partition_rv, char *tablespace); Constraint * build_range_check_constraint(Oid child_relid, char *attname, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid 
value_type); Node * build_raw_range_check_tree(char *attname, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type); bool check_range_available(Oid parent_relid, - const Infinitable *start_value, - const Infinitable *end_value, + const Bound *start_value, + const Bound *end_value, Oid value_type, bool raise_error); @@ -69,7 +69,7 @@ typedef struct struct { - Infinitable start_value, + Bound start_value, end_value; Oid value_type; } range_params; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 8d0f52a2..fcd2112d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -447,9 +447,9 @@ select_range_partitions(const Datum value, /* Corner cases */ cmp_min = IsInfinite(&ranges[startidx].min) ? - 1 : DatumGetInt32(FunctionCall2(cmp_func, value, InfinitableGetValue(&ranges[startidx].min))); + 1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[startidx].min))); cmp_max = IsInfinite(&ranges[endidx].max) ? - -1 : DatumGetInt32(FunctionCall2(cmp_func, value, InfinitableGetValue(&ranges[endidx].max))); + -1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[endidx].max))); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || @@ -499,9 +499,9 @@ select_range_partitions(const Datum value, // cmp_min = FunctionCall2(cmp_func, value, current_re->min); // cmp_max = FunctionCall2(cmp_func, value, current_re->max); cmp_min = IsInfinite(¤t_re->min) ? - 1 : FunctionCall2(cmp_func, value, InfinitableGetValue(¤t_re->min)); + 1 : FunctionCall2(cmp_func, value, BoundGetValue(¤t_re->min)); cmp_max = IsInfinite(¤t_re->max) ? 
- -1 : FunctionCall2(cmp_func, value, InfinitableGetValue(¤t_re->max)); + -1 : FunctionCall2(cmp_func, value, BoundGetValue(¤t_re->max)); is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 2c50ff25..4e636d07 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -397,11 +397,17 @@ show_partition_list_internal(PG_FUNCTION_ARGS) re = &PrelGetRangesArray(prel)[usercxt->child_number]; - /* TODO: infinite */ - rmin = CStringGetTextDatum(datum_to_cstring(InfinitableGetValue(&re->min), - prel->atttype)); - rmax = CStringGetTextDatum(datum_to_cstring(InfinitableGetValue(&re->max), - prel->atttype)); + /* Lower bound text */ + rmin = !IsInfinite(&re->min) ? + CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->min), prel->atttype)) : + CStringGetTextDatum("NULL"); + + /* Upper bound text */ + rmax = !IsInfinite(&re->max) ? + CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->max), prel->atttype)) : + CStringGetTextDatum("NULL"); values[Anum_pathman_pl_partition - 1] = re->child_oid; values[Anum_pathman_pl_range_min - 1] = rmin; @@ -781,8 +787,8 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) { // Datum sv_datum, // ev_datum; - Infinitable start, - end; + Bound start, + end; Oid value_type; if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_END)) @@ -791,12 +797,12 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) /* Fetch start & end values for RANGE + their type */ // sv_datum = PG_GETARG_DATUM(ARG_RANGE_START); // ev_datum = PG_GETARG_DATUM(ARG_RANGE_END); - MakeInfinitable(&start, - PG_GETARG_DATUM(ARG_RANGE_START), - PG_ARGISNULL(ARG_RANGE_START)); - MakeInfinitable(&end, - PG_GETARG_DATUM(ARG_RANGE_END), - PG_ARGISNULL(ARG_RANGE_END)); + MakeBound(&start, + PG_GETARG_DATUM(ARG_RANGE_START), + PG_ARGISNULL(ARG_RANGE_START) ? 
MINUS_INFINITY : FINITE); + MakeBound(&end, + PG_GETARG_DATUM(ARG_RANGE_END), + PG_ARGISNULL(ARG_RANGE_END) ? PLUS_INFINITY : FINITE); value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); MakeInitCallbackRangeParams(&callback_params, diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b3a093c1..a589d3cc 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -24,7 +24,7 @@ static char *deparse_constraint(Oid relid, Node *expr); -static ArrayType *construct_infinitable_array(Infinitable **elems, +static ArrayType *construct_infinitable_array(Bound **elems, uint32_t nelems, Oid elmtype, int elmlen, @@ -63,7 +63,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) // end_value; // bool infinite_start, // infinite_end; - Infinitable start, + Bound start, end; Oid value_type; @@ -79,14 +79,6 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) if (PG_ARGISNULL(0)) elog(ERROR, "'parent_relid' should not be NULL"); - // /* Handle 'start_value' */ - // if (PG_ARGISNULL(1)) - // elog(ERROR, "'start_value' should not be NULL"); - - // /* Handle 'end_value' */ - // if (PG_ARGISNULL(2)) - // elog(ERROR, "'end_value' should not be NULL"); - /* Fetch mandatory args */ parent_relid = PG_GETARG_OID(0); // start_value = PG_GETARG_DATUM(1); @@ -94,8 +86,12 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) // infinite_start = PG_ARGISNULL(1); // infinite_end = PG_ARGISNULL(2); value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - MakeInfinitable(&start, PG_GETARG_DATUM(1), PG_ARGISNULL(1)); - MakeInfinitable(&end, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); + MakeBound(&start, + PG_GETARG_DATUM(1), + PG_ARGISNULL(1) ? MINUS_INFINITY : FINITE); + MakeBound(&end, + PG_GETARG_DATUM(2), + PG_ARGISNULL(2) ? 
PLUS_INFINITY : FINITE); /* Fetch 'partition_name' */ if (!PG_ARGISNULL(3)) @@ -181,18 +177,17 @@ find_or_create_range_partition(PG_FUNCTION_ARGS) Datum check_range_available_pl(PG_FUNCTION_ARGS) { - Oid parent_relid = PG_GETARG_OID(0); - - // Datum start_value = PG_GETARG_DATUM(1), - // end_value = PG_GETARG_DATUM(2); - // bool start_null = PG_ARGISNULL(1), - // end_null = PG_ARGISNULL(2); - Infinitable start_value, + Oid parent_relid = PG_GETARG_OID(0); + Bound start_value, end_value; Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - MakeInfinitable(&start_value, PG_GETARG_DATUM(1), PG_ARGISNULL(1)); - MakeInfinitable(&end_value, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); + MakeBound(&start_value, + PG_GETARG_DATUM(1), + PG_ARGISNULL(1) ? MINUS_INFINITY : FINITE); + MakeBound(&end_value, + PG_GETARG_DATUM(2), + PG_ARGISNULL(2) ? PLUS_INFINITY : FINITE); /* Raise ERROR if range overlaps with any partition */ check_range_available(parent_relid, @@ -261,7 +256,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) // arr = construct_array(elems, 2, prel->atttype, // prel->attlen, prel->attbyval, // prel->attalign); - Infinitable *elems[2] = { &ranges[i].min, &ranges[i].max }; + Bound *elems[2] = { &ranges[i].min, &ranges[i].max }; arr = construct_infinitable_array(elems, 2, @@ -292,7 +287,7 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Oid parent_relid = InvalidOid; int partition_idx = 0; // Datum elems[2]; - Infinitable *elems[2]; + Bound *elems[2]; RangeEntry *ranges; const PartRelationInfo *prel; @@ -351,14 +346,18 @@ build_range_condition(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0); text *attname = PG_GETARG_TEXT_P(1); - Infinitable min, + Bound min, max; Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); Constraint *con; char *result; - MakeInfinitable(&min, PG_GETARG_DATUM(2), PG_ARGISNULL(2)); - MakeInfinitable(&max, PG_GETARG_DATUM(3), PG_ARGISNULL(3)); + MakeBound(&min, + PG_GETARG_DATUM(2), + PG_ARGISNULL(2) ? 
MINUS_INFINITY : FINITE); + MakeBound(&max, + PG_GETARG_DATUM(3), + PG_ARGISNULL(3) ? PLUS_INFINITY : FINITE); con = build_range_check_constraint(relid, text_to_cstring(attname), &min, &max, @@ -419,13 +418,13 @@ build_sequence_name(PG_FUNCTION_ARGS) } /* - * Build 1d array of Infinitable elements + * Build an 1d array of Bound elements * * The main difference from construct_array() is that it will substitute * infinite values with NULL's */ static ArrayType * -construct_infinitable_array(Infinitable **elems, +construct_infinitable_array(Bound **elems, uint32_t nelems, Oid elmtype, int elmlen, @@ -444,7 +443,7 @@ construct_infinitable_array(Infinitable **elems, for (i = 0; i < nelems; i++) { - data[i] = InfinitableGetValue(elems[i]); + data[i] = BoundGetValue(elems[i]); nulls[i] = IsInfinite(elems[i]); } diff --git a/src/relation_info.h b/src/relation_info.h index 80385496..6981c6d9 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -15,33 +15,60 @@ #include "access/attnum.h" #include "port/atomics.h" #include "storage/lock.h" +#include "fmgr.h" +#define BOUND_INFINITY_MASK 0x01 +#define BOUND_NEGATIVE_MASK 0x02 -/* Infinitable datum values */ +/* Range bound */ typedef struct { - Datum value; - bool is_infinite; -} Infinitable; - -#define MakeInfinitable(inf, _value, _is_infinite) \ - do \ - { \ - (inf)->value = (_value); \ - (inf)->is_infinite = (_is_infinite); \ + Datum value; /* Actual value if not infinite */ + uint8 is_infinite; /* bitmask where the least significant bit + is indicates if the bound is infinite and + the second one indicates if bound + is negative */ +} Bound; + +#define FINITE 0 +#define PLUS_INFINITY (0 | BOUND_INFINITY_MASK) +#define MINUS_INFINITY (0 | BOUND_INFINITY_MASK | BOUND_NEGATIVE_MASK) + +#define MakeBound(inf, _value, _infinity_type) \ + do \ + { \ + (inf)->value = (_value); \ + (inf)->is_infinite = (_infinity_type); \ } while (0) -#define IsInfinite(i) ((i)->is_infinite) -#define InfinitableGetValue(i) ((i)->value) 
-#define CopyInfinitable(i_to, i_from, by_val, len) \ - do \ - { \ - (i_to)->value = !IsInfinite(i_from) ? \ - datumCopy((i_from)->value, (by_val), (len)) : \ - (Datum) 0; \ - (i_to)->is_infinite = IsInfinite(i_from); \ +#define IsInfinite(i) ((i)->is_infinite & BOUND_INFINITY_MASK) +#define IsPlusInfinity(i) (IsInfinite(i) && !((i)->is_infinite & BOUND_NEGATIVE_MASK)) +#define IsMinusInfinity(i) (IsInfinite(i) && ((i)->is_infinite & BOUND_NEGATIVE_MASK)) +#define BoundGetValue(i) ((i)->value) +#define CopyBound(i_to, i_from, by_val, len) \ + do \ + { \ + (i_to)->value = !IsInfinite(i_from) ? \ + datumCopy((i_from)->value, (by_val), (len)) : \ + (Datum) 0; \ + (i_to)->is_infinite = (i_from)->is_infinite; \ } while (0) +/* + * Comparison macros for bounds + * If both bounds are minus infinite or plus infinite then they are equal. + * Else call original comparison function. + */ +inline static int8_t +cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) +{ + if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) + return -1; + if (IsMinusInfinity(b2) || IsPlusInfinity(b1)) + return 1; + + return FunctionCall2(cmp_func, BoundGetValue(b1), BoundGetValue(b2)); +} /* * Partitioning type. 
@@ -59,10 +86,8 @@ typedef enum typedef struct { Oid child_oid; - Infinitable min, + Bound min, max; - // bool infinite_min, - // infinite_max; } RangeEntry; /* @@ -217,10 +242,10 @@ FreeRangesArray(PartRelationInfo *prel) for (i = 0; i < PrelChildrenCount(prel); i++) { if (!IsInfinite(&prel->ranges[i].min)) - pfree(DatumGetPointer(InfinitableGetValue(&prel->ranges[i].min))); + pfree(DatumGetPointer(BoundGetValue(&prel->ranges[i].min))); if (!IsInfinite(&prel->ranges[i].max)) - pfree(DatumGetPointer(InfinitableGetValue(&prel->ranges[i].max))); + pfree(DatumGetPointer(BoundGetValue(&prel->ranges[i].max))); } } From d1249527ea043fc6de0d0edb710b7c7ef134f8fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 28 Nov 2016 19:50:19 +0300 Subject: [PATCH 0093/1124] implement function copy_acl_privileges(), improved test 'pathman_permissions', execute create_or_replace_sequence() after the config row has been inserted into PATHMAN_CONFIG, allow partition creation to users with INSERT privilege --- expected/pathman_permissions.out | 54 +++++++++---- range.sql | 32 ++++---- sql/pathman_permissions.sql | 33 ++++++-- src/partition_creation.c | 135 +++++++++++++++++++++++++++++++ src/partition_creation.h | 4 + 5 files changed, 220 insertions(+), 38 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 6a7aeea1..d2fc4886 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -5,18 +5,20 @@ CREATE ROLE user1 LOGIN; CREATE ROLE user2 LOGIN; GRANT USAGE, CREATE ON SCHEMA permissions TO user1; GRANT USAGE, CREATE ON SCHEMA permissions TO user2; -ALTER DEFAULT PRIVILEGES FOR ROLE user1 -IN SCHEMA permissions -GRANT SELECT, INSERT ON TABLES -TO user2; /* Switch to #1 */ SET ROLE user1; CREATE TABLE permissions.user1_table(id serial, a int); INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; -/* Should fail */ +/* Should fail (can't SELECT) */ +SET ROLE user2; +SELECT 
create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +ERROR: permission denied for relation user1_table +/* Grant SELECT to user2 */ +SET ROLE user1; +GRANT SELECT ON permissions.user1_table TO user2; +/* Should fail (don't own parent) */ SET ROLE user2; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -NOTICE: sequence "user1_table_seq" does not exist, skipping WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" ERROR: new row violates row-level security policy for table "pathman_config" /* Should be ok */ @@ -58,9 +60,23 @@ WARNING: only the owner or superuser can change partitioning configuration of t /* No rights to insert, should fail */ SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); -/* Have rights, should be ok (bgw connects as user1) */ +ERROR: permission denied for relation user1_table +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); +ERROR: permission denied for parent relation "user1_table" +/* Allow user2 to create partitions */ SET ROLE user1; GRANT INSERT ON permissions.user1_table TO user2; +/* Should be able to prepend a partition */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); + prepend_range_partition +--------------------------- + permissions.user1_table_4 +(1 row) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; id | a @@ -68,11 +84,18 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; 35 | 0 (1 row) -SELECT relacl FROM pg_class WHERE oid = 'permissions.user1_table_4'::regclass; - relacl --------------------------------------- - {user1=arwdDxt/user1,user2=ar/user1} -(1 row) +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT partition FROM pathman_partition_list + 
WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_max::int DESC + LIMIT 3) +ORDER BY relname; /* we also check ACL for "user1_table_2" */ + relname | relacl +---------------+-------------------------------------- + user1_table_2 | {user1=arwdDxt/user1,user2=r/user1} + user1_table_5 | {user1=arwdDxt/user1,user2=ar/user1} + user1_table_6 | {user1=arwdDxt/user1,user2=ar/user1} +(3 rows) /* Try to drop partition, should fail */ SELECT drop_range_partition('permissions.user1_table_4'); @@ -95,11 +118,12 @@ SELECT drop_partitions('permissions.user1_table'); NOTICE: function permissions.user1_table_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from permissions.user1_table_1 NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_3 -NOTICE: 2 rows copied from permissions.user1_table_4 +NOTICE: 0 rows copied from permissions.user1_table_4 +NOTICE: 0 rows copied from permissions.user1_table_5 +NOTICE: 1 rows copied from permissions.user1_table_6 drop_partitions ----------------- - 4 + 5 (1 row) /* Switch to #2 */ diff --git a/range.sql b/range.sql index 54d6f342..fbe8a2f2 100644 --- a/range.sql +++ b/range.sql @@ -138,14 +138,14 @@ BEGIN v_atttype::TEXT); END IF; - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) VALUES (parent_relid, attribute, 2, p_interval::TEXT); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + /* Create first partition */ FOR i IN 1..p_count LOOP @@ -253,14 +253,14 @@ BEGIN end_value); END IF; - /* Create sequence for child partitions names */ - PERFORM 
@extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) VALUES (parent_relid, attribute, 2, p_interval::TEXT); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + /* create first partition */ FOR i IN 1..p_count LOOP @@ -327,14 +327,14 @@ BEGIN start_value, end_value); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) VALUES (parent_relid, attribute, 2, p_interval::TEXT); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + WHILE start_value <= end_value LOOP PERFORM @extschema@.create_single_range_partition( @@ -397,14 +397,14 @@ BEGIN start_value, end_value); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) VALUES (parent_relid, attribute, 2, p_interval::TEXT); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + WHILE start_value <= end_value LOOP EXECUTE diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index d784eca0..cfd1d66b 100644 --- a/sql/pathman_permissions.sql +++ 
b/sql/pathman_permissions.sql @@ -9,17 +9,21 @@ CREATE ROLE user2 LOGIN; GRANT USAGE, CREATE ON SCHEMA permissions TO user1; GRANT USAGE, CREATE ON SCHEMA permissions TO user2; -ALTER DEFAULT PRIVILEGES FOR ROLE user1 -IN SCHEMA permissions -GRANT SELECT, INSERT ON TABLES -TO user2; /* Switch to #1 */ SET ROLE user1; CREATE TABLE permissions.user1_table(id serial, a int); INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; -/* Should fail */ +/* Should fail (can't SELECT) */ +SET ROLE user2; +SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + +/* Grant SELECT to user2 */ +SET ROLE user1; +GRANT SELECT ON permissions.user1_table TO user2; + +/* Should fail (don't own parent) */ SET ROLE user2; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); @@ -46,12 +50,27 @@ WHERE partrel = 'permissions.user1_table'::regclass; SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); -/* Have rights, should be ok (bgw connects as user1) */ +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); + +/* Allow user2 to create partitions */ SET ROLE user1; GRANT INSERT ON permissions.user1_table TO user2; + +/* Should be able to prepend a partition */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); + +/* Have rights, should be ok (parent's ACL is shared by new children) */ SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; -SELECT relacl FROM pg_class WHERE oid = 'permissions.user1_table_4'::regclass; +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT partition FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_max::int DESC + LIMIT 3) +ORDER BY relname; /* we also check ACL for "user1_table_2" */ /* Try to drop partition, should fail */ SELECT 
drop_range_partition('permissions.user1_table_4'); diff --git a/src/partition_creation.c b/src/partition_creation.c index 60d2efc5..653342d6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -17,8 +17,10 @@ #include "access/htup_details.h" #include "access/reloptions.h" +#include "access/sysattr.h" #include "access/xact.h" #include "catalog/heap.h" +#include "catalog/pg_authid.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" #include "commands/event_trigger.h" @@ -32,7 +34,9 @@ #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/datum.h" +#include "utils/fmgroids.h" #include "utils/jsonb.h" +#include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" @@ -71,6 +75,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static void copy_acl_privileges(Oid parent_relid, Oid partition_relid); static Constraint *make_constraint_common(char *name, Node *raw_expr); @@ -603,11 +608,31 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { Datum part_num; Oid part_seq_relid; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ part_seq_relid = get_relname_relid(build_sequence_name_internal(parent_relid), parent_nsp); + + /* Do we have to escalate privileges? 
*/ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Become superuser in order to bypass sequence ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + + /* Get next integer for partition name */ part_num = DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(part_seq_relid)); + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + return psprintf("%s_%u", get_rel_name(parent_relid), DatumGetInt32(part_num)); } @@ -644,6 +669,9 @@ create_single_partition_internal(Oid parent_relid, List *create_stmts; ListCell *lc; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ /* Lock parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -699,6 +727,27 @@ create_single_partition_internal(Oid parent_relid, create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; + /* Do we have to escalate privileges? 
*/ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Check that user's allowed to spawn partitions */ + if (ACLCHECK_OK != pg_class_aclcheck(parent_relid, save_userid, + ACL_SPAWN_PARTITIONS)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for parent relation \"%s\"", + get_rel_name_or_relid(parent_relid)), + errdetail("user is not allowed to create new partitions"), + errhint("consider granting INSERT privilege"))); + + /* Become superuser in order to bypass various ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + /* Generate columns using the parent table */ create_stmts = transformCreateStmt(&create_stmt, NULL); @@ -723,6 +772,12 @@ create_single_partition_internal(Oid parent_relid, /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); + + /* Make changes visible */ + CommandCounterIncrement(); + + /* Copy ACL privileges of the parent table */ + copy_acl_privileges(parent_relid, partition_relid); } else if (IsA(cur_stmt, CreateForeignTableStmt)) { @@ -747,6 +802,10 @@ create_single_partition_internal(Oid parent_relid, CommandCounterIncrement(); } + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + return partition_relid; } @@ -798,6 +857,82 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) return table_addr; } +/* Copy ACL privileges of parent table */ +static void +copy_acl_privileges(Oid parent_relid, Oid partition_relid) +{ + Relation pg_class_rel; + + TupleDesc pg_class_desc; + + HeapTuple htup; + + ScanKeyData skey; + SysScanDesc scan; + + Datum acl_datum; + bool acl_null; + + pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + + pg_class_desc = RelationGetDescr(pg_class_rel); + + htup = SearchSysCache1(RELOID, 
ObjectIdGetDatum(parent_relid)); + if (!HeapTupleIsValid(htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + /* Get parent's ACL */ + acl_datum = heap_getattr(htup, Anum_pg_class_relacl, pg_class_desc, &acl_null); + + /* Copy datum if it's not NULL */ + if (!acl_null) + acl_datum = datumCopy(acl_datum, + pg_class_desc->attrs[Anum_pg_class_relacl - 1]->attbyval, + pg_class_desc->attrs[Anum_pg_class_relacl - 1]->attlen); + + /* Release 'htup' */ + ReleaseSysCache(htup); + + /* Search for 'partition_relid' */ + ScanKeyInit(&skey, + ObjectIdAttributeNumber, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition_relid)); + + scan = systable_beginscan(pg_class_rel, ClassOidIndexId, true, + GetLatestSnapshot(), 1, &skey); + + /* There should be exactly one tuple (our child) */ + if (HeapTupleIsValid(htup = systable_getnext(scan))) + { + ItemPointerData iptr; + Datum values[Natts_pg_class] = { (Datum) 0 }; + bool nulls[Natts_pg_class] = { false }; + bool replaces[Natts_pg_class] = { false }; + + /* Copy ItemPointer of this tuple */ + iptr = htup->t_self; + + values[Anum_pg_class_relacl - 1] = acl_datum; /* ACL array */ + nulls[Anum_pg_class_relacl - 1] = acl_null; /* do we have ACL? 
*/ + replaces[Anum_pg_class_relacl - 1] = true; + + /* Build new tuple with parent's ACL */ + htup = heap_modify_tuple(htup, RelationGetDescr(pg_class_rel), + values, nulls, replaces); + + /* Update child's tuple */ + simple_heap_update(pg_class_rel, &iptr, htup); + + /* Don't forget to update indexes */ + CatalogUpdateIndexes(pg_class_rel, htup); + } + + systable_endscan(scan); + + heap_close(pg_class_rel, RowExclusiveLock); +} + /* Copy foreign keys of parent table */ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid) diff --git a/src/partition_creation.h b/src/partition_creation.h index 23d0ce48..18ba5beb 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -14,6 +14,10 @@ #include "nodes/parsenodes.h" +/* ACL privilege for partition creation */ +#define ACL_SPAWN_PARTITIONS ACL_INSERT + + /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); From 5531ce08d14e8c72f2f769c6c2f74162d712f7f8 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 29 Nov 2016 12:53:57 +0300 Subject: [PATCH 0094/1124] regression tests for half-open ranges --- expected/pathman_basic.out | 58 +++++++++++++++++++++++++++++++++++++- range.sql | 4 +-- sql/pathman_basic.sql | 14 +++++++++ src/relation_info.h | 2 -- 4 files changed, 73 insertions(+), 5 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4c7f98a6..b00c4b7a 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1452,6 +1452,62 @@ CREATE TABLE test.range_rel_test2 ( dt TIMESTAMP); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); ERROR: partition must have the exact same structure as parent +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 
'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | partattr | range_min | range_max +----------------+-------------------------------+----------+----------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | NULL | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 
00:00:00 2015 | NULL +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + /* * Zero partitions count and adding partitions with specified name */ @@ -1560,7 +1616,7 @@ NOTICE: 0 rows copied from test.num_range_rel_6 DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 9 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, diff --git a/range.sql b/range.sql index 8d758759..7592c22b 100644 --- a/range.sql +++ b/range.sql @@ -765,7 +765,7 @@ BEGIN INTO p_range; IF p_range[2] IS NULL THEN - RAISE EXCEPTION 'Cannot append partition because last partition is half open'; + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; END IF; IF @extschema@.is_date_type(p_atttype) THEN @@ -879,7 +879,7 @@ BEGIN INTO p_range; IF p_range[1] IS NULL THEN - RAISE EXCEPTION 'Cannot prepend partition because first partition is half open'; + RAISE EXCEPTION 'Cannot prepend partition because first partition''s range is half open'; END IF; IF @extschema@.is_date_type(p_atttype) THEN diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f5d6fa42..6d815352 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -398,6 +398,20 @@ CREATE TABLE test.range_rel_test2 ( dt TIMESTAMP); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, 
'2014-01-01'::DATE); +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); +SELECT pathman.append_range_partition('test.range_rel'); +SELECT pathman.prepend_range_partition('test.range_rel'); +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + /* * Zero partitions count and adding partitions with specified name */ diff --git a/src/relation_info.h b/src/relation_info.h index 6981c6d9..ab063dca 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -56,8 +56,6 @@ typedef struct /* * Comparison macros for bounds - * If both bounds are minus infinite or plus infinite then they are equal. - * Else call original comparison function. 
*/ inline static int8_t cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) From 5d11c16c7fea3d51bba434544b9bc4778a75a926 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 29 Nov 2016 14:28:32 +0300 Subject: [PATCH 0095/1124] replaced spaces with tabs in python tests --- tests/python/partitioning_test.py | 68 +++++++++++++++---------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index bea6d706..f360a03c 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -354,13 +354,13 @@ def test_foreign_table(self): master.psql('postgres', 'create extension pg_pathman') master.psql('postgres', 'create extension postgres_fdw') - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions master.psql( 'postgres', '''create table abc(id serial, name text); @@ -413,33 +413,33 @@ def test_foreign_table(self): # Testing drop partitions (including foreign partitions) master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, 
name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - '1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql( + 'postgres', + '''create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2)''') + fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') + + master.safe_psql( + 'postgres', + '''import foreign schema public limit to (f_hash_test) + from server fserv into public''' + ) + master.safe_psql( + 'postgres', + 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') + master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + '1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' + ) + master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') def test_parallel_nodes(self): """Test parallel queries under partitions""" From 966e0c3fc9263e022bee86f0761a0b6152695da0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Nov 2016 16:01:40 +0300 Subject: [PATCH 0096/1124] improve function copy_acl_privileges(): copy ACL for each column of parent --- expected/pathman_permissions.out | 21 ++++- sql/pathman_permissions.sql | 9 ++- src/partition_creation.c | 135 ++++++++++++++++++++++++++++--- 3 files changed, 151 insertions(+), 14 deletions(-) diff --git 
a/expected/pathman_permissions.out b/expected/pathman_permissions.out index d2fc4886..ca75a7e5 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -68,6 +68,7 @@ ERROR: permission denied for parent relation "user1_table" /* Allow user2 to create partitions */ SET ROLE user1; GRANT INSERT ON permissions.user1_table TO user2; +GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ /* Should be able to prepend a partition */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); @@ -76,6 +77,24 @@ SELECT prepend_range_partition('permissions.user1_table'); permissions.user1_table_4 (1 row) +SELECT attname, attacl from pg_attribute +WHERE attrelid = (SELECT partition FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+----------------- + a | {user2=w/user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + /* Have rights, should be ok (parent's ACL is shared by new children) */ SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; @@ -87,7 +106,7 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT partition FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS - ORDER BY range_max::int DESC + ORDER BY range_max::int DESC /* append */ LIMIT 3) ORDER BY relname; /* we also check ACL for "user1_table_2" */ relname | relacl diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index cfd1d66b..75c2f935 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -57,10 +57,17 @@ SELECT prepend_range_partition('permissions.user1_table'); /* Allow user2 to create partitions */ SET ROLE user1; GRANT INSERT ON permissions.user1_table TO 
user2; +GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ /* Should be able to prepend a partition */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); +SELECT attname, attacl from pg_attribute +WHERE attrelid = (SELECT partition FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ /* Have rights, should be ok (parent's ACL is shared by new children) */ SET ROLE user2; @@ -68,7 +75,7 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT partition FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS - ORDER BY range_max::int DESC + ORDER BY range_max::int DESC /* append */ LIMIT 3) ORDER BY relname; /* we also check ACL for "user1_table_2" */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 653342d6..2ada6559 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -669,6 +669,7 @@ create_single_partition_internal(Oid parent_relid, List *create_stmts; ListCell *lc; + /* Current user and security context */ Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ @@ -861,21 +862,29 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) static void copy_acl_privileges(Oid parent_relid, Oid partition_relid) { - Relation pg_class_rel; + Relation pg_class_rel, + pg_attribute_rel; - TupleDesc pg_class_desc; + TupleDesc pg_class_desc, + pg_attribute_desc; HeapTuple htup; - - ScanKeyData skey; + ScanKeyData skey[2]; SysScanDesc scan; Datum acl_datum; bool acl_null; + Snapshot snapshot; + pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + pg_attribute_rel = heap_open(AttributeRelationId, RowExclusiveLock); + + /* Get most recent snapshot */ + snapshot = 
RegisterSnapshot(GetLatestSnapshot()); pg_class_desc = RelationGetDescr(pg_class_rel); + pg_attribute_desc = RelationGetDescr(pg_attribute_rel); htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); if (!HeapTupleIsValid(htup)) @@ -886,21 +895,25 @@ copy_acl_privileges(Oid parent_relid, Oid partition_relid) /* Copy datum if it's not NULL */ if (!acl_null) - acl_datum = datumCopy(acl_datum, - pg_class_desc->attrs[Anum_pg_class_relacl - 1]->attbyval, - pg_class_desc->attrs[Anum_pg_class_relacl - 1]->attlen); + { + Form_pg_attribute acl_column; + + acl_column = pg_class_desc->attrs[Anum_pg_class_relacl - 1]; + + acl_datum = datumCopy(acl_datum, acl_column->attbyval, acl_column->attlen); + } /* Release 'htup' */ ReleaseSysCache(htup); /* Search for 'partition_relid' */ - ScanKeyInit(&skey, + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partition_relid)); - scan = systable_beginscan(pg_class_rel, ClassOidIndexId, true, - GetLatestSnapshot(), 1, &skey); + scan = systable_beginscan(pg_class_rel, ClassOidIndexId, + true, snapshot, 1, skey); /* There should be exactly one tuple (our child) */ if (HeapTupleIsValid(htup = systable_getnext(scan))) @@ -918,8 +931,7 @@ copy_acl_privileges(Oid parent_relid, Oid partition_relid) replaces[Anum_pg_class_relacl - 1] = true; /* Build new tuple with parent's ACL */ - htup = heap_modify_tuple(htup, RelationGetDescr(pg_class_rel), - values, nulls, replaces); + htup = heap_modify_tuple(htup, pg_class_desc, values, nulls, replaces); /* Update child's tuple */ simple_heap_update(pg_class_rel, &iptr, htup); @@ -930,7 +942,106 @@ copy_acl_privileges(Oid parent_relid, Oid partition_relid) systable_endscan(scan); + + /* Search for 'parent_relid's columns */ + ScanKeyInit(&skey[0], + Anum_pg_attribute_attrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(parent_relid)); + + /* Consider only user-defined columns (>0) */ + ScanKeyInit(&skey[1], + Anum_pg_attribute_attnum, + 
BTEqualStrategyNumber, F_INT2GT, + Int16GetDatum(InvalidAttrNumber)); + + scan = systable_beginscan(pg_attribute_rel, + AttributeRelidNumIndexId, + true, snapshot, 2, skey); + + /* Go through the list of parent's columns */ + while (HeapTupleIsValid(htup = systable_getnext(scan))) + { + ScanKeyData subskey[2]; + SysScanDesc subscan; + HeapTuple subhtup; + + AttrNumber cur_attnum; + bool cur_attnum_null; + + /* Get parent column's ACL */ + acl_datum = heap_getattr(htup, Anum_pg_attribute_attacl, + pg_attribute_desc, &acl_null); + + /* Copy datum if it's not NULL */ + if (!acl_null) + { + Form_pg_attribute acl_column; + + acl_column = pg_attribute_desc->attrs[Anum_pg_attribute_attacl - 1]; + + acl_datum = datumCopy(acl_datum, + acl_column->attbyval, + acl_column->attlen); + } + + /* Fetch number of current column */ + cur_attnum = DatumGetInt16(heap_getattr(htup, Anum_pg_attribute_attnum, + pg_attribute_desc, &cur_attnum_null)); + Assert(cur_attnum_null == false); /* must not be NULL! */ + + /* Search for 'partition_relid' */ + ScanKeyInit(&subskey[0], + Anum_pg_attribute_attrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition_relid)); + + /* Search for 'partition_relid's columns */ + ScanKeyInit(&subskey[1], + Anum_pg_attribute_attnum, + BTEqualStrategyNumber, F_INT2EQ, + Int16GetDatum(cur_attnum)); + + subscan = systable_beginscan(pg_attribute_rel, + AttributeRelidNumIndexId, + true, snapshot, 2, subskey); + + /* There should be exactly one tuple (our child's column) */ + if (HeapTupleIsValid(subhtup = systable_getnext(subscan))) + { + ItemPointerData iptr; + Datum values[Natts_pg_attribute] = { (Datum) 0 }; + bool nulls[Natts_pg_attribute] = { false }; + bool replaces[Natts_pg_attribute] = { false }; + + /* Copy ItemPointer of this tuple */ + iptr = subhtup->t_self; + + values[Anum_pg_attribute_attacl - 1] = acl_datum; /* ACL array */ + nulls[Anum_pg_attribute_attacl - 1] = acl_null; /* do we have ACL? 
*/ + replaces[Anum_pg_attribute_attacl - 1] = true; + + /* Build new tuple with parent's ACL */ + subhtup = heap_modify_tuple(subhtup, pg_attribute_desc, + values, nulls, replaces); + + /* Update child's tuple */ + simple_heap_update(pg_attribute_rel, &iptr, subhtup); + + /* Don't forget to update indexes */ + CatalogUpdateIndexes(pg_attribute_rel, subhtup); + } + + systable_endscan(subscan); + } + + systable_endscan(scan); + + /* Don't forget to free snapshot */ + UnregisterSnapshot(snapshot); + heap_close(pg_class_rel, RowExclusiveLock); + heap_close(pg_attribute_rel, RowExclusiveLock); } /* Copy foreign keys of parent table */ From 6b88f51b3f96eb7749c3162e19fb5813950e7a7f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Nov 2016 17:36:51 +0300 Subject: [PATCH 0097/1124] remove useless functions and dead code, refactoring and locks for function replace_hash_partition() --- hash.sql | 101 +++++++++++++++++++++++--------------------- init.sql | 20 --------- src/pl_hash_funcs.c | 73 +++++++++----------------------- 3 files changed, 73 insertions(+), 121 deletions(-) diff --git a/hash.sql b/hash.sql index 3e6db21b..5790e68e 100644 --- a/hash.sql +++ b/hash.sql @@ -18,14 +18,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ -DECLARE - v_child_relname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - -- v_atttype REGTYPE; - -- v_hashfunc REGPROC; - v_init_callback REGPROCEDURE; - BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -40,13 +32,6 @@ BEGIN attribute := lower(attribute); PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - /* Fetch atttype and its hash function */ - -- v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); - -- v_hashfunc := @extschema@.get_type_hash_func(v_atttype); - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Insert new entry to 
pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) VALUES (parent_relid, attribute, 1); @@ -82,21 +67,26 @@ CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( RETURNS REGCLASS AS $$ DECLARE - v_attname TEXT; + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ rel_persistence CHAR; - v_init_callback REGPROCEDURE; - v_parent_relid REGCLASS; - v_part_count INT; - v_part_num INT; + p_init_callback REGPROCEDURE; + BEGIN PERFORM @extschema@.validate_relname(old_partition); PERFORM @extschema@.validate_relname(new_partition); /* Parent relation */ - v_parent_relid := @extschema@.get_parent_of_partition(old_partition); + parent_relid := @extschema@.get_parent_of_partition(old_partition); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent_relid); + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class @@ -108,52 +98,54 @@ BEGIN END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.validate_relations_equality(v_parent_relid, new_partition) THEN + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; END IF; /* Get partitioning key */ - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = v_parent_relid; - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent_relid::TEXT; + part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF 
part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; - /* Calculate partitions count and old partition's number */ - v_part_count := count(*) FROM @extschema@.pathman_partition_list WHERE parent = v_parent_relid; - v_part_num := @extschema@.get_partition_hash(v_parent_relid, old_partition); + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; /* Detach old partition */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, v_parent_relid); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT IF EXISTS %s', - old_partition, - @extschema@.build_check_constraint_name(old_partition::REGCLASS, - v_attname)); - - /* Attach new one */ - EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, v_parent_relid); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', new_partition, - @extschema@.build_check_constraint_name(new_partition::regclass, - v_attname), - @extschema@.build_hash_condition(new_partition::regclass, - v_attname, - v_part_count, - v_part_num)); + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) SELECT coalesce(init_callback, 0::REGPROCEDURE) FROM stub_callback LEFT JOIN @extschema@.pathman_config_params AS 
params - ON params.partrel = v_parent_relid - INTO v_init_callback; + ON params.partrel = parent_relid + INTO p_init_callback; - PERFORM @extschema@.invoke_on_partition_created_callback(v_parent_relid, + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, new_partition, - v_init_callback); + p_init_callback); /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(v_parent_relid); + PERFORM @extschema@.on_update_partitions(parent_relid); RETURN new_partition; END @@ -292,3 +284,14 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + parent_relid REGCLASS, + attribute TEXT, + part_count INT4, + part_idx INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; diff --git a/init.sql b/init.sql index 43688f76..c933ce9b 100644 --- a/init.sql +++ b/init.sql @@ -783,23 +783,3 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; - -/* - * Build hash condition for a CHECK CONSTRAINT - */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( - parent_relid REGCLASS, - attname TEXT, - partitions_count INT, - partition_number INT) -RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' -LANGUAGE C; - -/* - * Returns hash value for specified partition (0..N) - */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_hash( - parent_relid REGCLASS, - partition REGCLASS) -RETURNS INT AS 'pg_pathman', 'get_partition_hash' -LANGUAGE C; diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 07c54d7e..772f9ada 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -22,10 +22,11 @@ /* Function declarations */ 
PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); + PG_FUNCTION_INFO_V1( get_type_hash_func ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); + PG_FUNCTION_INFO_V1( build_hash_condition ); -PG_FUNCTION_INFO_V1( get_partition_hash ); /* @@ -90,68 +91,36 @@ get_hash_part_idx(PG_FUNCTION_ARGS) Datum build_hash_condition(PG_FUNCTION_ARGS) { + Oid parent = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); + uint32 part_count = PG_GETARG_UINT32(2); + uint32 part_idx = PG_GETARG_UINT32(3); + TypeCacheEntry *tce; + Oid attype; + char *attname_cstring = text_to_cstring(attname); - Oid parent = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - uint32 partitions_count = PG_GETARG_UINT32(2); - uint32 partition_number = PG_GETARG_UINT32(3); - Oid attyp; - char *result; + char *result; - if (partition_number >= partitions_count) - elog(ERROR, - "Partition number cannot exceed partitions count"); + if (part_idx >= part_count) + elog(ERROR, "'part_idx' must be lower than 'part_count'"); /* Get attribute type and its hash function oid */ - attyp = get_attribute_type(parent, text_to_cstring(attname), false); - if (attyp == InvalidOid) - elog(ERROR, - "Relation '%s' has no attribute '%s'", - get_rel_name(parent), - text_to_cstring(attname)); + attype = get_attribute_type(parent, attname_cstring, false); + if (attype == InvalidOid) + elog(ERROR, "relation \"%s\" has no attribute \"%s\"", + get_rel_name(parent), + attname_cstring); - tce = lookup_type_cache(attyp, TYPECACHE_HASH_PROC); + tce = lookup_type_cache(attype, TYPECACHE_HASH_PROC); /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", get_namespace_name(get_pathman_schema()), get_func_name(tce->hash_proc), - text_to_cstring(attname), - partitions_count, - partition_number); + attname_cstring, + part_count, + part_idx); PG_RETURN_TEXT_P(cstring_to_text(result)); } - -/* - * Returns hash value for specified partition (0..N) - */ -Datum 
-get_partition_hash(PG_FUNCTION_ARGS) -{ - const PartRelationInfo *prel; - Oid parent = PG_GETARG_OID(0); - Oid partition = PG_GETARG_OID(1); - Oid *children; - int i; - - /* Validate partition type */ - prel = get_pathman_relation_info(parent); - if (!prel || prel->parttype != PT_HASH) - elog(ERROR, - "Relation '%s' isn't partitioned by hash", - get_rel_name(parent)); - - /* Searching for partition */ - children = PrelGetChildrenArray(prel); - for (i=0; ichildren_count; i++) - if (children[i] == partition) - PG_RETURN_UINT32(i); - - /* If we get here then there is no such partition for specified parent */ - elog(ERROR, - "Relation '%s' isn't a part of partitioned table '%s'", - get_rel_name(parent), - get_rel_name(partition)); -} From dc0a854698ccc65cae52f99d7b95f7301f5e6240 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Nov 2016 18:06:33 +0300 Subject: [PATCH 0098/1124] change signature of function build_hash_condition(), more tests --- expected/pathman_calamity.out | 38 +++++++++++++++++++++++++++++++++++ hash.sql | 6 +++--- range.sql | 4 ++-- sql/pathman_calamity.sql | 12 +++++++++++ src/pl_hash_funcs.c | 20 ++++++++---------- 5 files changed, 63 insertions(+), 17 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e82e295d..83ea60c0 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,6 +12,44 @@ SELECT debug_capture(); set client_min_messages = NOTICE; /* create table to be partitioned */ CREATE TABLE calamity.part_test(val serial); +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT 
build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition('val', 10, 20); + build_range_condition +---------------------------- + val >= '10' AND val < '20' +(1 row) + +SELECT build_range_condition('val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); validate_relname diff --git a/hash.sql b/hash.sql index 5790e68e..5e6cbfc8 100644 --- a/hash.sql +++ b/hash.sql @@ -289,9 +289,9 @@ LANGUAGE C STRICT; * Build hash condition for a CHECK CONSTRAINT */ CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( - parent_relid REGCLASS, + attribute_type REGTYPE, attribute TEXT, - part_count INT4, - part_idx INT4) + partitions_count INT4, + partitions_index INT4) RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index fbe8a2f2..2141668f 100644 --- a/range.sql +++ b/range.sql @@ -1228,11 +1228,11 @@ SET client_min_messages = WARNING; * Construct CHECK constraint condition for a range partition. 
*/ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - p_attname TEXT, + attribute TEXT, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS 'pg_pathman', 'build_range_condition' -LANGUAGE C; +LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index e2e080df..9beb0861 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -14,6 +14,18 @@ set client_min_messages = NOTICE; CREATE TABLE calamity.part_test(val serial); +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); +SELECT build_hash_condition('text', 'val', 10, 1); +SELECT build_hash_condition('int4', 'val', 1, 1); +SELECT build_hash_condition('int4', 'val', 10, 20); +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + +/* check function build_range_condition() */ +SELECT build_range_condition('val', 10, 20); +SELECT build_range_condition('val', 10, NULL) IS NULL; + /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); SELECT validate_relname(1::REGCLASS); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 772f9ada..882f26b0 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -91,28 +91,24 @@ get_hash_part_idx(PG_FUNCTION_ARGS) Datum build_hash_condition(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); + Oid atttype = PG_GETARG_OID(0); text *attname = PG_GETARG_TEXT_P(1); - uint32 part_count = PG_GETARG_UINT32(2); - uint32 part_idx = PG_GETARG_UINT32(3); + uint32 part_count = PG_GETARG_UINT32(2), + part_idx = PG_GETARG_UINT32(3); TypeCacheEntry *tce; - Oid attype; char *attname_cstring = text_to_cstring(attname); char *result; if (part_idx >= part_count) - elog(ERROR, "'part_idx' must be lower than 'part_count'"); + elog(ERROR, "'partition_index' must be lower than 
'partitions_count'"); - /* Get attribute type and its hash function oid */ - attype = get_attribute_type(parent, attname_cstring, false); - if (attype == InvalidOid) - elog(ERROR, "relation \"%s\" has no attribute \"%s\"", - get_rel_name(parent), - attname_cstring); + tce = lookup_type_cache(atttype, TYPECACHE_HASH_PROC); - tce = lookup_type_cache(attype, TYPECACHE_HASH_PROC); + /* Check that HASH function exists */ + if (!OidIsValid(tce->hash_proc)) + elog(ERROR, "no hash function for type %s", format_type_be(atttype)); /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", From 2a319697c4b7aa4eeae071ecb6f068b84e4f1ef5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Nov 2016 18:45:05 +0300 Subject: [PATCH 0099/1124] add parameter 'lock_parent' to function replace_hash_partition() --- hash.sql | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/hash.sql b/hash.sql index 5e6cbfc8..682b145f 100644 --- a/hash.sql +++ b/hash.sql @@ -59,11 +59,14 @@ SET client_min_messages = WARNING; /* * Replace hash partition with another one. It could be useful in case when - * someone wants to attach foreign table as a partition + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? 
*/ CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( old_partition REGCLASS, - new_partition REGCLASS) + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) RETURNS REGCLASS AS $$ DECLARE @@ -81,8 +84,13 @@ BEGIN /* Parent relation */ parent_relid := @extschema@.get_parent_of_partition(old_partition); - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; /* Acquire data modification lock (prevent further modifications) */ PERFORM @extschema@.prevent_relation_modification(old_partition); From 95751076f390998ae3bd45165a55dfc79cf0f0b8 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 29 Nov 2016 19:57:16 +0300 Subject: [PATCH 0100/1124] fix a bug related to the auto removal of the record from pathman_config on DROP COLUMN --- expected/pathman_basic.out | 26 +++++++++++++++++++------- init.sql | 2 +- sql/pathman_basic.sql | 9 ++++++++- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index ee723cc6..edaaa435 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1672,7 +1672,8 @@ NOTICE: drop cascades to 7 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL); + dt TIMESTAMP NOT NULL, + data TEXT); SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); create_range_partitions ------------------------- @@ -1692,9 +1693,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; (3 rows) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; - id | dt ------+-------------------------- - 137 | Mon Dec 15 
00:00:00 2014 + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; @@ -1706,9 +1707,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; (3 rows) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; - id | dt -----+-------------------------- - 74 | Sun Mar 15 00:00:00 2015 + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | (1 row) SELECT pathman.set_auto('test.range_rel', false); @@ -1726,6 +1727,17 @@ SELECT pathman.set_auto('test.range_rel', true); (1 row) INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | attname | parttype | range_interval +----------------+---------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 20 other objects SELECT * FROM pathman.pathman_config; diff --git a/init.sql b/init.sql index 43688f76..4e7305af 100644 --- a/init.sql +++ b/init.sql @@ -464,7 +464,7 @@ BEGIN SELECT array_agg(cfg.partrel) INTO relids FROM pg_event_trigger_dropped_objects() AS events JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid; + WHERE events.classid = pg_class_oid AND events.objsubid = 0; /* Cleanup pathman_config */ DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index de6a6e54..8918568a 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -480,7 +480,8 @@ DROP TABLE test.range_rel CASCADE; /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL); + 
dt TIMESTAMP NOT NULL, + data TEXT); SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); INSERT INTO test.range_rel (dt) SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); @@ -498,6 +499,12 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); SELECT pathman.set_auto('test.range_rel', true); INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; DROP TABLE test.range_rel CASCADE; SELECT * FROM pathman.pathman_config; From d1556b4962a22fbdd67df204d77cb487fc560c62 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Nov 2016 15:00:12 +0300 Subject: [PATCH 0101/1124] refactoring, add column 'spawn_using_bgw' to PATHMAN_CONFIG_PARAMS --- expected/pathman_callbacks.out | 47 +++++++++--- expected/pathman_permissions.out | 6 +- init.sql | 50 +++++++++---- sql/pathman_callbacks.sql | 11 ++- src/init.c | 3 +- src/init.h | 7 ++ src/partition_creation.c | 124 +++++++++++++++++++++++-------- src/partition_creation.h | 1 + src/partition_filter.c | 24 ++---- src/partition_filter.h | 3 +- src/pathman.h | 3 +- src/pl_funcs.c | 8 +- src/relation_info.c | 4 +- src/relation_info.h | 1 - src/utility_stmt_hooking.c | 3 +- src/utils.c | 36 --------- src/utils.h | 1 - 17 files changed, 201 insertions(+), 131 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 6a997e9e..e1a29dce 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -26,26 +26,52 @@ SELECT set_init_callback('callbacks.abc', (1 row) INSERT INTO callbacks.abc VALUES (123, 1); -INSERT INTO callbacks.abc VALUES (223, 1); +INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": 
"abc_3", "range_max": "301", "range_min": "201"} +SELECT set_spawn_using_bgw('callbacks.abc', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT get_number_of_partitions('callbacks.abc'); + get_number_of_partitions +-------------------------- + 3 +(1 row) + +INSERT INTO callbacks.abc VALUES (323, 1); +SELECT get_number_of_partitions('callbacks.abc'); /* +1 partition (created by BGW) */ + get_number_of_partitions +-------------------------- + 4 +(1 row) + +SELECT set_spawn_using_bgw('callbacks.abc', false); + set_spawn_using_bgw +--------------------- + +(1 row) + SELECT append_range_partition('callbacks.abc'); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_4", "range_max": "401", "range_min": "301"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "501", "range_min": "401"} append_range_partition ------------------------ - callbacks.abc_4 + callbacks.abc_5 (1 row) SELECT prepend_range_partition('callbacks.abc'); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "1", "range_min": "-99"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_6", "range_max": "1", "range_min": "-99"} prepend_range_partition ------------------------- - callbacks.abc_5 + callbacks.abc_6 (1 row) -SELECT add_range_partition('callbacks.abc', 401, 502); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_6", "range_max": "502", "range_min": "401"} +SELECT add_range_partition('callbacks.abc', 501, 602); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_7", "range_max": "602", "range_min": "501"} add_range_partition --------------------- - callbacks.abc_6 + callbacks.abc_7 (1 row) SELECT drop_partitions('callbacks.abc'); @@ -53,12 +79,13 @@ NOTICE: function callbacks.abc_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from callbacks.abc_1 NOTICE: 1 rows copied from 
callbacks.abc_2 NOTICE: 1 rows copied from callbacks.abc_3 -NOTICE: 0 rows copied from callbacks.abc_4 +NOTICE: 1 rows copied from callbacks.abc_4 NOTICE: 0 rows copied from callbacks.abc_5 NOTICE: 0 rows copied from callbacks.abc_6 +NOTICE: 0 rows copied from callbacks.abc_7 drop_partitions ----------------- - 6 + 7 (1 row) /* set callback to be called on HASH partitions */ diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index ca75a7e5..ca95e2d1 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -39,9 +39,9 @@ SELECT * FROM pathman_config; (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback --------------------------+---------------+------+--------------- - permissions.user1_table | f | t | - + partrel | enable_parent | auto | init_callback | spawn_using_bgw +-------------------------+---------------+------+---------------+----------------- + permissions.user1_table | f | t | - | f (1 row) /* Should fail */ diff --git a/init.sql b/init.sql index c933ce9b..db647cfd 100644 --- a/init.sql +++ b/init.sql @@ -26,6 +26,20 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( CHECK (parttype IN (1, 2)) /* check for allowed part types */ ); + +/* + * Checks that callback function meets specific requirements. + * Particularly it must have the only JSONB argument and VOID return type. + * + * NOTE: this function is used in CHECK CONSTRAINT. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROC, + raise_error BOOL DEFAULT FALSE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + + /* * Optional parameters for partitioned tables. 
* partrel - regclass (relation type, stored as Oid) @@ -37,10 +51,11 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0 + init_callback REGPROCEDURE NOT NULL DEFAULT 0, + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE + + CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ ); -CREATE UNIQUE INDEX i_pathman_config_params -ON @extschema@.pathman_config_params(partrel); GRANT SELECT, INSERT, UPDATE, DELETE ON @extschema@.pathman_config, @extschema@.pathman_config_params @@ -120,7 +135,7 @@ BEGIN USING relation, value; END $$ -LANGUAGE plpgsql; +LANGUAGE plpgsql STRICT; /* * Include\exclude parent relation in query plan. @@ -159,11 +174,25 @@ CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( RETURNS VOID AS $$ BEGIN - PERFORM @extschema@.validate_on_partition_created_callback(callback); PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); END $$ -LANGUAGE plpgsql; +LANGUAGE plpgsql STRICT; + +/* + * Set 'spawn using BGW' option + */ +CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); +END +$$ +LANGUAGE plpgsql STRICT; + /* * Show all existing parents and partitions. @@ -752,15 +781,6 @@ CREATE OR REPLACE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -/* - * Checks that callback function meets specific requirements. Particularly it - * must have the only JSONB argument and VOID return type. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_on_partition_created_callback( - callback REGPROC) -RETURNS VOID AS 'pg_pathman', 'validate_on_part_init_callback_pl' -LANGUAGE C STRICT; - /* * Invoke init_callback on RANGE partition. 
diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 3aa174cd..290286e6 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -22,11 +22,18 @@ SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback'); INSERT INTO callbacks.abc VALUES (123, 1); -INSERT INTO callbacks.abc VALUES (223, 1); +INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ + +SELECT set_spawn_using_bgw('callbacks.abc', true); +SELECT get_number_of_partitions('callbacks.abc'); +INSERT INTO callbacks.abc VALUES (323, 1); +SELECT get_number_of_partitions('callbacks.abc'); /* +1 partition (created by BGW) */ +SELECT set_spawn_using_bgw('callbacks.abc', false); + SELECT append_range_partition('callbacks.abc'); SELECT prepend_range_partition('callbacks.abc'); -SELECT add_range_partition('callbacks.abc', 401, 502); +SELECT add_range_partition('callbacks.abc', 501, 602); SELECT drop_partitions('callbacks.abc'); diff --git a/src/init.c b/src/init.c index 43825447..fc5bde20 100644 --- a/src/init.c +++ b/src/init.c @@ -128,7 +128,7 @@ init_main_pathman_toggles(void) "Enables automatic partition creation", NULL, &pg_pathman_init_state.auto_partition, - true, + DEFAULT_AUTO, PGC_SUSET, 0, NULL, @@ -730,6 +730,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) Assert(!isnull[Anum_pathman_config_params_enable_parent - 1]); Assert(!isnull[Anum_pathman_config_params_auto - 1]); Assert(!isnull[Anum_pathman_config_params_init_callback - 1]); + Assert(!isnull[Anum_pathman_config_params_spawn_using_bgw - 1]); } /* Clean resources */ diff --git a/src/init.h b/src/init.h index 98213cc3..7318d253 100644 --- a/src/init.h +++ b/src/init.h @@ -87,6 +87,13 @@ extern PathmanInitState pg_pathman_init_state; } while (0) +/* Default column values for PATHMAN_CONFIG_PARAMS */ +#define DEFAULT_ENABLE_PARENT false +#define DEFAULT_AUTO true +#define DEFAULT_INIT_CALLBACK InvalidOid +#define DEFAULT_SPAWN_USING_BGW false + + /* * Save and 
restore PathmanInitState. */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 2ada6559..0d77da87 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -21,6 +21,7 @@ #include "access/xact.h" #include "catalog/heap.h" #include "catalog/pg_authid.h" +#include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" #include "commands/event_trigger.h" @@ -129,7 +130,8 @@ create_single_range_partition_internal(Oid parent_relid, value_type); /* Cook args for init_callback */ - MakeInitCallbackRangeParams(&callback_params, InvalidOid, + MakeInitCallbackRangeParams(&callback_params, + DEFAULT_INIT_CALLBACK, parent_relid, partition_relid, start_value, end_value, value_type); @@ -182,7 +184,8 @@ create_single_hash_partition_internal(Oid parent_relid, value_type); /* Cook args for init_callback */ - MakeInitCallbackHashParams(&callback_params, InvalidOid, + MakeInitCallbackHashParams(&callback_params, + DEFAULT_INIT_CALLBACK, parent_relid, partition_relid); /* Add constraint & execute init_callback */ @@ -233,15 +236,36 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) /* Check that table is partitioned and fetch xmin */ if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) { - bool part_in_prev_xact = + /* Was table partitioned in some previous transaction? 
*/ + bool part_in_prev_xact = TransactionIdPrecedes(rel_xmin, GetCurrentTransactionId()) || TransactionIdEquals(rel_xmin, FrozenTransactionId); + /* Take default values */ + bool spawn_using_bgw = DEFAULT_SPAWN_USING_BGW, + enable_auto = DEFAULT_AUTO; + + /* Values to be extracted from PATHMAN_CONFIG_PARAMS */ + Datum values[Natts_pathman_config_params]; + bool isnull[Natts_pathman_config_params]; + + /* Try fetching options from PATHMAN_CONFIG_PARAMS */ + if (read_pathman_params(relid, values, isnull)) + { + enable_auto = values[Anum_pathman_config_params_auto - 1]; + spawn_using_bgw = values[Anum_pathman_config_params_spawn_using_bgw - 1]; + } + + /* Emit ERROR if automatic partition creation is disabled */ + if (!enable_auto || !IsAutoPartitionEnabled()) + elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + /* * If table has been partitioned in some previous xact AND * we don't hold any conflicting locks, run BGWorker. */ - if (part_in_prev_xact && !xact_bgw_conflicting_lock_exists(relid)) + if (spawn_using_bgw && part_in_prev_xact && + !xact_bgw_conflicting_lock_exists(relid)) { elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); last_partition = create_partitions_for_value_bg_worker(relid, @@ -1385,6 +1409,35 @@ invoke_init_callback_internal(init_callback_params *cb_params) key, val; + /* Fetch & cache callback's Oid if needed */ + if (!cb_params->callback_is_cached) + { + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + + /* Search for init_callback entry in PATHMAN_CONFIG_PARAMS */ + if (read_pathman_params(parent_oid, param_values, param_isnull)) + { + Datum init_cb_datum; /* Oid of init_callback */ + AttrNumber init_cb_attno = Anum_pathman_config_params_init_callback; + + /* Extract Datum storing callback's Oid */ + init_cb_datum = param_values[init_cb_attno - 1]; + + /* Cache init_callback's Oid */ + cb_params->callback = DatumGetObjectId(init_cb_datum); + 
cb_params->callback_is_cached = true; + } + } + + /* No callback is set, exit */ + if (!OidIsValid(cb_params->callback)) + return; + + /* Validate the callback's signature */ + validate_part_callback(cb_params->callback, true); + + /* Generate JSONB we're going to pass to callback */ switch (cb_params->parttype) { case PT_HASH: @@ -1436,33 +1489,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) break; } - /* Fetch & cache callback's Oid if needed */ - if (!cb_params->callback_is_cached) - { - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - - /* Search for init_callback entry in PATHMAN_CONFIG_PARAMS */ - if (read_pathman_params(parent_oid, param_values, param_isnull)) - { - Datum init_cb_datum; /* Oid of init_callback */ - AttrNumber init_cb_attno = Anum_pathman_config_params_init_callback; - - /* Extract Datum storing callback's Oid */ - init_cb_datum = param_values[init_cb_attno - 1]; - - /* Cache init_callback's Oid */ - cb_params->callback = DatumGetObjectId(init_cb_datum); - } - } - - /* No callback is set, exit */ - if (!OidIsValid(cb_params->callback)) - return; - - /* Validate the callback's signature */ - validate_on_part_init_cb(cb_params->callback, true); - + /* Fetch function call data */ fmgr_info(cb_params->callback, &cb_flinfo); InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); @@ -1487,3 +1514,38 @@ invoke_part_callback(init_callback_params *cb_params) elog(ERROR, "Unknown callback type: %u", cb_params->cb_type); } } + +/* + * Checks that callback function meets specific requirements. + * It must have the only JSONB argument and BOOL return type. 
+ */ +bool +validate_part_callback(Oid procid, bool emit_error) +{ + HeapTuple tp; + Form_pg_proc functup; + bool is_ok = true; + + if (procid == DEFAULT_INIT_CALLBACK) + return true; + + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(procid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for function %u", procid); + + functup = (Form_pg_proc) GETSTRUCT(tp); + + if (functup->pronargs != 1 || + functup->proargtypes.values[0] != JSONBOID || + functup->prorettype != VOIDOID) + is_ok = false; + + ReleaseSysCache(tp); + + if (emit_error && !is_ok) + elog(ERROR, + "Callback function must have the following signature: " + "callback(arg JSONB) RETURNS VOID"); + + return is_ok; +} diff --git a/src/partition_creation.h b/src/partition_creation.h index 18ba5beb..5becce25 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -136,3 +136,4 @@ typedef struct void invoke_part_callback(init_callback_params *cb_params); +bool validate_part_callback(Oid procid, bool emit_error); diff --git a/src/partition_filter.c b/src/partition_filter.c index fd5ba001..eefdfd2c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -432,8 +432,7 @@ partition_filter_exec(CustomScanState *node) /* Search for a matching partition */ rri_holder = select_partition_for_insert(prel, &state->result_parts, - value, prel->atttype, - estate, true); + value, prel->atttype, estate); estate->es_result_relation_info = rri_holder->result_rel_info; /* Switch back and clean up per-tuple context */ @@ -478,8 +477,7 @@ ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, ResultPartsStorage *parts_storage, Datum value, Oid value_type, - EState *estate, - bool spawn_partitions) + EState *estate) { MemoryContext old_cxt; ResultRelInfoHolder *rri_holder; @@ -494,21 +492,11 @@ select_partition_for_insert(const PartRelationInfo *prel, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - /* - * If auto partition propagation is enabled 
then try to create - * new partitions for the key - */ - if (prel->auto_partition && IsAutoPartitionEnabled() && spawn_partitions) - { - selected_partid = create_partitions_for_value(PrelParentRelid(prel), - value, prel->atttype); + selected_partid = create_partitions_for_value(PrelParentRelid(prel), + value, prel->atttype); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); - } - else - elog(ERROR, ERR_PART_ATTR_NO_PART, - datum_to_cstring(value, prel->atttype)); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); } else selected_partid = parts[0]; diff --git a/src/partition_filter.h b/src/partition_filter.h index abd1c40e..4ad4296f 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -133,7 +133,6 @@ void partition_filter_explain(CustomScanState *node, ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, ResultPartsStorage *parts_storage, Datum value, Oid value_type, - EState *estate, - bool spawn_partitions); + EState *estate); #endif diff --git a/src/pathman.h b/src/pathman.h index fb0b6c0e..98cc8db1 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -55,11 +55,12 @@ * Definitions for the "pathman_config_params" table. */ #define PATHMAN_CONFIG_PARAMS "pathman_config_params" -#define Natts_pathman_config_params 4 +#define Natts_pathman_config_params 5 #define Anum_pathman_config_params_partrel 1 /* primary key */ #define Anum_pathman_config_params_enable_parent 2 /* include parent into plan */ #define Anum_pathman_config_params_auto 3 /* auto partitions creation */ #define Anum_pathman_config_params_init_callback 4 /* partition action callback */ +#define Anum_pathman_config_params_spawn_using_bgw 5 /* should we use spawn BGW? */ /* * Definitions for the "pathman_partition_list" view. 
diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1103b239..ede9e05d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -59,7 +59,7 @@ PG_FUNCTION_INFO_V1( invalidate_relcache ); PG_FUNCTION_INFO_V1( lock_partitioned_relation ); PG_FUNCTION_INFO_V1( prevent_relation_modification ); -PG_FUNCTION_INFO_V1( validate_on_part_init_callback_pl ); +PG_FUNCTION_INFO_V1( validate_part_callback_pl ); PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); @@ -753,11 +753,9 @@ prevent_relation_modification(PG_FUNCTION_ARGS) * It must have the only JSONB argument and BOOL return type. */ Datum -validate_on_part_init_callback_pl(PG_FUNCTION_ARGS) +validate_part_callback_pl(PG_FUNCTION_ARGS) { - validate_on_part_init_cb(PG_GETARG_OID(0), true); - - PG_RETURN_VOID(); + PG_RETURN_BOOL(validate_part_callback(PG_GETARG_OID(0), PG_GETARG_BOOL(1))); } /* diff --git a/src/relation_info.c b/src/relation_info.c index 29d1e6a0..e20bada6 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -206,13 +206,11 @@ refresh_pathman_relation_info(Oid relid, if (read_pathman_params(relid, param_values, param_isnull)) { prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; - prel->auto_partition = param_values[Anum_pathman_config_params_auto - 1]; } /* Else set default values if they cannot be found */ else { - prel->enable_parent = false; - prel->auto_partition = true; + prel->enable_parent = DEFAULT_ENABLE_PARENT; } /* We've successfully built a cache entry */ diff --git a/src/relation_info.h b/src/relation_info.h index b6796976..771a8056 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -47,7 +47,6 @@ typedef struct Oid key; /* partitioned table's Oid */ bool valid; /* is this entry valid? 
*/ bool enable_parent; /* include parent to the plan */ - bool auto_partition; /* auto partition creation */ uint32 children_count; Oid *children; /* Oids of child partitions */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 26d43b5c..1c0e9eea 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -554,8 +554,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Search for a matching partition */ rri_holder_child = select_partition_for_insert(prel, &parts_storage, values[prel->attnum - 1], - prel->atttype, - estate, true); + prel->atttype, estate); child_result_rel = rri_holder_child->result_rel_info; estate->es_result_relation_info = child_result_rel; diff --git a/src/utils.c b/src/utils.c index ae4574de..1f086f5f 100644 --- a/src/utils.c +++ b/src/utils.c @@ -18,7 +18,6 @@ #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "catalog/pg_operator.h" -#include "catalog/pg_proc.h" #include "catalog/pg_inherits.h" #include "commands/extension.h" #include "miscadmin.h" @@ -312,41 +311,6 @@ get_attribute_type(Oid relid, const char *attname, bool missing_ok) return InvalidOid; } -/* - * Checks that callback function meets specific requirements. - * It must have the only JSONB argument and BOOL return type. 
- */ -bool -validate_on_part_init_cb(Oid procid, bool emit_error) -{ - HeapTuple tp; - Form_pg_proc functup; - bool is_ok = true; - - if (procid == InvalidOid) - return true; - - tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(procid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for function %u", procid); - - functup = (Form_pg_proc) GETSTRUCT(tp); - - if (functup->pronargs != 1 || - functup->proargtypes.values[0] != JSONBOID || - functup->prorettype != VOIDOID) - is_ok = false; - - ReleaseSysCache(tp); - - if (emit_error && !is_ok) - elog(ERROR, - "Callback function must have the following signature: " - "callback(arg JSONB) RETURNS VOID"); - - return is_ok; -} - /* * Check if user can alter/drop specified relation. This function is used to * make sure that current user can change pg_pathman's config. Returns true diff --git a/src/utils.h b/src/utils.h index ac146649..07d516b9 100644 --- a/src/utils.h +++ b/src/utils.h @@ -25,7 +25,6 @@ */ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); -bool validate_on_part_init_cb(Oid procid, bool emit_error); bool check_security_policy_internal(Oid relid, Oid role); /* From bb376845acd12ef5a402faae70a5b98f33826b9d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Nov 2016 15:19:26 +0300 Subject: [PATCH 0102/1124] change default value of argument 'raise_error' for function @extschema@.validate_part_callback() --- init.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init.sql b/init.sql index 4e10dd14..80c6817b 100644 --- a/init.sql +++ b/init.sql @@ -35,7 +35,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( */ CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( callback REGPROC, - raise_error BOOL DEFAULT FALSE) + raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' LANGUAGE C STRICT; From 0de6b2234138344c84effda18e4f27bd09657631 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 
Nov 2016 16:50:48 +0300 Subject: [PATCH 0103/1124] SpawnPartitionsWorker now connects as current user --- src/pathman_workers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index a38e6873..b744ef07 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -253,7 +253,7 @@ create_partitions_bg_worker_segment(Oid relid, Datum value, Oid value_type) /* Initialize BGW args */ args = (SpawnPartitionArgs *) dsm_segment_address(segment); - args->userid = get_rel_owner(relid); + args->userid = GetUserId(); args->result = InvalidOid; args->dbid = MyDatabaseId; args->partitioned_table = relid; From 55e44c0b855dc5c60ab5f32e701aa2b255516bb0 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 30 Nov 2016 18:31:12 +0300 Subject: [PATCH 0104/1124] added the tablespace attribute to the split_range_partition() function; migration script to version 1.2 --- .gitignore | 2 +- Makefile | 7 +- init.sql | 6 +- pg_pathman--1.1--1.2.sql | 1273 +++++++++++++++ pg_pathman--1.1.sql | 2406 +++++++++++++++++++++++++++++ pg_pathman.control | 4 +- range.sql | 4 +- tests/python/partitioning_test.py | 10 + 8 files changed, 3703 insertions(+), 9 deletions(-) create mode 100644 pg_pathman--1.1--1.2.sql create mode 100644 pg_pathman--1.1.sql diff --git a/.gitignore b/.gitignore index af0698c3..54963e0b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,4 @@ regression.out *.pyc *.gcda *.gcno -pg_pathman--1.1.sql +pg_pathman--1.2.sql diff --git a/Makefile b/Makefile index 72d71e44..04f77da3 100644 --- a/Makefile +++ b/Makefile @@ -9,9 +9,12 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/partition_creation.o $(WIN32RES) EXTENSION = pg_pathman -EXTVERSION = 1.1 +EXTVERSION = 1.2 DATA_built = pg_pathman--$(EXTVERSION).sql -DATA = pg_pathman--1.0.sql pg_pathman--1.0--1.1.sql +DATA = pg_pathman--1.0.sql \ + pg_pathman--1.0--1.1.sql \ + pg_pathman--1.1.sql \ + pg_pathman--1.1--1.2.sql PGFILEDESC = 
"pg_pathman - partitioning tool" REGRESS = pathman_basic \ diff --git a/init.sql b/init.sql index 80c6817b..67151581 100644 --- a/init.sql +++ b/init.sql @@ -716,13 +716,13 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS, - partitioned_col INT2) + attribute INT2) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS, - partitioned_col TEXT) + attribute TEXT) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' LANGUAGE C STRICT; @@ -751,7 +751,7 @@ RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache( - OID) + relid OID) RETURNS VOID AS 'pg_pathman' LANGUAGE C STRICT; diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql new file mode 100644 index 00000000..7ecfeb03 --- /dev/null +++ b/pg_pathman--1.1--1.2.sql @@ -0,0 +1,1273 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.1--1.2.sql + * Migration scripts to version 1.2 + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + + +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP INDEX i_pathman_config_params; +DROP FUNCTION @extschema@.partitions_count(REGCLASS); +DROP FUNCTION @extschema@.set_init_callback(REGCLASS, REGPROC); +DROP FUNCTION @extschema@.validate_relname(REGCLASS); +DROP FUNCTION @extschema@.get_schema_qualified_name(REGCLASS, TEXT, TEXT); +DROP FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS); +DROP FUNCTION @extschema@.validate_on_partition_created_callback(REGPROC); +DROP FUNCTION @extschema@.get_sequence_name(TEXT, TEXT); +DROP 
FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT, TEXT); +DROP FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, ANYARRAY); + + +/* ------------------------------------------------------------------------ + * Alter tables + * ----------------------------------------------------------------------*/ +ALTER TABLE @extschema@.pathman_config_params ADD COLUMN spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE; +ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(init_callback)); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION @extschema@.pathman_set_param(REGCLASS, TEXT, ANYELEMENT) STRICT; +ALTER FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT) STRICT; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROC, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROC DEFAULT 0) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS +$$ 
+BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + p_attribute TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN + RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS +$$ +DECLARE + obj record; + pg_class_oid oid; + relids regclass[]; +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + 
SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.validate_relname( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'validate_relname' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + 
PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS +$$ +DECLARE + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + part_attname := 
attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := 
@extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := @extschema@.get_number_of_partitions(parent_relid); + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_attribute_type(parent_relid, attr); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on each partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partitions_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( + parent_relid REGCLASS, + OUT seq_name TEXT) +AS $$ +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + v_min start_value%TYPE; + v_max start_value%TYPE; + v_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + attribute, parent_relid::TEXT) + INTO v_count, v_min, v_max; + + /* Check if column has NULL values */ + IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + RAISE EXCEPTION 
'column "%" contains NULL values', attribute; + END IF; + + /* Check lower boundary */ + IF start_value > v_min THEN + RAISE EXCEPTION 'start value is less than min value of "%"', attribute; + END IF; + + /* Check upper boundary */ + IF end_value <= v_max THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_atttype REGTYPE; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* 
compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', + parent_relid, + attribute, + start_value, + end_value, + v_atttype::TEXT); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Create first partition */ + FOR i IN 1..p_count + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', + v_atttype::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + 
/* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF v_max IS NULL THEN + RAISE EXCEPTION 'column "%" has NULL values', attribute; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* create first partition */ + FOR i IN 1..p_count + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := 
@extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval ANYELEMENT, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_interval <= 0 THEN + RAISE EXCEPTION 'interval must be positive'; + END IF; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := 
@extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + 
parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +DECLARE + v_parent1 REGCLASS; + v_parent2 REGCLASS; + v_attname TEXT; + v_part_type INTEGER; + v_atttype REGTYPE; + +BEGIN + IF partition1 = partition2 THEN + RAISE EXCEPTION 'cannot merge partition with itself'; + END IF; + + v_parent1 := @extschema@.get_parent_of_partition(partition1); + v_parent2 := @extschema@.get_parent_of_partition(partition2); + + /* Acquire data modification locks (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition1); + PERFORM @extschema@.prevent_relation_modification(partition2); + + IF v_parent1 != v_parent2 THEN + RAISE EXCEPTION 'cannot merge partitions with different parents'; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent1); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent1 + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION 'specified partitions are not RANGE partitions'; + END IF; + + v_atttype := @extschema@.get_attribute_type(partition1, v_attname); + + EXECUTE 
format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING v_parent1, partition1, partition2; + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent1); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := 
@extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); + v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[2], + p_range[2] + p_interval::interval, + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[2], + p_interval, + 
partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[1] - p_interval::interval, + p_range[1], + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM 
@extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + v_part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname), + @extschema@.build_range_condition(v_attname, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH 
stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' +LANGUAGE C +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.check_range_available( + parent_relid REGCLASS, + range_min ANYELEMENT, + range_max ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' +LANGUAGE C; diff --git a/pg_pathman--1.1.sql b/pg_pathman--1.1.sql new file mode 100644 index 00000000..20cb62a5 --- /dev/null +++ b/pg_pathman--1.1.sql @@ -0,0 +1,2406 @@ +/* ------------------------------------------------------------------------ + * + * init.sql + * Creates config table and provides common utility functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* + * Pathman config + * partrel - regclass (relation type, stored as Oid) + * attname - partitioning key + * parttype - partitioning type: + * 1 - HASH + * 2 - RANGE + * range_interval - base interval for RANGE partitioning as string + */ +CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( + partrel REGCLASS NOT NULL PRIMARY KEY, + 
attname TEXT NOT NULL, + parttype INTEGER NOT NULL, + range_interval TEXT, + + CHECK (parttype IN (1, 2)) /* check for allowed part types */ +); + +/* + * Optional parameters for partitioned tables. + * partrel - regclass (relation type, stored as Oid) + * enable_parent - add parent table to plan + * auto - enable automatic partition creation + * init_callback - cb to be executed on partition creation + */ +CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( + partrel REGCLASS NOT NULL PRIMARY KEY, + enable_parent BOOLEAN NOT NULL DEFAULT FALSE, + auto BOOLEAN NOT NULL DEFAULT TRUE, + init_callback REGPROCEDURE NOT NULL DEFAULT 0 +); +CREATE UNIQUE INDEX i_pathman_config_params +ON @extschema@.pathman_config_params(partrel); + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config, @extschema@.pathman_config_params +TO public; + +/* + * Check if current user can alter/drop specified relation + */ +CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; + +/* + * Row security policy to restrict partitioning operations to owner and + * superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY deny_modification ON @extschema@.pathman_config_params +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); + +CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); + +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; +ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; + +/* + * Invalidate relcache every time someone changes parameters config. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +RETURNS TRIGGER AS +$$ +BEGIN + IF TG_OP IN ('INSERT', 'UPDATE') THEN + PERFORM @extschema@.invalidate_relcache(NEW.partrel); + END IF; + + IF TG_OP IN ('UPDATE', 'DELETE') THEN + PERFORM @extschema@.invalidate_relcache(OLD.partrel); + END IF; + + IF TG_OP = 'DELETE' THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; +END +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER pathman_config_params_trigger +BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + +/* + * Enable dump of config tables with pg_dump. + */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); + + +CREATE OR REPLACE FUNCTION @extschema@.partitions_count(relation REGCLASS) +RETURNS INT AS +$$ +BEGIN + RETURN count(*) FROM pg_inherits WHERE inhparent = relation; +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Add a row describing the optional parameter to pathman_config_params. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( + relation REGCLASS, + param TEXT, + value ANYELEMENT) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('INSERT INTO @extschema@.pathman_config_params + (partrel, %1$s) VALUES ($1, $2) + ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) + USING relation, value; +END +$$ +LANGUAGE plpgsql; + +/* + * Include\exclude parent relation in query plan. + */ +CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Enable\disable automatic partition creation. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.set_auto( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'auto', value); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Set partition creation callback + */ +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROC DEFAULT 0) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.validate_on_partition_created_callback(callback); + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); +END +$$ +LANGUAGE plpgsql; + +/* + * Show all existing parents and partitions. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + partattr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; + +/* + * View for show_partition_list(). + */ +CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + +/* + * Show all existing concurrent partitioning tasks. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; + +/* + * View for show_concurrent_part_tasks(). + */ +CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); + +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + +/* + * Partition table using ConcurrentPartWorker. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( + relation REGCLASS, + batch_size INTEGER DEFAULT 1000, + sleep_time FLOAT8 DEFAULT 1.0) +RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' +LANGUAGE C STRICT; + +/* + * Stop concurrent partitioning task. + */ +CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( + relation REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' +LANGUAGE C STRICT; + + +/* + * Copy rows to partitions concurrently. + */ +CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( + relation REGCLASS, + p_min ANYELEMENT DEFAULT NULL::text, + p_max ANYELEMENT DEFAULT NULL::text, + p_limit INT DEFAULT NULL, + OUT p_total BIGINT) +AS +$$ +DECLARE + v_attr TEXT; + v_limit_clause TEXT := ''; + v_where_clause TEXT := ''; + ctids TID[]; + +BEGIN + SELECT attname INTO v_attr + FROM @extschema@.pathman_config WHERE partrel = relation; + + p_total := 0; + + /* Format LIMIT clause if needed */ + IF NOT p_limit IS NULL THEN + v_limit_clause := format('LIMIT %s', p_limit); + END IF; + + /* Format WHERE clause if needed */ + IF NOT p_min IS NULL THEN + v_where_clause := format('%1$s >= $1', v_attr); + END IF; + + IF NOT p_max IS NULL THEN + IF NOT p_min IS NULL THEN + v_where_clause := v_where_clause || ' AND '; + END IF; + v_where_clause := v_where_clause || format('%1$s < $2', v_attr); + END IF; + + IF v_where_clause != '' THEN + v_where_clause := 'WHERE ' || v_where_clause; + END IF; + + /* Lock rows and copy data */ + RAISE NOTICE 'Copying data to partitions...'; + EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, v_limit_clause) + USING p_min, p_max + INTO ctids; + + EXECUTE format(' + WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) + USING ctids; + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; 
+END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + +/* + * Old school way to distribute rows to partitions. + */ +CREATE OR REPLACE FUNCTION @extschema@.partition_data( + parent_relid REGCLASS, + OUT p_total BIGINT) +AS +$$ +DECLARE + relname TEXT; + rec RECORD; + cnt BIGINT := 0; + +BEGIN + p_total := 0; + + /* Create partitions and copy rest of the data */ + EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + INSERT INTO %1$s SELECT * FROM part_data', + parent_relid::TEXT); + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ +LANGUAGE plpgsql STRICT +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Validates relation name. It must be schema qualified. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_relname( + cls REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + relname TEXT; + +BEGIN + relname = @extschema@.get_schema_qualified_name(cls); + + IF relname IS NULL THEN + RAISE EXCEPTION 'relation %s does not exist', cls; + END IF; + + RETURN relname; +END +$$ +LANGUAGE plpgsql; + +/* + * Aggregates several common relation checks before partitioning. + * Suitable for every partitioning type. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + p_attribute TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN + RAISE EXCEPTION 'partitioning key ''%'' must be NOT NULL', p_attribute; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + +/* + * Returns relname without quotes or something. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( + cls REGCLASS, + OUT schema TEXT, + OUT relname TEXT) +AS +$$ +BEGIN + SELECT pg_catalog.pg_class.relnamespace::regnamespace, + pg_catalog.pg_class.relname + FROM pg_catalog.pg_class WHERE oid = cls::oid + INTO schema, relname; +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Returns the schema-qualified name of table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_schema_qualified_name( + cls REGCLASS, + delimiter TEXT DEFAULT '.', + suffix TEXT DEFAULT '') +RETURNS TEXT AS +$$ +BEGIN + RETURN (SELECT quote_ident(relnamespace::regnamespace::text) || + delimiter || + quote_ident(relname || suffix) + FROM pg_catalog.pg_class + WHERE oid = cls::oid); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Check if two relations have equal structures. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( + relation1 OID, relation2 OID) +RETURNS BOOLEAN AS +$$ +DECLARE + rec RECORD; + +BEGIN + FOR rec IN ( + WITH + a1 AS (select * from pg_catalog.pg_attribute + where attrelid = relation1 and attnum > 0), + a2 AS (select * from pg_catalog.pg_attribute + where attrelid = relation2 and attnum > 0) + SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 + FROM a1 + FULL JOIN a2 ON a1.attnum = a2.attnum + ) + LOOP + IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN + RETURN false; + END IF; + END LOOP; + + RETURN true; +END +$$ +LANGUAGE plpgsql; + +/* + * DDL trigger that removes entry from pathman_config table. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS +$$ +DECLARE + obj record; + pg_class_oid oid; + relids regclass[]; +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ +LANGUAGE plpgsql; + +/* + * Drop triggers. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', + @extschema@.build_update_trigger_func_name(parent_relid)); +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS +$$ +DECLARE + v_rec RECORD; + v_rows BIGINT; + v_part_count INTEGER := 0; + conf_num_del INTEGER; + v_relkind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Drop trigger first */ + PERFORM @extschema@.drop_triggers(parent_relid); + + WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config + WHERE partrel = parent_relid + RETURNING *) + SELECT count(*) from config_num_deleted INTO conf_num_del; + + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + IF conf_num_del = 0 THEN + RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; + END IF; + + FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + v_rec.tbl::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = v_rec.tbl + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); + END IF; + + v_part_count := v_part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); + + RETURN v_part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +/* + * Copy all of parent's foreign keys. + */ +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition REGCLASS) +RETURNS VOID AS +$$ +DECLARE + rec RECORD; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition::TEXT, + pg_catalog.pg_get_constraintdef(rec.conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +/* + * Create DDL trigger to call pathman_ddl_trigger_func(). + */ +CREATE EVENT TRIGGER pathman_ddl_trigger +ON sql_drop +EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); + + + +CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_created' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' +LANGUAGE C STRICT; + + +/* + * Get parent of pg_pathman's partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) +RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' +LANGUAGE C STRICT; + +/* + * Extract basic type of a domain. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_base_type(REGTYPE) +RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' +LANGUAGE C STRICT; + +/* + * Returns attribute type name for relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( + REGCLASS, TEXT) +RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' +LANGUAGE C STRICT; + +/* + * Return tablespace name for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_rel_tablespace_name' +LANGUAGE C STRICT; + + +/* + * Checks if attribute is nullable + */ +CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( + REGCLASS, TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' +LANGUAGE C STRICT; + +/* + * Check if regclass is date or timestamp. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_date_type( + typid REGTYPE) +RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' +LANGUAGE C STRICT; + + +/* + * Build check constraint name for a specified relation's column. + */ +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + REGCLASS, INT2) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + REGCLASS, TEXT) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' +LANGUAGE C STRICT; + +/* + * Build update trigger and its underlying function's names. + */ +CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( + REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( + REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' +LANGUAGE C STRICT; + + +/* + * Attach a previously partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + attname TEXT, + range_interval TEXT DEFAULT NULL) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache(relid OID) +RETURNS VOID AS 'pg_pathman' +LANGUAGE C STRICT; + + +/* + * Lock partitioned relation to restrict concurrent + * modification of partitioning scheme. + */ + CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( + REGCLASS) + RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' + LANGUAGE C STRICT; + +/* + * Lock relation to restrict concurrent modification of data. + */ + CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( + REGCLASS) + RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' + LANGUAGE C STRICT; + + +/* + * DEBUG: Place this inside some plpgsql fuction and set breakpoint. + */ +CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +RETURNS VOID AS 'pg_pathman', 'debug_capture' +LANGUAGE C STRICT; + +/* + * Checks that callback function meets specific requirements. Particularly it + * must have the only JSONB argument and VOID return type. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_on_partition_created_callback( + callback REGPROC) +RETURNS VOID AS 'pg_pathman', 'validate_on_part_init_callback_pl' +LANGUAGE C STRICT; + + +/* + * Invoke init_callback on RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + +/* + * Invoke init_callback on HASH partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; +/* ------------------------------------------------------------------------ + * + * hash.sql + * HASH partitioning functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* + * Creates hash partitions for specified relation + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_child_relname TEXT; + v_plain_schema TEXT; + v_plain_relname TEXT; + v_atttype REGTYPE; + v_hashfunc REGPROC; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Fetch atttype and its hash function */ + v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); + v_hashfunc := @extschema@.get_type_hash_func(v_atttype); + + SELECT * INTO v_plain_schema, v_plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions and update pg_pathman configuration */ + FOR partnum IN 0..partitions_count-1 + LOOP + v_child_relname := format('%s.%s', + quote_ident(v_plain_schema), + quote_ident(v_plain_relname || 
'_' || partnum)); + + EXECUTE format( + 'CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s) TABLESPACE %s', + v_child_relname, + parent_relid::TEXT, + @extschema@.get_rel_tablespace_name(parent_relid)); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s + CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', + v_child_relname, + @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, + attribute), + v_hashfunc::TEXT, + attribute, + partitions_count, + partnum); + + PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + v_child_relname::REGCLASS, + v_init_callback); + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + 
USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := COUNT(*) FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid::oid; + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_attribute_type(parent_relid, attr); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on every partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + +/* + * Returns hash function OID for specified type + */ +CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) +RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' +LANGUAGE C STRICT; + +/* + * Calculates hash for integer value + */ +CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) +RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' +LANGUAGE C STRICT; +/* ------------------------------------------------------------------------ + * + * range.sql + * RANGE partitioning functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +CREATE OR REPLACE FUNCTION @extschema@.get_sequence_name( + plain_schema TEXT, + plain_relname TEXT) +RETURNS TEXT AS +$$ +BEGIN + RETURN format('%s.%s', + quote_ident(plain_schema), + quote_ident(format('%s_seq', plain_relname))); +END +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( + plain_schema TEXT, + plain_relname TEXT, + OUT seq_name TEXT) +AS $$ +BEGIN + seq_name := @extschema@.get_sequence_name(plain_schema, plain_relname); + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); +END +$$ +LANGUAGE plpgsql; + +/* + * Check RANGE partition boundaries. 
*/
CREATE OR REPLACE FUNCTION @extschema@.check_boundaries(
    parent_relid    REGCLASS,
    attribute       TEXT,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT)
RETURNS VOID AS
$$
DECLARE
    v_min       start_value%TYPE;
    v_max       start_value%TYPE;
    v_count     BIGINT;

BEGIN
    /* Get min and max values of the partitioning column */
    EXECUTE format('SELECT count(*), min(%1$s), max(%1$s)
                    FROM %2$s WHERE NOT %1$s IS NULL',
                   attribute, parent_relid::TEXT)
    INTO v_count, v_min, v_max;

    /* Check if column has NULL values */
    IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN
        RAISE EXCEPTION '''%'' column contains NULL values', attribute;
    END IF;

    /* Check lower boundary (start must not exceed the smallest stored value) */
    IF start_value > v_min THEN
        RAISE EXCEPTION 'start value is greater than minimum value of ''%''',
                        attribute;
    END IF;

    /* Check upper boundary (end must be strictly above the largest stored value) */
    IF end_value <= v_max THEN
        RAISE EXCEPTION 'not enough partitions to fit all values of ''%''',
                        attribute;
    END IF;
END
$$ LANGUAGE plpgsql;

/*
 * Creates RANGE partitions for specified relation based on datetime attribute
 */
CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions(
    parent_relid    REGCLASS,
    attribute       TEXT,
    start_value     ANYELEMENT,
    p_interval      INTERVAL,
    p_count         INTEGER DEFAULT NULL,
    partition_data  BOOLEAN DEFAULT TRUE)
RETURNS INTEGER AS
$$
DECLARE
    v_rows_count    BIGINT;
    v_atttype       REGTYPE;
    v_max           start_value%TYPE;
    v_cur_value     start_value%TYPE := start_value;
    end_value       start_value%TYPE;
    i               INTEGER;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    IF partition_data = true THEN
        /* Acquire data modification lock */
        PERFORM @extschema@.prevent_relation_modification(parent_relid);
    ELSE
        /* Acquire lock on parent */
        PERFORM @extschema@.lock_partitioned_relation(parent_relid);
    END IF;

    attribute := lower(attribute);
    PERFORM @extschema@.common_relation_checks(parent_relid, attribute);

    IF p_count < 0 THEN
        RAISE EXCEPTION '''p_count'' must not be less than 0';
    END IF;

    /* Try to determine partitions count if not set */
    IF p_count IS NULL THEN
        EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid)
        INTO v_rows_count, v_max;

        IF v_rows_count = 0 THEN
            RAISE EXCEPTION 'cannot determine partitions count for empty table';
        END IF;

        /* Keep in sync with the numeric overload: reject all-NULL columns */
        IF v_max IS NULL THEN
            RAISE EXCEPTION '''%'' column has NULL values', attribute;
        END IF;

        p_count := 0;
        WHILE v_cur_value <= v_max
        LOOP
            v_cur_value := v_cur_value + p_interval;
            p_count := p_count + 1;
        END LOOP;
    END IF;

    v_atttype := @extschema@.get_base_type(pg_typeof(start_value));

    /*
     * In case when user doesn't want to automatically create partitions
     * and specifies partition count as 0 then do not check boundaries
     */
    IF p_count != 0 THEN
        /* Compute right bound of partitioning through additions */
        end_value := start_value;
        FOR i IN 1..p_count
        LOOP
            end_value := end_value + p_interval;
        END LOOP;

        /* Check boundaries */
        EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)',
                       parent_relid,
                       attribute,
                       start_value,
                       end_value,
                       v_atttype::TEXT);
    END IF;

    /* Create sequence for child partitions names */
    PERFORM @extschema@.create_or_replace_sequence(schema, relname)
    FROM @extschema@.get_plain_schema_and_relname(parent_relid);

    /* Insert new entry to pathman config */
    INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval)
    VALUES (parent_relid, attribute, 2, p_interval::TEXT);

    /* Create partitions */
    FOR i IN 1..p_count
    LOOP
        EXECUTE
            format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)',
                   v_atttype::TEXT)
        USING
            parent_relid,
            start_value,
            start_value + p_interval,
            @extschema@.get_rel_tablespace_name(parent_relid);

        start_value := start_value + p_interval;
    END LOOP;

    /* Notify backend about changes */
    PERFORM @extschema@.on_create_partitions(parent_relid);

    /* Relocate data if asked to */
    IF partition_data = true THEN
        PERFORM @extschema@.set_enable_parent(parent_relid, false);
        PERFORM @extschema@.partition_data(parent_relid);
    ELSE
        PERFORM @extschema@.set_enable_parent(parent_relid, true);
    END IF;

    RETURN p_count;
END
$$ LANGUAGE plpgsql;

/*
 * Creates RANGE partitions for specified relation based on numerical attribute
 */
CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions(
    parent_relid    REGCLASS,
    attribute       TEXT,
    start_value     ANYELEMENT,
    p_interval      ANYELEMENT,
    p_count         INTEGER DEFAULT NULL,
    partition_data  BOOLEAN DEFAULT TRUE)
RETURNS INTEGER AS
$$
DECLARE
    v_rows_count    BIGINT;
    v_max           start_value%TYPE;
    v_cur_value     start_value%TYPE := start_value;
    end_value       start_value%TYPE;
    i               INTEGER;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    IF partition_data = true THEN
        /* Acquire data modification lock */
        PERFORM @extschema@.prevent_relation_modification(parent_relid);
    ELSE
        /* Acquire lock on parent */
        PERFORM @extschema@.lock_partitioned_relation(parent_relid);
    END IF;

    attribute := lower(attribute);
    PERFORM @extschema@.common_relation_checks(parent_relid, attribute);

    IF p_count < 0 THEN
        RAISE EXCEPTION '''p_count'' must not be less than 0';
    END IF;

    /* Try to determine partitions count if not set */
    IF p_count IS NULL THEN
        EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid)
        INTO v_rows_count, v_max;

        IF v_rows_count = 0 THEN
            RAISE EXCEPTION 'cannot determine partitions count for empty table';
        END IF;

        IF v_max IS NULL THEN
            RAISE EXCEPTION '''%'' column has NULL values', attribute;
        END IF;

        p_count := 0;
        WHILE v_cur_value <= v_max
        LOOP
            v_cur_value := v_cur_value + p_interval;
            p_count := p_count + 1;
        END LOOP;
    END IF;

    /*
     * In case when user doesn't want to automatically create partitions
     * and specifies partition count as 0 then do not check boundaries
     */
    IF p_count != 0 THEN
        /* Compute right bound of partitioning through additions */
        end_value := start_value;
        FOR i IN 1..p_count
        LOOP
            end_value := end_value + p_interval;
        END LOOP;

        /* Check boundaries */
        PERFORM @extschema@.check_boundaries(parent_relid,
                                             attribute,
                                             start_value,
                                             end_value);
    END IF;

    /* Create sequence for child partitions names */
    PERFORM @extschema@.create_or_replace_sequence(schema, relname)
    FROM @extschema@.get_plain_schema_and_relname(parent_relid);

    /* Insert new entry to pathman config */
    INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval)
    VALUES (parent_relid, attribute, 2, p_interval::TEXT);

    /* Create partitions */
    FOR i IN 1..p_count
    LOOP
        PERFORM @extschema@.create_single_range_partition(
                    parent_relid,
                    start_value,
                    start_value + p_interval,
                    tablespace := @extschema@.get_rel_tablespace_name(parent_relid));

        start_value := start_value + p_interval;
    END LOOP;

    /* Notify backend about changes */
    PERFORM @extschema@.on_create_partitions(parent_relid);

    /* Relocate data if asked to */
    IF partition_data = true THEN
        PERFORM @extschema@.set_enable_parent(parent_relid, false);
        PERFORM @extschema@.partition_data(parent_relid);
    ELSE
        PERFORM @extschema@.set_enable_parent(parent_relid, true);
    END IF;

    RETURN p_count;
END
$$ LANGUAGE plpgsql;

/*
 * Creates RANGE partitions for specified range
 */
CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range(
    parent_relid    REGCLASS,
    attribute       TEXT,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT,
    p_interval      ANYELEMENT,
    partition_data  BOOLEAN DEFAULT TRUE)
RETURNS INTEGER AS
$$
DECLARE
    part_count INTEGER := 0;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    IF partition_data = true THEN
        /* Acquire data modification lock */
        PERFORM @extschema@.prevent_relation_modification(parent_relid);
    ELSE
        /* Acquire lock on parent */
        PERFORM @extschema@.lock_partitioned_relation(parent_relid);
    END IF;

    attribute := lower(attribute);
    PERFORM @extschema@.common_relation_checks(parent_relid, attribute);

    IF p_interval <= 0 THEN
        RAISE EXCEPTION 'interval must be positive';
    END IF;

    /* Check boundaries */
    PERFORM @extschema@.check_boundaries(parent_relid,
                                         attribute,
                                         start_value,
                                         end_value);

    /* Create sequence for child partitions names */
    PERFORM @extschema@.create_or_replace_sequence(schema, relname)
    FROM @extschema@.get_plain_schema_and_relname(parent_relid);

    /* Insert new entry to pathman config */
    INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval)
    VALUES (parent_relid, attribute, 2, p_interval::TEXT);

    WHILE start_value <= end_value
    LOOP
        PERFORM @extschema@.create_single_range_partition(
                    parent_relid,
                    start_value,
                    start_value + p_interval,
                    tablespace := @extschema@.get_rel_tablespace_name(parent_relid));

        start_value := start_value + p_interval;
        part_count := part_count + 1;
    END LOOP;

    /* Notify backend about changes */
    PERFORM @extschema@.on_create_partitions(parent_relid);

    /* Relocate data if asked to */
    IF partition_data = true THEN
        PERFORM @extschema@.set_enable_parent(parent_relid, false);
        PERFORM @extschema@.partition_data(parent_relid);
    ELSE
        PERFORM @extschema@.set_enable_parent(parent_relid, true);
    END IF;

    RETURN part_count; /* number of created partitions */
END
$$ LANGUAGE plpgsql;

/*
 * Creates RANGE partitions for specified range based on datetime attribute
 */
CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range(
    parent_relid    REGCLASS,
    attribute       TEXT,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT,
    p_interval      INTERVAL,
    partition_data  BOOLEAN DEFAULT TRUE)
RETURNS INTEGER AS
$$
DECLARE
    part_count INTEGER := 0;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    IF partition_data = true THEN
        /* Acquire data modification lock */
        PERFORM @extschema@.prevent_relation_modification(parent_relid);
    ELSE
        /* Acquire lock on parent */
        PERFORM @extschema@.lock_partitioned_relation(parent_relid);
    END IF;

    attribute := lower(attribute);
    PERFORM @extschema@.common_relation_checks(parent_relid, attribute);

    /* Check boundaries */
    PERFORM @extschema@.check_boundaries(parent_relid,
                                         attribute,
                                         start_value,
                                         end_value);

    /* Create sequence for child partitions names */
    PERFORM @extschema@.create_or_replace_sequence(schema, relname)
    FROM @extschema@.get_plain_schema_and_relname(parent_relid);

    /* Insert new entry to pathman config */
    INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval)
    VALUES (parent_relid, attribute, 2, p_interval::TEXT);

    WHILE start_value <= end_value
    LOOP
        EXECUTE
            format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);',
                   @extschema@.get_base_type(pg_typeof(start_value))::TEXT)
        USING
            parent_relid,
            start_value,
            start_value + p_interval,
            @extschema@.get_rel_tablespace_name(parent_relid);

        start_value := start_value + p_interval;
        part_count := part_count + 1;
    END LOOP;

    /* Notify backend about changes */
    PERFORM @extschema@.on_create_partitions(parent_relid);

    /* Relocate data if asked to */
    IF partition_data = true THEN
        PERFORM @extschema@.set_enable_parent(parent_relid, false);
        PERFORM @extschema@.partition_data(parent_relid);
    ELSE
        PERFORM @extschema@.set_enable_parent(parent_relid, true);
    END IF;

    RETURN part_count; /* number of created partitions */
END
$$ LANGUAGE plpgsql;

/*
 * Creates new RANGE partition. Returns partition name.
 * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5).
*/
CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition(
    parent_relid    REGCLASS,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_part_num              INT;
    v_child_relname         TEXT;
    v_plain_child_relname   TEXT;
    v_attname               TEXT;
    v_plain_schema          TEXT;
    v_plain_relname         TEXT;
    v_child_relname_exists  BOOL;
    v_seq_name              TEXT;
    v_init_callback         REGPROCEDURE;

BEGIN
    v_attname := attname FROM @extschema@.pathman_config
                 WHERE partrel = parent_relid;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    SELECT * INTO v_plain_schema, v_plain_relname
    FROM @extschema@.get_plain_schema_and_relname(parent_relid);

    v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname);

    IF partition_name IS NULL THEN
        /* No name given: pick the next free "<parent>_<N>" via the sequence */
        LOOP
            v_part_num := nextval(v_seq_name);
            v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num);
            v_child_relname := format('%s.%s',
                                      quote_ident(v_plain_schema),
                                      quote_ident(v_plain_child_relname));

            v_child_relname_exists := count(*) > 0
                FROM pg_class
                WHERE relname = v_plain_child_relname AND
                      relnamespace = v_plain_schema::regnamespace
                LIMIT 1;

            EXIT WHEN v_child_relname_exists = false;
        END LOOP;
    ELSE
        v_child_relname := partition_name;
    END IF;

    /* Inherit parent's tablespace unless one was given explicitly */
    IF tablespace IS NULL THEN
        tablespace := @extschema@.get_rel_tablespace_name(parent_relid);
    END IF;

    EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL)
                    INHERITS (%2$s) TABLESPACE %3$s',
                   v_child_relname,
                   parent_relid::TEXT,
                   tablespace);

    EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)',
                   v_child_relname,
                   @extschema@.build_check_constraint_name(v_child_relname::REGCLASS,
                                                           v_attname),
                   @extschema@.build_range_condition(v_attname,
                                                     start_value,
                                                     end_value));

    PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS);

    /* Fetch init_callback from 'params' table */
    WITH stub_callback(stub) as (values (0))
    SELECT coalesce(init_callback, 0::REGPROCEDURE)
    FROM stub_callback
    LEFT JOIN @extschema@.pathman_config_params AS params
           ON params.partrel = parent_relid
    INTO v_init_callback;

    PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid,
                                                             v_child_relname::REGCLASS,
                                                             v_init_callback,
                                                             start_value,
                                                             end_value);

    RETURN v_child_relname;
END
$$ LANGUAGE plpgsql
SET client_min_messages = WARNING;

/*
 * Split RANGE partition
 */
CREATE OR REPLACE FUNCTION @extschema@.split_range_partition(
    partition       REGCLASS,
    split_value     ANYELEMENT,
    partition_name  TEXT DEFAULT NULL,
    OUT p_range     ANYARRAY)
RETURNS ANYARRAY AS
$$
DECLARE
    v_parent        REGCLASS;
    v_attname       TEXT;
    v_atttype       REGTYPE;
    v_cond          TEXT;
    v_new_partition TEXT;
    v_part_type     INTEGER;
    v_check_name    TEXT;

BEGIN
    v_parent = @extschema@.get_parent_of_partition(partition);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(v_parent);

    /* Acquire data modification lock (prevent further modifications) */
    PERFORM @extschema@.prevent_relation_modification(partition);

    SELECT attname, parttype
    FROM @extschema@.pathman_config
    WHERE partrel = v_parent
    INTO v_attname, v_part_type;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT;
    END IF;

    /* Check if this is a RANGE partition */
    IF v_part_type != 2 THEN
        RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT;
    END IF;

    v_atttype = @extschema@.get_attribute_type(v_parent, v_attname);

    /* Get partition values range */
    EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)',
                   @extschema@.get_base_type(v_atttype)::TEXT)
    USING partition
    INTO p_range;

    IF p_range IS NULL THEN
        RAISE EXCEPTION 'could not find specified partition';
    END IF;

    /* Check if value fits into the range */
    IF p_range[1] > split_value OR p_range[2] <= split_value
    THEN
        RAISE EXCEPTION 'specified value does not fit into the range [%, %)',
                        p_range[1], p_range[2];
    END IF;

    /* Create new partition */
    v_new_partition := @extschema@.create_single_range_partition(v_parent,
                                                                 split_value,
                                                                 p_range[2],
                                                                 partition_name);

    /* Move rows above the split point into the new partition */
    v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]);
    EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *)
                    INSERT INTO %s SELECT * FROM part_data',
                   partition::TEXT,
                   v_cond,
                   v_new_partition);

    /* Shrink the original partition's CHECK constraint */
    v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value);
    v_check_name := @extschema@.build_check_constraint_name(partition, v_attname);

    EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s',
                   partition::TEXT,
                   v_check_name);

    EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)',
                   partition::TEXT,
                   v_check_name,
                   v_cond);

    /* Tell backend to reload configuration */
    PERFORM @extschema@.on_update_partitions(v_parent);
END
$$
LANGUAGE plpgsql;


/*
 * Merge RANGE partitions
 */
CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions(
    partition1 REGCLASS,
    partition2 REGCLASS)
RETURNS VOID AS
$$
DECLARE
    v_parent1   REGCLASS;
    v_parent2   REGCLASS;
    v_attname   TEXT;
    v_part_type INTEGER;
    v_atttype   REGTYPE;

BEGIN
    IF partition1 = partition2 THEN
        RAISE EXCEPTION 'cannot merge partition with itself';
    END IF;

    v_parent1 := @extschema@.get_parent_of_partition(partition1);
    v_parent2 := @extschema@.get_parent_of_partition(partition2);

    /* Acquire data modification locks (prevent further modifications) */
    PERFORM @extschema@.prevent_relation_modification(partition1);
    PERFORM @extschema@.prevent_relation_modification(partition2);

    IF v_parent1 != v_parent2 THEN
        RAISE EXCEPTION 'cannot merge partitions with different parents';
    END IF;

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(v_parent1);

    SELECT attname, parttype
    FROM @extschema@.pathman_config
    WHERE partrel = v_parent1
    INTO v_attname, v_part_type;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT;
    END IF;

    /* Check if this is a RANGE partition */
    IF v_part_type != 2 THEN
        RAISE EXCEPTION 'specified partitions aren''t RANGE partitions';
    END IF;

    v_atttype := @extschema@.get_attribute_type(partition1, v_attname);

    EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)',
                   @extschema@.get_base_type(v_atttype)::TEXT)
    USING v_parent1, partition1, partition2;

    /* Tell backend to reload configuration */
    PERFORM @extschema@.on_update_partitions(v_parent1);
END
$$
LANGUAGE plpgsql;


/*
 * Merge two partitions. All data will be copied to the first one. Second
 * partition will be destroyed.
 *
 * NOTE: dummy field is used to pass the element type to the function
 * (it is necessary because of pseudo-types used in function).
*/
CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal(
    parent_relid    REGCLASS,
    partition1      REGCLASS,
    partition2      REGCLASS,
    dummy           ANYELEMENT,
    OUT p_range     ANYARRAY)
RETURNS ANYARRAY AS
$$
DECLARE
    v_attname       TEXT;
    v_atttype       REGTYPE;
    v_check_name    TEXT;

BEGIN
    SELECT attname FROM @extschema@.pathman_config
    WHERE partrel = parent_relid
    INTO v_attname;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname);

    /* We have to pass fake NULL casted to column's type */
    EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) ||
                           @extschema@.get_part_range($2, NULL::%1$s)',
                   @extschema@.get_base_type(v_atttype)::TEXT)
    USING partition1, partition2
    INTO p_range;

    /* Check if ranges are adjacent (either end of one touches the other) */
    IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN
        RAISE EXCEPTION 'merge failed, partitions must be adjacent';
    END IF;

    /* Drop constraint on first partition... */
    v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname);
    EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s',
                   partition1::TEXT,
                   v_check_name);

    /* ...and create a new one covering the united range */
    EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)',
                   partition1::TEXT,
                   v_check_name,
                   @extschema@.build_range_condition(v_attname,
                                                     least(p_range[1], p_range[3]),
                                                     greatest(p_range[2], p_range[4])));

    /* Copy data from second partition to the first one */
    EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *)
                    INSERT INTO %s SELECT * FROM part_data',
                   partition2::TEXT,
                   partition1::TEXT);

    /* Remove second partition */
    EXECUTE format('DROP TABLE %s', partition2::TEXT);
END
$$ LANGUAGE plpgsql;


/*
 * Append new partition.
*/
CREATE OR REPLACE FUNCTION @extschema@.append_range_partition(
    parent_relid    REGCLASS,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_attname   TEXT;
    v_atttype   REGTYPE;
    v_part_name TEXT;
    v_interval  TEXT;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    SELECT attname, range_interval
    FROM @extschema@.pathman_config
    WHERE partrel = parent_relid
    INTO v_attname, v_interval;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname);

    /* Delegate to the internal helper, passing an empty array of the base type */
    EXECUTE
        format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)',
               @extschema@.get_base_type(v_atttype)::TEXT)
    USING
        parent_relid,
        v_atttype,
        v_interval,
        partition_name,
        tablespace
    INTO
        v_part_name;

    /* Invalidate cache */
    PERFORM @extschema@.on_update_partitions(parent_relid);
    RETURN v_part_name;
END
$$
LANGUAGE plpgsql;

/*
 * Spawn logic for append_partition(). We have to
 * separate this in order to pass the 'p_range'.
 *
 * NOTE: we don't take a xact_handling lock here.
*/
CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal(
    parent_relid    REGCLASS,
    p_atttype       REGTYPE,
    p_interval      TEXT,
    p_range         ANYARRAY DEFAULT NULL,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_part_name TEXT;
    v_atttype   REGTYPE;

BEGIN
    IF @extschema@.partitions_count(parent_relid) = 0 THEN
        RAISE EXCEPTION 'cannot append to empty partitions set';
    END IF;

    v_atttype := @extschema@.get_base_type(p_atttype);

    /* Fetch the range of the last partition (fake NULL carries the type) */
    EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)',
                   v_atttype::TEXT)
    USING parent_relid
    INTO p_range;

    IF @extschema@.is_date_type(p_atttype) THEN
        v_part_name := @extschema@.create_single_range_partition(
                            parent_relid,
                            p_range[2],
                            p_range[2] + p_interval::interval,
                            partition_name,
                            tablespace);
    ELSE
        EXECUTE
            format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)',
                   v_atttype::TEXT)
        USING
            parent_relid,
            p_range[2],
            p_interval,
            partition_name,
            tablespace
        INTO
            v_part_name;
    END IF;

    RETURN v_part_name;
END
$$
LANGUAGE plpgsql;


/*
 * Prepend new partition.
*/
CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition(
    parent_relid    REGCLASS,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_attname   TEXT;
    v_atttype   REGTYPE;
    v_part_name TEXT;
    v_interval  TEXT;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    SELECT attname, range_interval
    FROM @extschema@.pathman_config
    WHERE partrel = parent_relid
    INTO v_attname, v_interval;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname);

    /* Delegate to the internal helper, passing an empty array of the base type */
    EXECUTE
        format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)',
               @extschema@.get_base_type(v_atttype)::TEXT)
    USING
        parent_relid,
        v_atttype,
        v_interval,
        partition_name,
        tablespace
    INTO
        v_part_name;

    /* Invalidate cache */
    PERFORM @extschema@.on_update_partitions(parent_relid);
    RETURN v_part_name;
END
$$
LANGUAGE plpgsql;

/*
 * Spawn logic for prepend_partition(). We have to
 * separate this in order to pass the 'p_range'.
 *
 * NOTE: we don't take a xact_handling lock here.
*/
CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal(
    parent_relid    REGCLASS,
    p_atttype       REGTYPE,
    p_interval      TEXT,
    p_range         ANYARRAY DEFAULT NULL,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_part_name TEXT;
    v_atttype   REGTYPE;

BEGIN
    IF @extschema@.partitions_count(parent_relid) = 0 THEN
        RAISE EXCEPTION 'cannot prepend to empty partitions set';
    END IF;

    v_atttype := @extschema@.get_base_type(p_atttype);

    /* Fetch the range of the first partition (fake NULL carries the type) */
    EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)',
                   v_atttype::TEXT)
    USING parent_relid
    INTO p_range;

    IF @extschema@.is_date_type(p_atttype) THEN
        v_part_name := @extschema@.create_single_range_partition(
                            parent_relid,
                            p_range[1] - p_interval::interval,
                            p_range[1],
                            partition_name,
                            tablespace);
    ELSE
        EXECUTE
            format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)',
                   v_atttype::TEXT)
        USING
            parent_relid,
            p_range[1],
            p_interval,
            partition_name,
            tablespace
        INTO
            v_part_name;
    END IF;

    RETURN v_part_name;
END
$$
LANGUAGE plpgsql;


/*
 * Add new partition
 */
CREATE OR REPLACE FUNCTION @extschema@.add_range_partition(
    parent_relid    REGCLASS,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT,
    partition_name  TEXT DEFAULT NULL,
    tablespace      TEXT DEFAULT NULL)
RETURNS TEXT AS
$$
DECLARE
    v_part_name TEXT;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    IF start_value >= end_value THEN
        RAISE EXCEPTION 'failed to create partition: start_value is greater than or equal to end_value';
    END IF;

    /* Check range overlap */
    IF @extschema@.partitions_count(parent_relid) > 0
       AND @extschema@.check_overlap(parent_relid, start_value, end_value) THEN
        RAISE EXCEPTION 'specified range overlaps with existing partitions';
    END IF;

    /* Create new partition */
    v_part_name := @extschema@.create_single_range_partition(parent_relid,
                                                             start_value,
                                                             end_value,
                                                             partition_name,
                                                             tablespace);
    PERFORM @extschema@.on_update_partitions(parent_relid);

    RETURN v_part_name;
END
$$
LANGUAGE plpgsql;


/*
 * Drop range partition
 */
CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition(
    partition       REGCLASS,
    delete_data     BOOLEAN DEFAULT TRUE)
RETURNS TEXT AS
$$
DECLARE
    parent_relid REGCLASS;
    part_name    TEXT;
    v_relkind    CHAR;
    v_rows       BIGINT;
    v_part_type  INTEGER;

BEGIN
    parent_relid := @extschema@.get_parent_of_partition(partition);
    part_name := partition::TEXT; /* save the name to be returned */

    SELECT parttype
    FROM @extschema@.pathman_config
    WHERE partrel = parent_relid
    INTO v_part_type;

    /* Check if this is a RANGE partition */
    IF v_part_type != 2 THEN
        RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT;
    END IF;

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    IF NOT delete_data THEN
        /* Move rows back to the parent before dropping the child */
        EXECUTE format('INSERT INTO %s SELECT * FROM %s',
                       parent_relid::TEXT,
                       partition::TEXT);
        GET DIAGNOSTICS v_rows = ROW_COUNT;

        /* Show number of copied rows */
        RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT;
    END IF;

    SELECT relkind FROM pg_catalog.pg_class
    WHERE oid = partition
    INTO v_relkind;

    /*
     * Determine the kind of child relation. It can be either regular
     * table (r) or foreign table (f). Depending on relkind we use
     * DROP TABLE or DROP FOREIGN TABLE.
     */
    IF v_relkind = 'f' THEN
        EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT);
    ELSE
        EXECUTE format('DROP TABLE %s', partition::TEXT);
    END IF;

    /* Invalidate cache */
    PERFORM @extschema@.on_update_partitions(parent_relid);

    RETURN part_name;
END
$$
LANGUAGE plpgsql
SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */


/*
 * Attach range partition
 */
CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition(
    parent_relid    REGCLASS,
    partition       REGCLASS,
    start_value     ANYELEMENT,
    end_value       ANYELEMENT)
RETURNS TEXT AS
$$
DECLARE
    v_attname        TEXT;
    rel_persistence  CHAR;
    v_init_callback  REGPROCEDURE;

BEGIN
    PERFORM @extschema@.validate_relname(parent_relid);
    PERFORM @extschema@.validate_relname(partition);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    /* Ignore temporary tables */
    SELECT relpersistence FROM pg_catalog.pg_class
    WHERE oid = partition INTO rel_persistence;

    IF rel_persistence = 't'::CHAR THEN
        RAISE EXCEPTION 'temporary table "%" cannot be used as a partition',
                        partition::TEXT;
    END IF;

    IF @extschema@.check_overlap(parent_relid, start_value, end_value) THEN
        RAISE EXCEPTION 'specified range overlaps with existing partitions';
    END IF;

    IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN
        RAISE EXCEPTION 'partition must have the exact same structure as parent';
    END IF;

    /* Set inheritance */
    EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid);

    v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    /* Set check constraint */
    EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)',
                   partition::TEXT,
                   @extschema@.build_check_constraint_name(partition, v_attname),
                   @extschema@.build_range_condition(v_attname,
                                                     start_value,
                                                     end_value));

    /* Fetch init_callback from 'params' table */
    WITH stub_callback(stub) as (values (0))
    SELECT coalesce(init_callback, 0::REGPROCEDURE)
    FROM stub_callback
    LEFT JOIN @extschema@.pathman_config_params AS params
           ON params.partrel = parent_relid
    INTO v_init_callback;

    PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid,
                                                             partition,
                                                             v_init_callback,
                                                             start_value,
                                                             end_value);

    /* Invalidate cache */
    PERFORM @extschema@.on_update_partitions(parent_relid);

    RETURN partition;
END
$$
LANGUAGE plpgsql;


/*
 * Detach range partition
 */
CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition(
    partition REGCLASS)
RETURNS TEXT AS
$$
DECLARE
    v_attname    TEXT;
    parent_relid REGCLASS;

BEGIN
    parent_relid := @extschema@.get_parent_of_partition(partition);

    /* Acquire lock on parent */
    PERFORM @extschema@.lock_partitioned_relation(parent_relid);

    v_attname := attname
    FROM @extschema@.pathman_config
    WHERE partrel = parent_relid;

    IF v_attname IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    /* Remove inheritance */
    EXECUTE format('ALTER TABLE %s NO INHERIT %s',
                   partition::TEXT,
                   parent_relid::TEXT);

    /* Remove check constraint */
    EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s',
                   partition::TEXT,
                   @extschema@.build_check_constraint_name(partition, v_attname));

    /* Invalidate cache */
    PERFORM @extschema@.on_update_partitions(parent_relid);

    RETURN partition;
END
$$
LANGUAGE plpgsql;


/*
 * Creates an update trigger
 */
CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger(
    IN parent_relid REGCLASS)
RETURNS TEXT AS
$$
DECLARE
    /* Template for the trigger function; %%s survives the outer format() */
    func TEXT := 'CREATE OR REPLACE FUNCTION %1$s()
                  RETURNS TRIGGER AS
                  $body$
                  DECLARE
                      old_oid Oid;
                      new_oid Oid;

                  BEGIN
                      old_oid := TG_RELID;
                      new_oid := @extschema@.find_or_create_range_partition(
                                      ''%2$s''::regclass, NEW.%3$s);

                      IF old_oid = new_oid THEN
                          RETURN NEW;
                      END IF;

                      EXECUTE format(''DELETE FROM %%s WHERE %5$s'',
                                     old_oid::regclass::text)
                      USING %6$s;

                      EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'',
                                     new_oid::regclass::text)
                      USING %8$s;

                      RETURN NULL;
                  END $body$
                  LANGUAGE plpgsql';

    trigger TEXT := 'CREATE TRIGGER %s ' ||
                    'BEFORE UPDATE ON %s ' ||
                    'FOR EACH ROW EXECUTE PROCEDURE %s()';

    triggername     TEXT;
    funcname        TEXT;
    att_names       TEXT;
    old_fields      TEXT;
    new_fields      TEXT;
    att_val_fmt     TEXT;
    att_fmt         TEXT;
    attr            TEXT;
    rec             RECORD;

BEGIN
    attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid;

    IF attr IS NULL THEN
        RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT;
    END IF;

    /* Build column lists and NULL-safe comparison predicates for all columns */
    SELECT string_agg(attname, ', '),
           string_agg('OLD.' || attname, ', '),
           string_agg('NEW.' || attname, ', '),
           string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' ||
                            attname || ' = $' || attnum || ' ' ||
                      'ELSE ' ||
                            attname || ' IS NULL END',
                      ' AND '),
           string_agg('$' || attnum, ', ')
    FROM pg_attribute
    WHERE attrelid::REGCLASS = parent_relid AND attnum > 0
    INTO att_names,
         old_fields,
         new_fields,
         att_val_fmt,
         att_fmt;

    /* Build trigger & trigger function's names */
    funcname := @extschema@.build_update_trigger_func_name(parent_relid);
    triggername := @extschema@.build_update_trigger_name(parent_relid);

    /* Create function for trigger */
    EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt,
                   old_fields, att_fmt, new_fields);

    /* Create trigger on every partition */
    FOR rec in (SELECT * FROM pg_catalog.pg_inherits
                WHERE inhparent = parent_relid)
    LOOP
        EXECUTE format(trigger,
                       triggername,
                       rec.inhrelid::REGCLASS::TEXT,
                       funcname);
    END LOOP;

    RETURN funcname;
END
$$ LANGUAGE plpgsql;

/*
 * Construct CHECK constraint condition for a range partition.
+ */ +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + p_attname TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + +/* + * Returns N-th range (as an array of two elements). + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + parent_relid REGCLASS, + partition_idx INTEGER, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' +LANGUAGE C; + +/* + * Returns min and max values for specified RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + partition_relid REGCLASS, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; + +/* + * Checks if range overlaps with existing partitions. + * Returns TRUE if overlaps and FALSE otherwise. + */ +CREATE OR REPLACE FUNCTION @extschema@.check_overlap( + parent_relid REGCLASS, + range_min ANYELEMENT, + range_max ANYELEMENT) +RETURNS BOOLEAN AS 'pg_pathman', 'check_overlap' +LANGUAGE C; + +/* + * Needed for an UPDATE trigger. + */ +CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( + parent_relid REGCLASS, + value ANYELEMENT) +RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' +LANGUAGE C; diff --git a/pg_pathman.control b/pg_pathman.control index 66a1ced3..4d07adf5 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension -comment 'Partitioning tool ver. 
1.1' -default_version = '1.1' +comment 'Partitioning tool' +default_version = '1.2' module_pathname='$libdir/pg_pathman' diff --git a/range.sql b/range.sql index 2141668f..396d8f30 100644 --- a/range.sql +++ b/range.sql @@ -442,6 +442,7 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( partition REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, OUT p_range ANYARRAY) RETURNS ANYARRAY AS $$ @@ -500,7 +501,8 @@ BEGIN v_new_partition := @extschema@.create_single_range_partition(v_parent, split_value, p_range[2], - partition_name); + partition_name, + tablespace); /* Copy data */ v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index f360a03c..7f0ee753 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -326,6 +326,12 @@ def check_tablespace(node, tablename, tablespace): 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + # check tablespace for split + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + # now let's specify tablespace explicitly node.psql( 'postgres', @@ -336,9 +342,13 @@ def check_tablespace(node, tablename, tablespace): node.psql( 'postgres', 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) @if_fdw_enabled def 
test_foreign_table(self): From ba29122a9ab5ab5b17508702722dbec623614799 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Nov 2016 18:43:30 +0300 Subject: [PATCH 0105/1124] reimplement pathman_config_params_trigger_func() in C language, fix bug in pathman_relcache_hook() --- init.sql | 26 ++----------------------- src/hooks.c | 7 +++++-- src/pl_funcs.c | 52 ++++++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 51 insertions(+), 34 deletions(-) diff --git a/init.sql b/init.sql index 80c6817b..e57cec76 100644 --- a/init.sql +++ b/init.sql @@ -88,25 +88,8 @@ ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; * Invalidate relcache every time someone changes parameters config. */ CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() -RETURNS TRIGGER AS -$$ -BEGIN - IF TG_OP IN ('INSERT', 'UPDATE') THEN - PERFORM @extschema@.invalidate_relcache(NEW.partrel); - END IF; - - IF TG_OP IN ('UPDATE', 'DELETE') THEN - PERFORM @extschema@.invalidate_relcache(OLD.partrel); - END IF; - - IF TG_OP = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$ -LANGUAGE plpgsql; +RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' +LANGUAGE C; CREATE TRIGGER pathman_config_params_trigger BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params @@ -750,11 +733,6 @@ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; -CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache( - OID) -RETURNS VOID AS 'pg_pathman' -LANGUAGE C STRICT; - /* * Lock partitioned relation to restrict concurrent diff --git a/src/hooks.c b/src/hooks.c index 2709441f..19688ad6 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -614,8 +614,11 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Both syscache and pathman's cache say it isn't a partition */ case PPS_ENTRY_NOT_FOUND: { - if (partitioned_table != InvalidOid) - 
delay_invalidation_parent_rel(partitioned_table); + Assert(partitioned_table == InvalidOid); + + /* Which means that 'relid' might be parent */ + if (relid != InvalidOid) + delay_invalidation_parent_rel(relid); #ifdef NOT_USED elog(DEBUG2, "Invalidation message for relation %u [%u]", relid, MyProcPid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ede9e05d..e0947b8e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -17,9 +17,11 @@ #include "access/htup_details.h" #include "access/nbtree.h" +#include "access/xact.h" #include "catalog/indexing.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" +#include "commands/trigger.h" #include "funcapi.h" #include "miscadmin.h" #include "utils/builtins.h" @@ -54,7 +56,7 @@ PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); -PG_FUNCTION_INFO_V1( invalidate_relcache ); +PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); PG_FUNCTION_INFO_V1( lock_partitioned_relation ); PG_FUNCTION_INFO_V1( prevent_relation_modification ); @@ -676,17 +678,51 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* - * Invalidate relcache for a specified relation. + * Invalidate relcache to refresh PartRelationInfo. 
*/ Datum -invalidate_relcache(PG_FUNCTION_ARGS) +pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - - if (check_relation_exists(relid)) - CacheInvalidateRelcacheByRelid(relid); + TriggerData *trigdata = (TriggerData *) fcinfo->context; + Oid pathman_config_params = get_pathman_config_params_relid(); + Oid partrel; + Datum partrel_datum; + bool partrel_isnull; + + /* Handle user calls */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "this function should not be called directly"); + + /* Handle wrong fire mode */ + if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) + elog(ERROR, "%s: must be fired for row", + trigdata->tg_trigger->tgname); + + /* Handle wrong relation */ + if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params) + elog(ERROR, "%s: must be fired for relation \"%s\"", + trigdata->tg_trigger->tgname, + get_rel_name(pathman_config_params)); + + /* Extract partitioned relation's Oid */ + partrel_datum = heap_getattr(trigdata->tg_trigtuple, + Anum_pathman_config_params_partrel, + RelationGetDescr(trigdata->tg_relation), + &partrel_isnull); + Assert(partrel_isnull == false); /* partrel should not be NULL! 
*/ + + partrel = DatumGetObjectId(partrel_datum); + + /* Finally trigger pg_pathman's cache invalidation event */ + if (check_relation_exists(partrel)) + CacheInvalidateRelcacheByRelid(partrel); + + /* Return the tuple we've been given */ + if (trigdata->tg_event & TRIGGER_EVENT_UPDATE) + PG_RETURN_POINTER(trigdata->tg_newtuple); + else + PG_RETURN_POINTER(trigdata->tg_trigtuple); - PG_RETURN_VOID(); } From 1709c152d40935a9e84e977b5f27dcd4a7e36663 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 30 Nov 2016 18:49:03 +0300 Subject: [PATCH 0106/1124] couple fixes to migration script --- pg_pathman--1.1--1.2.sql | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql index 7ecfeb03..a9ae367f 100644 --- a/pg_pathman--1.1--1.2.sql +++ b/pg_pathman--1.1--1.2.sql @@ -22,15 +22,7 @@ DROP FUNCTION @extschema@.validate_on_partition_created_callback(REGPROC); DROP FUNCTION @extschema@.get_sequence_name(TEXT, TEXT); DROP FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT, TEXT); DROP FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT); -DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, ANYARRAY); - - -/* ------------------------------------------------------------------------ - * Alter tables - * ----------------------------------------------------------------------*/ -ALTER TABLE @extschema@.pathman_config_params ADD COLUMN spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE; -ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(init_callback)); - +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT ANYARRAY); /* ------------------------------------------------------------------------ * Alter functions' modifiers @@ -1271,3 +1263,10 @@ CREATE OR REPLACE FUNCTION @extschema@.check_range_available( range_max ANYELEMENT) RETURNS VOID AS 'pg_pathman', 
'check_range_available_pl' LANGUAGE C; + + +/* ------------------------------------------------------------------------ + * Alter tables + * ----------------------------------------------------------------------*/ +ALTER TABLE @extschema@.pathman_config_params ADD COLUMN spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE; +ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(init_callback)); From 288bf73e15d01eced1952c57e8e32d2a55f81368 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 30 Nov 2016 19:19:20 +0300 Subject: [PATCH 0107/1124] documentation fixes and description for replace_hash_partition() function --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 039e6572..7a7788a5 100644 --- a/README.md +++ b/README.md @@ -84,10 +84,9 @@ Done! Now it's time to setup your partitioning schemes. create_hash_partitions(relation REGCLASS, attribute TEXT, partitions_count INTEGER, - partition_name TEXT DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) ``` -Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_part_init_callback()`). +Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. 
Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql create_range_partitions(relation REGCLASS, @@ -148,6 +147,14 @@ create_range_update_trigger(parent REGCLASS) Same as above, but for a RANGE-partitioned table. ### Post-creation partition management +```plpgsql +replace_hash_partition(old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +``` +Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. + + ```plpgsql split_range_partition(partition REGCLASS, split_value ANYELEMENT, From c453ce43bbd358d2402346d0c2c060f20a84f390 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Nov 2016 20:27:18 +0300 Subject: [PATCH 0108/1124] fixes for migration script (pathman_config_params_trigger_func() & invalidate_relcache()) --- pg_pathman--1.1--1.2.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql index a9ae367f..9bdf956b 100644 --- a/pg_pathman--1.1--1.2.sql +++ b/pg_pathman--1.1--1.2.sql @@ -23,6 +23,12 @@ DROP FUNCTION @extschema@.get_sequence_name(TEXT, TEXT); DROP FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT, TEXT); DROP FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT); DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT ANYARRAY); +DROP FUNCTION @extschema@.invalidate_relcache(OID); + +/* drop trigger and its function (PATHMAN_CONFIG_PARAMS) */ +DROP TRIGGER pathman_config_params_trigger; +DROP FUNCTION @extschema@.pathman_config_params_trigger_func(); + /* 
------------------------------------------------------------------------ * Alter functions' modifiers @@ -1265,6 +1271,16 @@ RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' LANGUAGE C; +/* Finally create function and trigger (PATHMAN_CONFIG_PARAMS) */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' +LANGUAGE C; + +CREATE TRIGGER pathman_config_params_trigger +BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + + /* ------------------------------------------------------------------------ * Alter tables * ----------------------------------------------------------------------*/ From 4e86365c2dc15157752ef815e608d4d357549a10 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 1 Dec 2016 12:53:01 +0300 Subject: [PATCH 0109/1124] compatibility fix --- src/pg_pathman.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3deedf25..98cb1261 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1592,7 +1592,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, { /* Generate a partial append path. 
*/ - appendpath = create_append_path(rel, partial_subpaths, NULL, + appendpath = create_append_path_compat(rel, partial_subpaths, NULL, parallel_workers); add_partial_path(rel, (Path *) appendpath); } From ab33a7e2060264d191621685fdcbd43bbcc4a725 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Dec 2016 14:55:26 +0300 Subject: [PATCH 0110/1124] fix missing and misleading include guards --- src/hooks.h | 8 +++++--- src/init.h | 4 +++- src/nodes_common.h | 4 +++- src/partition_creation.h | 7 +++++++ src/partition_filter.h | 8 +++++--- src/pathman.h | 3 ++- src/pathman_workers.h | 4 +++- src/pg_compat.h | 1 + src/rangeset.h | 3 ++- src/relation_info.h | 4 +++- src/runtime_merge_append.h | 4 +++- src/runtimeappend.h | 4 +++- src/utility_stmt_hooking.h | 3 ++- src/utils.h | 4 +++- src/xact_handling.h | 4 +++- 15 files changed, 48 insertions(+), 17 deletions(-) diff --git a/src/hooks.h b/src/hooks.h index 5b349a34..95400fe2 100644 --- a/src/hooks.h +++ b/src/hooks.h @@ -8,8 +8,9 @@ * ------------------------------------------------------------------------ */ -#ifndef JOIN_HOOK_H -#define JOIN_HOOK_H +#ifndef PATHMAN_HOOKS_H +#define PATHMAN_HOOKS_H + #include "postgres.h" #include "optimizer/planner.h" @@ -59,4 +60,5 @@ void pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag); -#endif + +#endif /* PATHMAN_HOOKS_H */ diff --git a/src/init.h b/src/init.h index 7318d253..3a4befad 100644 --- a/src/init.h +++ b/src/init.h @@ -11,6 +11,7 @@ #ifndef PATHMAN_INIT_H #define PATHMAN_INIT_H + #include "relation_info.h" #include "postgres.h" @@ -147,4 +148,5 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull); -#endif + +#endif /* PATHMAN_INIT_H */ diff --git a/src/nodes_common.h b/src/nodes_common.h index f0423a48..17818d2d 100644 --- a/src/nodes_common.h +++ b/src/nodes_common.h @@ -11,6 +11,7 @@ #ifndef NODES_COMMON_H #define NODES_COMMON_H + #include "relation_info.h" #include "postgres.h" @@ -100,4 +101,5 @@ void 
explain_append_common(CustomScanState *node, HTAB *children_table, ExplainState *es); -#endif + +#endif /* NODES_COMMON_H */ diff --git a/src/partition_creation.h b/src/partition_creation.h index 5becce25..1d5d8892 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -8,6 +8,10 @@ *------------------------------------------------------------------------- */ +#ifndef PARTITION_CREATION_H +#define PARTITION_CREATION_H + + #include "relation_info.h" #include "postgres.h" @@ -137,3 +141,6 @@ typedef struct void invoke_part_callback(init_callback_params *cb_params); bool validate_part_callback(Oid procid, bool emit_error); + + +#endif /* PARTITION_CREATION_H */ diff --git a/src/partition_filter.h b/src/partition_filter.h index 4ad4296f..df9a175b 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -8,8 +8,9 @@ * ------------------------------------------------------------------------ */ -#ifndef RUNTIME_INSERT_H -#define RUNTIME_INSERT_H +#ifndef PARTITION_FILTER_H +#define PARTITION_FILTER_H + #include "relation_info.h" #include "utils.h" @@ -135,4 +136,5 @@ ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, Datum value, Oid value_type, EState *estate); -#endif + +#endif /* PARTITION_FILTER_H */ diff --git a/src/pathman.h b/src/pathman.h index 98cc8db1..32e059b3 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -11,6 +11,7 @@ #ifndef PATHMAN_H #define PATHMAN_H + #include "relation_info.h" #include "rangeset.h" @@ -193,4 +194,4 @@ WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) > 0 ) -#endif /* PATHMAN_H */ +#endif /* PATHMAN_H */ diff --git a/src/pathman_workers.h b/src/pathman_workers.h index 974f3087..25ab5e1d 100644 --- a/src/pathman_workers.h +++ b/src/pathman_workers.h @@ -17,6 +17,7 @@ #ifndef PATHMAN_WORKERS_H #define PATHMAN_WORKERS_H + #include "postgres.h" #include "storage/spin.h" @@ -193,4 +194,5 @@ 
UnpackDatumFromByteArray(Datum *datum, Size datum_size, bool typbyval, */ Oid create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type); -#endif + +#endif /* PATHMAN_WORKERS_H */ diff --git a/src/pg_compat.h b/src/pg_compat.h index 130dadf4..9e179d64 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -11,6 +11,7 @@ #ifndef PG_COMPAT_H #define PG_COMPAT_H + #include "postgres.h" #include "nodes/relation.h" diff --git a/src/rangeset.h b/src/rangeset.h index dd65ef1c..3d443285 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -143,4 +143,5 @@ List *irange_list_intersection(List *a, List *b); int irange_list_length(List *rangeset); bool irange_list_find(List *rangeset, int index, bool *lossy); -#endif + +#endif /* PATHMAN_RANGESET_H */ diff --git a/src/relation_info.h b/src/relation_info.h index 771a8056..d5f3eeac 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -11,6 +11,7 @@ #ifndef RELATION_INFO_H #define RELATION_INFO_H + #include "postgres.h" #include "access/attnum.h" #include "port/atomics.h" @@ -198,4 +199,5 @@ FreeRangesArray(PartRelationInfo *prel) } } -#endif + +#endif /* RELATION_INFO_H */ diff --git a/src/runtime_merge_append.h b/src/runtime_merge_append.h index 8dd8dcb1..9aa6aed9 100644 --- a/src/runtime_merge_append.h +++ b/src/runtime_merge_append.h @@ -13,6 +13,7 @@ #ifndef RUNTIME_MERGE_APPEND_H #define RUNTIME_MERGE_APPEND_H + #include "runtimeappend.h" #include "pathman.h" @@ -78,4 +79,5 @@ void runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es); -#endif + +#endif /* RUNTIME_MERGE_APPEND_H */ diff --git a/src/runtimeappend.h b/src/runtimeappend.h index 55c1320e..579afc2a 100644 --- a/src/runtimeappend.h +++ b/src/runtimeappend.h @@ -11,6 +11,7 @@ #ifndef RUNTIME_APPEND_H #define RUNTIME_APPEND_H + #include "pathman.h" #include "nodes_common.h" @@ -91,4 +92,5 @@ void runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es); -#endif + +#endif /* 
RUNTIME_APPEND_H */ diff --git a/src/utility_stmt_hooking.h b/src/utility_stmt_hooking.h index 333ab492..18f86e2e 100644 --- a/src/utility_stmt_hooking.h +++ b/src/utility_stmt_hooking.h @@ -30,4 +30,5 @@ void PathmanRenameConstraint(Oid partition_relid, AttrNumber partitioned_col, const RenameStmt *partition_rename_stmt); -#endif + +#endif /* COPY_STMT_HOOKING_H */ diff --git a/src/utils.h b/src/utils.h index 07d516b9..e81f6026 100644 --- a/src/utils.h +++ b/src/utils.h @@ -11,6 +11,7 @@ #ifndef PATHMAN_UTILS_H #define PATHMAN_UTILS_H + #include "pathman.h" #include "postgres.h" @@ -56,4 +57,5 @@ void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); char * datum_to_cstring(Datum datum, Oid typid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); -#endif + +#endif /* PATHMAN_UTILS_H */ diff --git a/src/xact_handling.h b/src/xact_handling.h index b0a5ffe8..0dd60dc1 100644 --- a/src/xact_handling.h +++ b/src/xact_handling.h @@ -11,6 +11,7 @@ #ifndef XACT_HANDLING_H #define XACT_HANDLING_H + #include "pathman.h" #include "postgres.h" @@ -33,4 +34,5 @@ bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); bool xact_is_set_transaction_stmt(Node *stmt); -#endif + +#endif /* XACT_HANDLING_H */ From e31e4b5e3845f7d32af7edd5a5f7af236c56ae91 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Dec 2016 17:25:53 +0300 Subject: [PATCH 0111/1124] check pg_pathman's Pl/PgSQL frontend for compatibility with pg_pathman's shared library --- init.sql | 4 ++ pg_pathman--1.1--1.2.sql | 5 +++ src/init.c | 96 ++++++++++++++++++++++++++++++++++++++++ src/init.h | 5 +++ src/pl_funcs.c | 10 +++++ 5 files changed, 120 insertions(+) diff --git a/init.sql b/init.sql index db6c45aa..16a1f71a 100644 --- a/init.sql +++ b/init.sql @@ -759,6 +759,10 @@ CREATE OR REPLACE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; +CREATE OR REPLACE FUNCTION 
@extschema@.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +LANGUAGE C STRICT; + /* * Invoke init_callback on RANGE partition. diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql index 9bdf956b..1c6c1900 100644 --- a/pg_pathman--1.1--1.2.sql +++ b/pg_pathman--1.1--1.2.sql @@ -1281,6 +1281,11 @@ BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); +CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +LANGUAGE C STRICT; + + /* ------------------------------------------------------------------------ * Alter tables * ----------------------------------------------------------------------*/ diff --git a/src/init.c b/src/init.c index fc5bde20..411d2672 100644 --- a/src/init.c +++ b/src/init.c @@ -22,6 +22,7 @@ #include "access/sysattr.h" #include "catalog/indexing.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" #include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" @@ -62,6 +63,7 @@ PathmanInitState pg_pathman_init_state; /* Shall we install new relcache callback? */ static bool relcache_callback_needed = true; + /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); @@ -89,6 +91,11 @@ static bool read_opexpr_const(const OpExpr *opexpr, static int oid_cmp(const void *p1, const void *p2); +/* Validate SQL facade */ +static uint32 build_sql_facade_version(char *version_cstr); +static uint32 get_sql_facade_version(void); +static void validate_sql_facade_version(uint32 ver); + /* * Save and restore main init state. 
*/ @@ -167,6 +174,9 @@ load_config(void) if (!init_pathman_relation_oids()) return false; /* remain 'uninitialized', exit before creating main caches */ + /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ + validate_sql_facade_version(get_sql_facade_version()); + init_local_cache(); /* create 'partitioned_rels' hash table */ read_pathman_config(); /* read PATHMAN_CONFIG table & fill cache */ @@ -1098,3 +1108,89 @@ oid_cmp(const void *p1, const void *p2) return 1; return 0; } + + +/* Parse cstring and build uint32 representing the version */ +static uint32 +build_sql_facade_version(char *version_cstr) +{ + uint32 version; + + /* expect to see x+.y+.z+ */ + version = strtol(version_cstr, &version_cstr, 10) & 0xFF; + + version <<= 8; + if (strlen(version_cstr) > 1) + version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + + version <<= 8; + if (strlen(version_cstr) > 1) + version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + + return version; +} + +/* Get version of pg_pathman's facade written in Pl/PgSQL */ +static uint32 +get_sql_facade_version(void) +{ + Relation pg_extension_rel; + ScanKeyData skey; + SysScanDesc scan; + HeapTuple htup; + + Datum datum; + bool isnull; + char *version_cstr; + + /* Look up the extension */ + pg_extension_rel = heap_open(ExtensionRelationId, AccessShareLock); + + ScanKeyInit(&skey, + Anum_pg_extension_extname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum("pg_pathman")); + + scan = systable_beginscan(pg_extension_rel, + ExtensionNameIndexId, + true, NULL, 1, &skey); + + htup = systable_getnext(scan); + + /* Exit if pg_pathman's missing */ + if (!HeapTupleIsValid(htup)) + return 0; + + datum = heap_getattr(htup, Anum_pg_extension_extversion, + RelationGetDescr(pg_extension_rel), &isnull); + Assert(isnull == false); /* extversion should not be NULL */ + + /* Extract pg_pathman's version as cstring */ + version_cstr = text_to_cstring(DatumGetTextPP(datum)); + + systable_endscan(scan); + 
heap_close(pg_extension_rel, AccessShareLock); + + return build_sql_facade_version(version_cstr); +} + +/* Check that current Pl/PgSQL facade is compatible with internals */ +static void +validate_sql_facade_version(uint32 ver) +{ + Assert(ver > 0); + + /* Compare ver to 'lowest compatible frontend' version */ + if (ver < LOWEST_COMPATIBLE_FRONT) + { + elog(DEBUG1, "current version: %x, lowest compatible: %x", + ver, LOWEST_COMPATIBLE_FRONT); + + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("pg_pathman's Pl/PgSQL frontend is incompatible with " + "its shared library"), + errdetail("consider performing an update procedure"), + errhint(INIT_ERROR_HINT))); + } +} diff --git a/src/init.h b/src/init.h index 3a4befad..2787d831 100644 --- a/src/init.h +++ b/src/init.h @@ -95,6 +95,11 @@ extern PathmanInitState pg_pathman_init_state; #define DEFAULT_SPAWN_USING_BGW false +/* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ +#define LOWEST_COMPATIBLE_FRONT 0x010200 +#define CURRENT_LIB_VERSION 0x010200 + + /* * Save and restore PathmanInitState. */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index e0947b8e..154527c6 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -67,6 +67,7 @@ PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); +PG_FUNCTION_INFO_V1( get_pathman_lib_version ); /* @@ -913,3 +914,12 @@ debug_capture(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + +/* + * NOTE: just in case. 
+ */ +Datum +get_pathman_lib_version(PG_FUNCTION_ARGS) +{ + PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); +} From 581a695b292d8ff1d742b1eb15360d0096c2a9c5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Dec 2016 17:48:40 +0300 Subject: [PATCH 0112/1124] fix migration script (broken DROP TRIGGER, after-update-instruction) --- pg_pathman--1.1--1.2.sql | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql index 1c6c1900..d2512065 100644 --- a/pg_pathman--1.1--1.2.sql +++ b/pg_pathman--1.1--1.2.sql @@ -26,7 +26,7 @@ DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT DROP FUNCTION @extschema@.invalidate_relcache(OID); /* drop trigger and its function (PATHMAN_CONFIG_PARAMS) */ -DROP TRIGGER pathman_config_params_trigger; +DROP TRIGGER pathman_config_params_trigger ON @extschema@.pathman_config_params; DROP FUNCTION @extschema@.pathman_config_params_trigger_func(); @@ -1291,3 +1291,14 @@ LANGUAGE C STRICT; * ----------------------------------------------------------------------*/ ALTER TABLE @extschema@.pathman_config_params ADD COLUMN spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE; ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(init_callback)); + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * ----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; \ No newline at end of file From f52920a5362ca5427973be9db0fd62b9508bb2da Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Dec 2016 18:00:26 +0300 Subject: [PATCH 0113/1124] migration script: drop function create_or_replace_sequence(TEXT, TEXT) --- pg_pathman--1.1--1.2.sql | 1 + 1 file changed, 1 insertion(+) diff --git 
a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql index d2512065..4af311b3 100644 --- a/pg_pathman--1.1--1.2.sql +++ b/pg_pathman--1.1--1.2.sql @@ -20,6 +20,7 @@ DROP FUNCTION @extschema@.get_schema_qualified_name(REGCLASS, TEXT, TEXT); DROP FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS); DROP FUNCTION @extschema@.validate_on_partition_created_callback(REGPROC); DROP FUNCTION @extschema@.get_sequence_name(TEXT, TEXT); +DROP FUNCTION @extschema@.create_or_replace_sequence(TEXT, TEXT); DROP FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT, TEXT); DROP FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT); DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT ANYARRAY); From e48fc6c6eca66e66c3b37562b6273e07503acf8e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Dec 2016 19:28:12 +0300 Subject: [PATCH 0114/1124] initialize variables (reported by valgrind) --- src/pg_pathman.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 98cb1261..10769666 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -135,8 +135,10 @@ _PG_init(void) /* NOTE: we don't need LWLocks now. 
RequestAddinLWLocks(1); */ /* Assign pg_pathman's initial state */ - temp_init_state.initialization_needed = true; temp_init_state.pg_pathman_enable = true; + temp_init_state.auto_partition = true; + temp_init_state.override_copy = true; + temp_init_state.initialization_needed = true; /* Apply initial state */ restore_pathman_init_state(&temp_init_state); From 5f455c14bcdff6113c35d5b4e3ce18901709f4f9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Dec 2016 14:37:50 +0300 Subject: [PATCH 0115/1124] improve README.md --- README.md | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 7a7788a5..c54b8b99 100644 --- a/README.md +++ b/README.md @@ -254,6 +254,11 @@ Set partition creation callback to be invoked for each attached or created parti } ``` +```plpgsql +set_set_spawn_using_bgw(relation REGCLASS, value BOOLEAN) +``` +When INSERTing new data beyond the partitioning range, use SpawnPartitionsWorker to create new partitions in a separate transaction. + ## Views and tables #### `pathman_config` --- main config storage @@ -262,19 +267,18 @@ CREATE TABLE IF NOT EXISTS pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, attname TEXT NOT NULL, parttype INTEGER NOT NULL, - range_interval TEXT, - - CHECK (parttype IN (1, 2)) /* check for allowed part types */ ); + range_interval TEXT); ``` This table stores a list of partitioned tables. 
#### `pathman_config_params` --- optional parameters ```plpgsql CREATE TABLE IF NOT EXISTS pathman_config_params ( - partrel REGCLASS NOT NULL PRIMARY KEY, - enable_parent BOOLEAN NOT NULL DEFAULT TRUE, - auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0); + partrel REGCLASS NOT NULL PRIMARY KEY, + enable_parent BOOLEAN NOT NULL DEFAULT TRUE, + auto BOOLEAN NOT NULL DEFAULT TRUE, + init_callback REGPROCEDURE NOT NULL DEFAULT 0, + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); ``` This table stores optional parameters which override standard behavior. From 97d54fc694d3f6707c6a47907992f0ceffd9f34f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Dec 2016 15:47:53 +0300 Subject: [PATCH 0116/1124] remove BOM marks --- src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 0d77da87..54832c40 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1,4 +1,4 @@ -/*------------------------------------------------------------------------- +/*------------------------------------------------------------------------- * * partition_creation.c * Various functions for partition creation. 
From 2991144d527a5c1602f0c69c7872aed4271f2b78 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Dec 2016 16:42:21 +0300 Subject: [PATCH 0117/1124] fix function choose_range_partition_name() --- src/partition_creation.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 54832c40..c0e6765f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -657,7 +657,9 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); - return psprintf("%s_%u", get_rel_name(parent_relid), DatumGetInt32(part_num)); + return psprintf("%s_" UINT64_FORMAT, + get_rel_name(parent_relid), + DatumGetUInt64(part_num)); } /* Choose a good name for a HASH partition */ From d71fbcd2c13551d8c6486c7ca96e6dcb8ac3515a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Dec 2016 17:03:27 +0300 Subject: [PATCH 0118/1124] use DatumGetInt64 instead of DatumGetUInt64 (9.5 compatibility) --- src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index c0e6765f..69c1a055 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -659,7 +659,7 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) return psprintf("%s_" UINT64_FORMAT, get_rel_name(parent_relid), - DatumGetUInt64(part_num)); + (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ } /* Choose a good name for a HASH partition */ From 135c8528bd155dfa66eeb52a674511c48122a35c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Dec 2016 17:11:54 +0300 Subject: [PATCH 0119/1124] update CURRENT_LIB_VERSION --- src/init.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/init.h b/src/init.h index 2787d831..c769937e 100644 --- a/src/init.h +++ b/src/init.h @@ -97,7 +97,9 @@ extern PathmanInitState 
pg_pathman_init_state; /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ #define LOWEST_COMPATIBLE_FRONT 0x010200 -#define CURRENT_LIB_VERSION 0x010200 + +/* Current version on native C library (0xAA_BB_CC) */ +#define CURRENT_LIB_VERSION 0x010201 /* From 8249f7e983e0def9344935edb93474ebffaaec9d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Dec 2016 14:41:55 +0300 Subject: [PATCH 0120/1124] fix src\partition_creation.h(101): error C2016 --- src/partition_creation.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_creation.h b/src/partition_creation.h index 1d5d8892..ec36edf0 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -97,7 +97,7 @@ typedef struct { struct { - /* nothing */ + void *none; /* nothing (struct should have at least 1 element) */ } hash_params; struct @@ -130,9 +130,9 @@ typedef struct do \ { \ memset((void *) (params_p), 0, sizeof(init_callback_params)); \ + (params_p)->cb_type = PT_INIT_CALLBACK; \ (params_p)->callback = (cb); \ (params_p)->callback_is_cached = false; \ - (params_p)->cb_type = PT_INIT_CALLBACK; \ (params_p)->parttype = PT_HASH; \ (params_p)->parent_relid = (parent); \ (params_p)->partition_relid = (child); \ From f518cf5624a7f8b2d5afd142eac55c19d82e5615 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 7 Dec 2016 17:55:52 +0300 Subject: [PATCH 0121/1124] resolve bgw_main_* functions by name (BGW) --- src/pathman_workers.c | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index b744ef07..21af4441 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -47,15 +47,19 @@ PG_FUNCTION_INFO_V1( show_concurrent_part_tasks_internal ); PG_FUNCTION_INFO_V1( stop_concurrent_part_task ); +/* + * Dynamically resolve functions (for BGW API). 
+ */ +extern PGDLLEXPORT void bgw_main_spawn_partitions(Datum main_arg); +extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); + + static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], - bgworker_main_type bgw_main_func, + const char bgworker_proc[BGW_MAXLEN], Datum bgw_arg, bool wait_for_shutdown); -static void bgw_main_spawn_partitions(Datum main_arg); -static void bgw_main_concurrent_part(Datum main_arg); - /* * Function context for concurrent_part_tasks_internal() SRF. @@ -157,7 +161,7 @@ bg_worker_load_config(const char *bgw_name) */ static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], - bgworker_main_type bgw_main_func, + const char bgworker_proc[BGW_MAXLEN], Datum bgw_arg, bool wait_for_shutdown) { #define HandleError(condition, new_state) \ @@ -179,12 +183,16 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], /* Initialize worker struct */ memcpy(worker.bgw_name, bgworker_name, BGW_MAXLEN); - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; - worker.bgw_start_time = BgWorkerStart_RecoveryFinished; - worker.bgw_restart_time = BGW_NEVER_RESTART; - worker.bgw_main = bgw_main_func; - worker.bgw_main_arg = bgw_arg; - worker.bgw_notify_pid = MyProcPid; + memcpy(worker.bgw_function_name, bgworker_proc, BGW_MAXLEN); + memcpy(worker.bgw_library_name, "pg_pathman", BGW_MAXLEN); + + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | + BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + worker.bgw_restart_time = BGW_NEVER_RESTART; + worker.bgw_main = NULL; + worker.bgw_main_arg = bgw_arg; + worker.bgw_notify_pid = MyProcPid; /* Start dynamic worker */ bgw_started = RegisterDynamicBackgroundWorker(&worker, &bgw_handle); @@ -301,7 +309,7 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) /* Start worker and wait for it to finish */ 
start_bg_worker(spawn_partitions_bgw, - bgw_main_spawn_partitions, + CppAsString(bgw_main_spawn_partitions), UInt32GetDatum(segment_handle), true); @@ -323,7 +331,7 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) /* * Entry point for SpawnPartitionsWorker's process. */ -static void +void bgw_main_spawn_partitions(Datum main_arg) { dsm_handle handle = DatumGetUInt32(main_arg); @@ -403,7 +411,7 @@ bgw_main_spawn_partitions(Datum main_arg) /* * Entry point for ConcurrentPartWorker's process. */ -static void +void bgw_main_concurrent_part(Datum main_arg) { int rows; @@ -671,7 +679,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Start worker (we should not wait) */ start_bg_worker(concurrent_part_bgw, - bgw_main_concurrent_part, + CppAsString(bgw_main_concurrent_part), Int32GetDatum(empty_slot_idx), false); From 350edca71f0b5c2d6d8e4e8e2467e752183112fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 20 Dec 2016 17:15:25 +0300 Subject: [PATCH 0122/1124] clean code in range.sql --- init.sql | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/init.sql b/init.sql index 16a1f71a..a81af522 100644 --- a/init.sql +++ b/init.sql @@ -314,11 +314,6 @@ CREATE OR REPLACE FUNCTION @extschema@.partition_data( OUT p_total BIGINT) AS $$ -DECLARE - relname TEXT; - rec RECORD; - cnt BIGINT := 0; - BEGIN p_total := 0; @@ -699,13 +694,13 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS, - attribute INT2) + attribute INT2) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS, - attribute TEXT) + attribute TEXT) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' LANGUAGE C STRICT; From c2a77f59391c996a4d15c36d1830e4409e991513 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 21 Dec 2016 14:54:26 +0300 
Subject: [PATCH 0123/1124] fix bug in handle_modification_query() (erroneous optimization) --- src/planner_tree_modification.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3ea63524..c541ef3f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -301,6 +301,10 @@ handle_modification_query(Query *parse) return; rte = rt_fetch(result_rel, parse->rtable); + + /* Exit if it's DELETE FROM ONLY table */ + if (!rte->inh) return; + prel = get_pathman_relation_info(rte->relid); /* Exit if it's not partitioned */ From 9555f744b01a6f4380840cecbdd12abe403c0a94 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 22 Dec 2016 15:16:03 +0300 Subject: [PATCH 0124/1124] refactoring, new log messages (DEBUG2), fix function find_inheritance_children_array() --- src/init.c | 59 ++++++++++++++++++++++++---------------- src/partition_creation.c | 8 ++---- src/pathman_workers.c | 25 +++++++++++++---- src/relation_info.c | 15 +++++++++- src/xact_handling.c | 11 ++++++++ src/xact_handling.h | 1 + 6 files changed, 82 insertions(+), 37 deletions(-) diff --git a/src/init.c b/src/init.c index 411d2672..cfa25f19 100644 --- a/src/init.c +++ b/src/init.c @@ -486,25 +486,28 @@ find_inheritance_children_array(Oid parentrelId, SysScanDesc scan; ScanKeyData key[1]; HeapTuple inheritsTuple; - Oid inhrelid; + Oid *oidarr; uint32 maxoids, - numoids, - i; + numoids; + + Oid *result = NULL; + uint32 nresult = 0; + + uint32 i; + + Assert(lockmode != NoLock); + + /* Init safe return values */ + *children_size = 0; + *children = NULL; /* - * Can skip the scan if pg_class shows the relation has never had a - * subclass. + * Can skip the scan if pg_class shows the + * relation has never had a subclass. 
*/ if (!has_subclass(parentrelId)) - { - /* Init return values */ - *children_size = 0; - children = NULL; - - /* Ok, could not find any children */ return FCS_NO_CHILDREN; - } /* * Scan pg_inherits and build a working array of subclass OIDs. @@ -525,6 +528,8 @@ find_inheritance_children_array(Oid parentrelId, while ((inheritsTuple = systable_getnext(scan)) != NULL) { + Oid inhrelid; + inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid; if (numoids >= maxoids) { @@ -547,12 +552,10 @@ find_inheritance_children_array(Oid parentrelId, if (numoids > 1) qsort(oidarr, numoids, sizeof(Oid), oid_cmp); - /* - * Acquire locks and build the result list. - */ + /* Acquire locks and build the result list */ for (i = 0; i < numoids; i++) { - inhrelid = oidarr[i]; + Oid inhrelid = oidarr[i]; if (lockmode != NoLock) { @@ -567,9 +570,7 @@ find_inheritance_children_array(Oid parentrelId, for (j = 0; j < i; j++) UnlockRelationOid(oidarr[j], lockmode); - /* Init return values */ - *children_size = numoids; - *children = oidarr; + pfree(oidarr); /* We couldn't lock this child, retreat! */ return FCS_COULD_NOT_LOCK; @@ -586,18 +587,28 @@ find_inheritance_children_array(Oid parentrelId, { /* Release useless lock */ UnlockRelationOid(inhrelid, lockmode); + /* And ignore this relation */ continue; } } + + /* Alloc array if it's the first time */ + if (nresult == 0) + result = palloc(numoids * sizeof(Oid)); + + /* Save Oid of the existing relation */ + result[nresult++] = inhrelid; } - /* Init return values */ - *children_size = numoids; - *children = oidarr; + /* Set return values */ + *children_size = nresult; + *children = result; + + pfree(oidarr); - /* Ok, we have children */ - return FCS_FOUND; + /* Do we have children? */ + return nresult > 0 ? 
FCS_FOUND : FCS_NO_CHILDREN; } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index 69c1a055..6b53c6f4 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -236,11 +236,6 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) /* Check that table is partitioned and fetch xmin */ if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) { - /* Was table partitioned in some previous transaction? */ - bool part_in_prev_xact = - TransactionIdPrecedes(rel_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(rel_xmin, FrozenTransactionId); - /* Take default values */ bool spawn_using_bgw = DEFAULT_SPAWN_USING_BGW, enable_auto = DEFAULT_AUTO; @@ -264,7 +259,8 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * If table has been partitioned in some previous xact AND * we don't hold any conflicting locks, run BGWorker. */ - if (spawn_using_bgw && part_in_prev_xact && + if (spawn_using_bgw && + xact_object_is_visible(rel_xmin) && !xact_bgw_conflicting_lock_exists(relid)) { elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 21af4441..235241be 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -19,6 +19,7 @@ #include "pathman_workers.h" #include "relation_info.h" #include "utils.h" +#include "xact_handling.h" #include "access/htup_details.h" #include "access/xact.h" @@ -601,11 +602,12 @@ bgw_main_concurrent_part(Datum main_arg) Datum partition_table_concurrently(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - int32 batch_size = PG_GETARG_INT32(1); - float8 sleep_time = PG_GETARG_FLOAT8(2); - int empty_slot_idx = -1, /* do we have a slot for BGWorker? */ - i; + Oid relid = PG_GETARG_OID(0); + int32 batch_size = PG_GETARG_INT32(1); + float8 sleep_time = PG_GETARG_FLOAT8(2); + int empty_slot_idx = -1, /* do we have a slot for BGWorker? 
*/ + i; + TransactionId rel_xmin; /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) @@ -622,6 +624,16 @@ partition_table_concurrently(PG_FUNCTION_ARGS) get_pathman_relation_info_after_lock(relid, true, NULL), /* Partitioning type does not matter here */ PT_INDIFFERENT); + + /* Check that partitioning operation result is visible */ + if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) + { + if (!xact_object_is_visible(rel_xmin)) + ereport(ERROR, (errmsg("cannot start %s", concurrent_part_bgw), + errdetail("table is being partitioned now"))); + } + else elog(ERROR, "cannot find relation %d", relid); + /* * Look for an empty slot and also check that a concurrent * partitioning operation for this table hasn't been started yet @@ -686,7 +698,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Tell user everything's fine */ elog(NOTICE, "worker started, you can stop it " - "with the following command: select %s('%s');", + "with the following command: select %s.%s('%s');", + get_namespace_name(get_pathman_schema()), CppAsString(stop_concurrent_part_task), get_rel_name(relid)); diff --git a/src/relation_info.c b/src/relation_info.c index e20bada6..ee9be8fe 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -162,17 +162,26 @@ refresh_pathman_relation_info(Oid relid, { /* If there's no children at all, remove this entry */ case FCS_NO_CHILDREN: + elog(DEBUG2, "refresh: relation %u has no children [%u]", + relid, MyProcPid); + UnlockRelationOid(relid, lockmode); remove_pathman_relation_info(relid); return NULL; /* exit */ /* If can't lock children, leave an invalid entry */ case FCS_COULD_NOT_LOCK: + elog(DEBUG2, "refresh: cannot lock children of relation %u [%u]", + relid, MyProcPid); + UnlockRelationOid(relid, lockmode); return NULL; /* exit */ /* Found some children, just unlock parent */ case FCS_FOUND: + elog(DEBUG2, "refresh: found children of relation %u [%u]", + relid, MyProcPid); + UnlockRelationOid(relid, lockmode); 
break; /* continue */ @@ -200,7 +209,8 @@ refresh_pathman_relation_info(Oid relid, UnlockRelationOid(prel_children[i], lockmode); } - pfree(prel_children); + if (prel_children) + pfree(prel_children); /* Read additional parameters ('enable_parent' and 'auto' at the moment) */ if (read_pathman_params(relid, param_values, param_isnull)) @@ -292,6 +302,9 @@ get_pathman_relation_info(Oid relid) "Fetching %s record for relation %u from pg_pathman's cache [%u]", (prel ? "live" : "NULL"), relid, MyProcPid); + /* Make sure that 'prel' is valid */ + Assert(PrelIsValid(prel)); + return prel; } diff --git a/src/xact_handling.c b/src/xact_handling.c index 898cc9b8..39030e6a 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -11,6 +11,7 @@ #include "xact_handling.h" #include "postgres.h" +#include "access/transam.h" #include "access/xact.h" #include "catalog/catalog.h" #include "miscadmin.h" @@ -156,6 +157,16 @@ xact_is_set_transaction_stmt(Node *stmt) return false; } +/* + * Check if object is visible in newer transactions. + */ +bool +xact_object_is_visible(TransactionId obj_xmin) +{ + return TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()) || + TransactionIdEquals(obj_xmin, FrozenTransactionId); +} + /* * Do we hold the specified lock? 
*/ diff --git a/src/xact_handling.h b/src/xact_handling.h index 0dd60dc1..d10064a1 100644 --- a/src/xact_handling.h +++ b/src/xact_handling.h @@ -33,6 +33,7 @@ bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); bool xact_is_set_transaction_stmt(Node *stmt); +bool xact_object_is_visible(TransactionId obj_xmin); #endif /* XACT_HANDLING_H */ From 756b7f3c6723977dc3c85203b66563b02ad7ebc1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 22 Dec 2016 18:50:57 +0300 Subject: [PATCH 0125/1124] abort running concurrent partitioning task if table is not partitioned anymore --- src/pathman_workers.c | 53 +++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 235241be..930c5623 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -36,6 +36,7 @@ #include "utils/datum.h" #include "utils/memutils.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" #include "utils/typcache.h" #include "utils/resowner.h" #include "utils/snapmgr.h" @@ -488,6 +489,17 @@ bgw_main_concurrent_part(Datum main_arg) int ret; bool isnull; + /* Make sure that relation exists and has partitions */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) || + get_pathman_relation_info(part_slot->relid) == NULL) + { + /* Fail fast */ + failures_count = PART_WORKER_MAX_ATTEMPTS; + + elog(ERROR, "relation %u is not partitioned (or does not exist)", + part_slot->relid); + } + ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); if (ret == SPI_OK_SELECT) { @@ -525,25 +537,6 @@ bgw_main_concurrent_part(Datum main_arg) FreeErrorData(error); - /* - * The most common exception we can catch here is a deadlock with - * concurrent user queries. 
Check that attempts count doesn't exceed - * some reasonable value - */ - if (failures_count >= PART_WORKER_MAX_ATTEMPTS) - { - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); - - elog(LOG, - "concurrent partitioning worker has canceled the task because " - "maximum amount of attempts (%d) had been exceeded, " - "see the error message below", - PART_WORKER_MAX_ATTEMPTS); - - return; /* exit quickly */ - } - /* Set 'failed' flag */ failed = true; } @@ -552,7 +545,27 @@ bgw_main_concurrent_part(Datum main_arg) SPI_finish(); PopActiveSnapshot(); - if (failed) + /* + * The most common exception we can catch here is a deadlock with + * concurrent user queries. Check that attempts count doesn't exceed + * some reasonable value + */ + if (failures_count >= PART_WORKER_MAX_ATTEMPTS) + { + AbortCurrentTransaction(); + + /* Mark slot as FREE */ + cps_set_status(part_slot, CPS_FREE); + + elog(LOG, + "concurrent partitioning worker has canceled the task because " + "maximum amount of attempts (%d) had been exceeded, " + "see the error message below", + PART_WORKER_MAX_ATTEMPTS); + + return; + } + else if (failed) { /* Abort transaction and sleep for a second */ AbortCurrentTransaction(); From 41e1e7ace37d161977d546b5c3ae1e48c41b7af5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 23 Dec 2016 16:34:11 +0300 Subject: [PATCH 0126/1124] rewrite some code (make clang analyzer happy again) --- src/pathman_workers.c | 58 ++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 930c5623..b3a55b76 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -486,35 +486,42 @@ bgw_main_concurrent_part(Datum main_arg) /* Exec ret = _partition_data_concurrent() */ PG_TRY(); { - int ret; - bool isnull; - /* Make sure that relation exists and has partitions */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) || - 
get_pathman_relation_info(part_slot->relid) == NULL) + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) && + get_pathman_relation_info(part_slot->relid) != NULL) { - /* Fail fast */ - failures_count = PART_WORKER_MAX_ATTEMPTS; + int ret; + bool isnull; - elog(ERROR, "relation %u is not partitioned (or does not exist)", - part_slot->relid); - } + ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); + if (ret == SPI_OK_SELECT) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tuple = SPI_tuptable->vals[0]; - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); - if (ret == SPI_OK_SELECT) - { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - HeapTuple tuple = SPI_tuptable->vals[0]; + Assert(SPI_processed == 1); /* there should be 1 result at most */ - Assert(SPI_processed == 1); /* there should be 1 result at most */ + rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); - rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(!isnull); /* ... and ofc it must not be NULL */ + } + } + /* Otherwise it's time to exit */ + else + { + failures_count = PART_WORKER_MAX_ATTEMPTS; - Assert(!isnull); /* ... and ofc it must not be NULL */ + elog(LOG, "relation %u is not partitioned (or does not exist)", + part_slot->relid); } } PG_CATCH(); { + /* + * The most common exception we can catch here is a deadlock with + * concurrent user queries. Check that attempts count doesn't exceed + * some reasonable value + */ ErrorData *error; char *sleep_time_str; @@ -545,11 +552,7 @@ bgw_main_concurrent_part(Datum main_arg) SPI_finish(); PopActiveSnapshot(); - /* - * The most common exception we can catch here is a deadlock with - * concurrent user queries. 
Check that attempts count doesn't exceed - * some reasonable value - */ + /* We've run out of attempts, exit */ if (failures_count >= PART_WORKER_MAX_ATTEMPTS) { AbortCurrentTransaction(); @@ -563,14 +566,19 @@ bgw_main_concurrent_part(Datum main_arg) "see the error message below", PART_WORKER_MAX_ATTEMPTS); - return; + return; /* time to exit */ } + + /* Failed this time, wait */ else if (failed) { /* Abort transaction and sleep for a second */ AbortCurrentTransaction(); + DirectFunctionCall1(pg_sleep, Float8GetDatum(part_slot->sleep_time)); } + + /* Everything is fine */ else { /* Commit transaction and reset 'failures_count' */ @@ -592,7 +600,7 @@ bgw_main_concurrent_part(Datum main_arg) if (cps_check_status(part_slot) == CPS_STOPPING) break; } - while(rows > 0 || failed); /* do while there's still rows to be relocated */ + while(rows > 0 || failed); /* do while there's still rows to be relocated */ /* Reclaim the resources */ pfree(sql); From 8d5364f89c6afa81e3b12d9a799f766c3e7e7034 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 27 Dec 2016 14:11:00 +0300 Subject: [PATCH 0127/1124] fix code coverage test facility --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 1b29038f..ca22b0e6 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -99,7 +99,7 @@ pip install $pip_packages # run python tests cd tests/python PG_CONFIG=$config_path python -m unittest partitioning_test || status=$? -cd .. +cd ../.. 
set -u From 4bbab4ce3e3ee467cfbd199daf0ba3cd649b8c13 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 27 Dec 2016 17:05:32 +0300 Subject: [PATCH 0128/1124] extend calamity tests --- expected/pathman_calamity.out | 126 +++++++++++++++++++++++++++++++++- sql/pathman_calamity.sql | 34 +++++++++ 2 files changed, 159 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 83ea60c0..ca22b1c1 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,6 +12,127 @@ SELECT debug_capture(); set client_min_messages = NOTICE; /* create table to be partitioned */ CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +NOTICE: sequence "part_test_seq" does not exist, skipping + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition 
+------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); +NOTICE: function 
calamity.part_test_upd_trig_func() does not exist, skipping + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; /* check function build_hash_condition() */ SELECT build_hash_condition('int4', 'val', 10, 1); build_hash_condition @@ -204,6 +325,9 @@ SELECT build_update_trigger_func_name(NULL) IS NULL; t (1 row) +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::regclass); +ERROR: cannot find worker for relation "1" /* check invoke_on_partition_created_callback() for RANGE */ SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); ERROR: both bounds must be provided for RANGE partition @@ -393,5 +517,5 @@ SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disab (1 row) DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 9beb0861..86ba405d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -14,6 +14,37 @@ set client_min_messages = NOTICE; CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT drop_partitions('calamity.part_test'); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT drop_partitions('calamity.part_test'); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT append_range_partition('calamity.part_test'); +SELECT drop_partitions('calamity.part_test'); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT append_range_partition('calamity.part_test'); +SELECT drop_partitions('calamity.part_test'); + +SELECT count(*) FROM calamity.part_test; + +DELETE FROM calamity.part_test; + + +/* test stub 
'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; +SELECT drop_partitions('calamity.part_test', true); +DELETE FROM calamity.part_test; + + /* check function build_hash_condition() */ SELECT build_hash_condition('int4', 'val', 10, 1); SELECT build_hash_condition('text', 'val', 10, 1); @@ -71,6 +102,9 @@ SELECT build_update_trigger_name(NULL) IS NULL; SELECT build_update_trigger_func_name('calamity.part_test'); SELECT build_update_trigger_func_name(NULL) IS NULL; +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::regclass); + /* check invoke_on_partition_created_callback() for RANGE */ SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); From 49018205bcfbecb58759c124807355a27e2e2b8e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Jan 2017 16:05:46 +0300 Subject: [PATCH 0129/1124] detach_range_partition(): lock parent in access exclusive mode (needed for refresh_pathman_relation_info()) --- range.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/range.sql b/range.sql index 396d8f30..49e035a3 100644 --- a/range.sql +++ b/range.sql @@ -1087,7 +1087,7 @@ BEGIN parent_relid := @extschema@.get_parent_of_partition(partition); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_relation_modification(parent_relid); v_attname := attname FROM @extschema@.pathman_config From 04b1b20b2fd2daa3628ff028789598b2d56afaa4 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 9 
Jan 2017 16:30:32 +0300 Subject: [PATCH 0130/1124] changes to plpgsql funcs for enterprise edition --- init.sql | 41 +++++++++++++++++------ range.sql | 72 ++++++++++++++++++++-------------------- src/partition_creation.c | 3 ++ src/pl_funcs.c | 19 ++++++++++- 4 files changed, 87 insertions(+), 48 deletions(-) diff --git a/init.sql b/init.sql index 16a1f71a..89e025c4 100644 --- a/init.sql +++ b/init.sql @@ -182,12 +182,12 @@ LANGUAGE plpgsql STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - partattr TEXT, - range_min TEXT, - range_max TEXT) + parent REGCLASS, + "partition" REGCLASS, + parttype INT4, + partattr TEXT, + range_min TEXT, + range_max TEXT) AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; @@ -580,7 +580,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, - partition REGCLASS) + partition_relid REGCLASS) RETURNS VOID AS $$ DECLARE @@ -588,19 +588,30 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); + PERFORM @extschema@.validate_relname(partition_relid); FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP EXECUTE format('ALTER TABLE %s ADD %s', - partition::TEXT, + partition_relid::TEXT, pg_catalog.pg_get_constraintdef(rec.conid)); END LOOP; END $$ LANGUAGE plpgsql STRICT; +/* + * Partitioning key + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key(relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT attname FROM pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + /* * Create DDL trigger to call pathman_ddl_trigger_func(). 
*/ @@ -659,6 +670,14 @@ CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' LANGUAGE C STRICT; +/* + * Return partition key type + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' +LANGUAGE C STRICT; + /* * Return tablespace name for specified relation. */ @@ -769,7 +788,7 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - partition REGCLASS, + "partition" REGCLASS, init_callback REGPROCEDURE, start_value ANYELEMENT, end_value ANYELEMENT) @@ -781,7 +800,7 @@ LANGUAGE C; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - partition REGCLASS, + "partition" REGCLASS, init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; diff --git a/range.sql b/range.sql index 51a3d863..810acfc6 100644 --- a/range.sql +++ b/range.sql @@ -439,7 +439,7 @@ $$ LANGUAGE plpgsql; * Split RANGE partition */ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, + partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL, @@ -456,13 +456,13 @@ DECLARE v_check_name TEXT; BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); + v_parent = @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(v_parent); /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition); + PERFORM @extschema@.prevent_relation_modification(partition_relid); SELECT attname, parttype FROM @extschema@.pathman_config @@ -475,7 +475,7 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE 
partition', partition::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); @@ -483,7 +483,7 @@ BEGIN /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', @extschema@.get_base_type(v_atttype)::TEXT) - USING partition + USING partition_relid INTO p_range; IF p_range IS NULL THEN @@ -509,21 +509,21 @@ BEGIN v_attname, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, + partition_relid::TEXT, v_cond, v_new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(partition::regclass, + v_cond := @extschema@.build_range_condition(partition_relid::regclass, v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, + partition_relid::TEXT, v_check_name); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, + partition_relid::TEXT, v_check_name, v_cond); @@ -958,7 +958,7 @@ LANGUAGE plpgsql; * Drop range partition */ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition REGCLASS, + partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -970,8 +970,8 @@ DECLARE v_part_type INTEGER; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - part_name := partition::TEXT; /* save the name to be returned */ + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + part_name := partition_relid::TEXT; /* save the name to be returned */ SELECT parttype FROM @extschema@.pathman_config @@ -980,7 +980,7 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE 
EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Acquire lock on parent */ @@ -989,15 +989,15 @@ BEGIN IF NOT delete_data THEN EXECUTE format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, - partition::TEXT); + partition_relid::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; END IF; SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition + WHERE oid = partition_relid INTO v_relkind; /* @@ -1006,9 +1006,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition::TEXT); + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); END IF; /* Invalidate cache */ @@ -1026,7 +1026,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, - partition REGCLASS, + partition_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS @@ -1038,29 +1038,29 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); + PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; + WHERE oid = partition_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; + partition_relid::TEXT; END IF; /* check range overlap */ PERFORM 
@extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; @@ -1070,9 +1070,9 @@ BEGIN /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - @extschema@.build_range_condition(partition, + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname), + @extschema@.build_range_condition(partition_relid, v_attname, start_value, end_value)); @@ -1086,7 +1086,7 @@ BEGIN INTO v_init_callback; PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, + partition_relid, v_init_callback, start_value, end_value); @@ -1094,7 +1094,7 @@ BEGIN /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; @@ -1104,7 +1104,7 @@ LANGUAGE plpgsql; * Detach range partition */ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) + partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -1112,7 +1112,7 @@ DECLARE parent_relid REGCLASS; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); + parent_relid := @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); @@ -1127,18 +1127,18 @@ BEGIN /* Remove inheritance */ EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, + 
partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname)); /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; diff --git a/src/partition_creation.c b/src/partition_creation.c index 86473663..84dc34d2 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -780,6 +780,9 @@ create_single_partition_internal(Oid parent_relid, create_stmt.oncommit = ONCOMMIT_NOOP; create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; +#ifdef PGPRO_VERSION + create_stmt.partition_info = NULL; +#endif /* Do we have to escalate privileges? */ if (need_priv_escalation) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f3281e52..edda6754 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -42,6 +42,7 @@ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_attribute_type_pl ); +PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_partition_list_internal ); @@ -228,6 +229,23 @@ get_attribute_type_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(get_attribute_type(relid, text_to_cstring(attname), false)); } +/* + * Return partition key type + */ +Datum +get_partition_key_type(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + const PartRelationInfo *prel = get_pathman_relation_info(relid); + + if (!prel) + elog(ERROR, + "Relation '%s' isn't partitioned by pg_pathman", + get_rel_name(relid)); + + PG_RETURN_OID(prel->atttype); +} + /* * Return tablespace name for specified relation */ @@ -254,7 +272,6 @@ get_tablespace_pl(PG_FUNCTION_ARGS) 
PG_RETURN_TEXT_P(cstring_to_text(result)); } - /* * ---------------------- * Common purpose VIEWs From 592efd0878d40a2317ac90c6356cc636b1bacc09 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jan 2017 12:20:03 +0300 Subject: [PATCH 0131/1124] fix assert in get_pathman_relation_info() --- src/relation_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index ee9be8fe..681711c4 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -303,7 +303,7 @@ get_pathman_relation_info(Oid relid) (prel ? "live" : "NULL"), relid, MyProcPid); /* Make sure that 'prel' is valid */ - Assert(PrelIsValid(prel)); + Assert(!prel || PrelIsValid(prel)); return prel; } From 64cfd2570a4214691de72cff82acaebeb74addad Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 10 Jan 2017 16:24:05 +0300 Subject: [PATCH 0132/1124] Add stress test for concurrent detach --- tests/python/partitioning_test.py | 53 +++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 7f0ee753..568be2d9 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -7,9 +7,12 @@ """ import unittest +import math from testgres import get_new_node, stop_all import time import os +import re +import subprocess import threading @@ -708,6 +711,56 @@ def con2_thread(): node.stop() node.cleanup() + def test_concurrent_detach(self): + """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + # Create and start new instance + node = 
self.start_new_pathman_cluster(allows_streaming=False) + + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute('create table ts_range_partitioned(ts timestamp not null)') + con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + FNULL = open(os.devnull, 'w') + inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ + "-j", "%i" % num_insert_workers, + "-c", "%i" % num_insert_workers, + "-f", "pgbench_scripts/insert_current_timestamp.pgbench", + "-T", "%i" % (test_interval+inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ + "-D", "timeout=%f" % detach_timeout, + "-f", "pgbench_scripts/detachs_in_timeout.pgbench", + "-T", "%i" % test_interval + ]) + + # Wait for completion of processes + inserts.wait() + detachs.wait() + + # Obtain error log from inserts process + inserts_errors = inserts.stderr.read() + + self.assertIsNone( + re.search("ERROR: constraint", inserts_errors), + msg="Race condition between detach and concurrent inserts with append partition is expired") + + # Stop instance and finish work + node.stop() + node.cleanup() + if __name__ == "__main__": unittest.main() From 115296a0dca0df42942f31491ed2bdefce9a9e33 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jan 2017 13:58:29 +0300 Subject: [PATCH 0133/1124] borrow make_inh_translation_list() from postgres --- src/pg_pathman.c | 135 ++++++++++++++++++++++++++++------------------- 1 file changed, 81 insertions(+), 54 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 10769666..baeed277 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -74,9 +74,8 @@ static bool pull_var_param(const WalkerContext *ctx, /* Misc */ -static List 
*make_inh_translation_list_simplified(Relation oldrelation, - Relation newrelation, - Index newvarno); +static void make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars); /* Copied from allpaths.h */ @@ -165,79 +164,107 @@ _PG_init(void) } /* - * Build the list of translations from parent Vars to child Vars - * for an inheritance child. + * make_inh_translation_list + * Build the list of translations from parent Vars to child Vars for + * an inheritance child. * - * NOTE: Inspired by function make_inh_translation_list(). + * For paranoia's sake, we match type/collation as well as attribute name. + * + * NOTE: borrowed from prepunion.c */ -static List * -make_inh_translation_list_simplified(Relation oldrelation, - Relation newrelation, - Index newvarno) +static void +make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars) { List *vars = NIL; TupleDesc old_tupdesc = RelationGetDescr(oldrelation); TupleDesc new_tupdesc = RelationGetDescr(newrelation); - int oldnatts = RelationGetNumberOfAttributes(oldrelation); - int newnatts = RelationGetNumberOfAttributes(newrelation); + int oldnatts = old_tupdesc->natts; + int newnatts = new_tupdesc->natts; int old_attno; - /* Amounts of attributes must match */ - if (oldnatts != newnatts) - goto inh_translation_list_error; - - /* We expect that parent and partition have an identical tupdesc */ for (old_attno = 0; old_attno < oldnatts; old_attno++) { - Form_pg_attribute old_att, - new_att; - Oid atttypid; - int32 atttypmod; - Oid attcollation; - - old_att = old_tupdesc->attrs[old_attno]; - new_att = new_tupdesc->attrs[old_attno]; - - /* Attribute definitions must match */ - if (old_att->attisdropped != new_att->attisdropped || - old_att->atttypid != new_att->atttypid || - old_att->atttypmod != new_att->atttypmod || - old_att->attcollation != new_att->attcollation || - strcmp(NameStr(old_att->attname), 
NameStr(new_att->attname)) != 0) + Form_pg_attribute att; + char *attname; + Oid atttypid; + int32 atttypmod; + Oid attcollation; + int new_attno; + + att = old_tupdesc->attrs[old_attno]; + if (att->attisdropped) { - goto inh_translation_list_error; + /* Just put NULL into this list entry */ + vars = lappend(vars, NULL); + continue; } + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + attcollation = att->attcollation; - if (old_att->attisdropped) + /* + * When we are generating the "translation list" for the parent table + * of an inheritance set, no need to search for matches. + */ + if (oldrelation == newrelation) { - /* Just put NULL into this list entry */ - vars = lappend(vars, NULL); + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (old_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); continue; } - atttypid = old_att->atttypid; - atttypmod = old_att->atttypmod; - attcollation = old_att->attcollation; + /* + * Otherwise we have to search for the matching column by name. + * There's no guarantee it'll have the same column position, because + * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. + * However, in simple cases it will be the same column number, so try + * that before we go groveling through all the columns. + * + * Note: the test for (att = ...) != NULL cannot fail, it's just a + * notational device to include the assignment into the if-clause. 
+ */ + if (old_attno < newnatts && + (att = new_tupdesc->attrs[old_attno]) != NULL && + !att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + new_attno = old_attno; + else + { + for (new_attno = 0; new_attno < newnatts; new_attno++) + { + att = new_tupdesc->attrs[new_attno]; + if (!att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + break; + } + if (new_attno >= newnatts) + elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", + attname, RelationGetRelationName(newrelation)); + } + + /* Found it, check type and collation match */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type", + attname, RelationGetRelationName(newrelation)); + if (attcollation != att->attcollation) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation", + attname, RelationGetRelationName(newrelation)); vars = lappend(vars, makeVar(newvarno, - (AttrNumber) (old_attno + 1), + (AttrNumber) (new_attno + 1), atttypid, atttypmod, attcollation, 0)); } - /* Everything's ok */ - return vars; - -/* We end up here if any attribute differs */ -inh_translation_list_error: - elog(ERROR, "partition \"%s\" must have exact" - "same structure as parent \"%s\"", - RelationGetRelationName(newrelation), - RelationGetRelationName(oldrelation)); - - return NIL; /* keep compiler happy */ + *translated_vars = vars; } /* @@ -295,9 +322,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, appinfo->parent_relid = parent_rti; appinfo->child_relid = childRTindex; appinfo->parent_reloid = parent_rte->relid; - appinfo->translated_vars = make_inh_translation_list_simplified(parent_relation, - child_relation, - childRTindex); + + make_inh_translation_list(parent_relation, child_relation, childRTindex, + &appinfo->translated_vars); /* Now append 'appinfo' to 'root->append_rel_list' 
*/ root->append_rel_list = lappend(root->append_rel_list, appinfo); From 1871b0450a755ff35d6d9e7f006b9c08bd722b94 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jan 2017 14:07:38 +0300 Subject: [PATCH 0134/1124] trick clang analyzer (dereference of NULL pointer) --- src/pg_pathman.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index baeed277..a44047a4 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -239,6 +239,17 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, for (new_attno = 0; new_attno < newnatts; new_attno++) { att = new_tupdesc->attrs[new_attno]; + + /* + * Make clang analyzer happy: + * + * Access to field 'attisdropped' results + * in a dereference of a null pointer + */ + if (!att) + elog(ERROR, "error in function " + CppAsString(make_inh_translation_list)); + if (!att->attisdropped && att->attinhcount != 0 && strcmp(attname, NameStr(att->attname)) == 0) break; From 92251fb2b39aa8d75bd123959f574ed4ec33165f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jan 2017 16:14:14 +0300 Subject: [PATCH 0135/1124] rename copy_acl_privileges() -> postprocess_child_table_and_atts(), set 'attislocal' = true for child attributes --- src/partition_creation.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 6b53c6f4..2e4d86ba 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -76,7 +76,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); -static void copy_acl_privileges(Oid parent_relid, Oid partition_relid); +static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Constraint *make_constraint_common(char *name, Node *raw_expr); @@ -799,8 +799,8 @@ create_single_partition_internal(Oid parent_relid, /* Make changes 
visible */ CommandCounterIncrement(); - /* Copy ACL privileges of the parent table */ - copy_acl_privileges(parent_relid, partition_relid); + /* Copy ACL privileges of the parent table and set "attislocal" */ + postprocess_child_table_and_atts(parent_relid, partition_relid); } else if (IsA(cur_stmt, CreateForeignTableStmt)) { @@ -880,9 +880,9 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) return table_addr; } -/* Copy ACL privileges of parent table */ +/* Copy ACL privileges of parent table and set "attislocal" = true */ static void -copy_acl_privileges(Oid parent_relid, Oid partition_relid) +postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Relation pg_class_rel, pg_attribute_rel; @@ -1039,10 +1039,16 @@ copy_acl_privileges(Oid parent_relid, Oid partition_relid) /* Copy ItemPointer of this tuple */ iptr = subhtup->t_self; + /* Change ACL of this column */ values[Anum_pg_attribute_attacl - 1] = acl_datum; /* ACL array */ nulls[Anum_pg_attribute_attacl - 1] = acl_null; /* do we have ACL? 
*/ replaces[Anum_pg_attribute_attacl - 1] = true; + /* Change 'attislocal' for DROP COLUMN */ + values[Anum_pg_attribute_attislocal - 1] = false; /* should not be local */ + nulls[Anum_pg_attribute_attislocal - 1] = false; /* NOT NULL */ + replaces[Anum_pg_attribute_attislocal - 1] = true; + /* Build new tuple with parent's ACL */ subhtup = heap_modify_tuple(subhtup, pg_attribute_desc, values, nulls, replaces); From d8e4d389d791fea5057b31fd3b75d18d743a9a5c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jan 2017 19:36:55 +0300 Subject: [PATCH 0136/1124] pfilter_build_tlist() should take dropped columns into account --- src/partition_filter.c | 56 ++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 13 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index eefdfd2c..a978acd0 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -15,6 +15,7 @@ #include "planner_tree_modification.h" #include "utils.h" +#include "catalog/pg_type.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "nodes/nodeFuncs.h" @@ -65,7 +66,7 @@ CustomExecMethods partition_filter_exec_methods; static estate_mod_data * fetch_estate_mod_data(EState *estate); -static List * pfilter_build_tlist(List *tlist); +static List * pfilter_build_tlist(Relation parent_rel, List *tlist); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); static void prepare_rri_fdw_for_insert(EState *estate, @@ -317,10 +318,11 @@ find_partitions_for_value(Datum value, Oid value_type, Plan * -make_partition_filter(Plan *subplan, Oid partitioned_table, +make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action) { CustomScan *cscan = makeNode(CustomScan); + Relation parent_rel; cscan->scan.plan.startup_cost = subplan->startup_cost; cscan->scan.plan.total_cost = subplan->total_cost; @@ -330,14 +332,17 @@ 
make_partition_filter(Plan *subplan, Oid partitioned_table, cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan->targetlist); + parent_rel = RelationIdGetRelation(parent_relid); + cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, + subplan->targetlist); + RelationClose(parent_rel); /* No relation will be scanned */ cscan->scan.scanrelid = 0; cscan->custom_scan_tlist = subplan->targetlist; /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make2_int(partitioned_table, conflict_action); + cscan->custom_private = list_make2_int(parent_relid, conflict_action); return &cscan->scan.plan; } @@ -755,7 +760,7 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) * Build partition filter's target list pointing to subplan tuple's elements */ static List * -pfilter_build_tlist(List *tlist) +pfilter_build_tlist(Relation parent_rel, List *tlist) { List *result_tlist = NIL; ListCell *lc; @@ -763,17 +768,42 @@ pfilter_build_tlist(List *tlist) foreach (lc, tlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); + TargetEntry *tle = (TargetEntry *) lfirst(lc); + Expr *col_expr; + Form_pg_attribute attr; - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); + /* Make sure that this attribute exists */ + if (i > RelationGetDescr(parent_rel)->natts) + elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); + + /* Fetch pg_attribute entry for this column */ + attr = RelationGetDescr(parent_rel)->attrs[i - 1]; + + /* If this column is dropped, create a placeholder Const */ + if (attr->attisdropped) + { + /* Insert NULL for dropped column */ + col_expr = (Expr *) makeConst(INT4OID, + -1, + InvalidOid, + sizeof(int32), + (Datum) 0, + true, + true); + } + /* Otherwise we 
should create a Var referencing subplan's output */ + else + { + col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ + i, /* direct attribute mapping */ + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + } result_tlist = lappend(result_tlist, - makeTargetEntry((Expr *) var, + makeTargetEntry(col_expr, i, NULL, tle->resjunk)); From 588b292adf9b672f79123942d741d6651bc97698 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 12 Jan 2017 15:57:35 +0300 Subject: [PATCH 0137/1124] fix sigfault when inserting into area not covered with partitions and interval set to NULL --- init.sql | 34 ++++++++++++++++++++++++++++++++++ src/partition_creation.c | 32 ++++++++------------------------ 2 files changed, 42 insertions(+), 24 deletions(-) diff --git a/init.sql b/init.sql index 657f8df0..3733e398 100644 --- a/init.sql +++ b/init.sql @@ -596,6 +596,40 @@ END $$ LANGUAGE plpgsql STRICT; +/* + * Set new relname, schema and tablespace + */ +CREATE OR REPLACE FUNCTION @extschema@.alter_partition(relation REGCLASS, + new_name TEXT, + new_schema REGNAMESPACE, + new_tablespace TEXT) +RETURNS VOID AS +$$ +DECLARE + orig_name TEXT; + orig_schema OID; +BEGIN + SELECT relname, relnamespace FROM pg_class WHERE oid = relation + INTO orig_name, orig_schema; + + /* Alter table name */ + IF new_name != orig_name THEN + EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + END IF; + + /* Alter table schema */ + IF new_schema != orig_schema THEN + EXECUTE format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + END IF; + + /* Move to another tablespace */ + IF NOT new_tablespace IS NULL THEN + EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + END IF; +END +$$ LANGUAGE plpgsql; + + /* * Partitioning key */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 7b63b203..4ad34042 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c 
@@ -363,36 +363,12 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) RangeEntry *ranges = PrelGetRangesArray(prel); Datum bound_min, /* absolute MIN */ bound_max; /* absolute MAX */ - // Infinitable bound_min, /* lower bound of all partitions */ - // bound_max; /* upper bound of all partitions */ - // Infinitable start, - // end; Oid interval_type = InvalidOid; Datum interval_binary, /* assigned 'width' of one partition */ interval_text; - // bound_min = ranges[0].min; - // bound_max = ranges[PrelLastChild(prel)].max; - - // start.value = !IsInfinite(&bound_min) ? - // datumCopy(InfinitableGetValue(&bound_min), - // prel->attbyval, - // prel->attlen) : - // (Datum) 0; - // start.is_infinite = IsInfinite(&bound_min); - - // end.value = !IsInfinite(&bound_max) ? - // datumCopy(InfinitableGetValue(&bound_max), - // prel->attbyval, - // prel->attlen) : - // (Datum) 0; - // end.is_infinite = IsInfinite(&bound_max); - /* Read max & min range values from PartRelationInfo */ - /* TODO */ - // bound_min = PrelGetRangesArray(prel)[0].min; - // bound_max = PrelGetRangesArray(prel)[PrelLastChild(prel)].max; bound_min = BoundGetValue(&ranges[0].min); bound_max = BoundGetValue(&ranges[PrelLastChild(prel)].max); @@ -400,6 +376,14 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); bound_max = datumCopy(bound_max, prel->attbyval, prel->attlen); + /* Check if interval is set */ + if (isnull[Anum_pathman_config_range_interval - 1]) + { + elog(ERROR, + "Could not find appropriate partition for key '%s'", + datum_to_cstring(value, value_type)); + } + /* Retrieve interval as TEXT from tuple */ interval_text = values[Anum_pathman_config_range_interval - 1]; From eba6fb0ecabbe14e5c2d8d891116d452a66331e6 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 12 Jan 2017 15:58:42 +0300 Subject: [PATCH 0138/1124] Add pgbench scripts to python tests --- 
tests/python/pgbench_scripts/detachs_in_timeout.pgbench | 2 ++ tests/python/pgbench_scripts/insert_current_timestamp.pgbench | 1 + 2 files changed, 3 insertions(+) create mode 100644 tests/python/pgbench_scripts/detachs_in_timeout.pgbench create mode 100644 tests/python/pgbench_scripts/insert_current_timestamp.pgbench diff --git a/tests/python/pgbench_scripts/detachs_in_timeout.pgbench b/tests/python/pgbench_scripts/detachs_in_timeout.pgbench new file mode 100644 index 00000000..ff2fe861 --- /dev/null +++ b/tests/python/pgbench_scripts/detachs_in_timeout.pgbench @@ -0,0 +1,2 @@ +select detach_range_partition(partition) from (select partition from pathman_partition_list where parent='ts_range_partitioned'::regclass order by range_min limit 1) t; +select pg_sleep(:timeout); diff --git a/tests/python/pgbench_scripts/insert_current_timestamp.pgbench b/tests/python/pgbench_scripts/insert_current_timestamp.pgbench new file mode 100644 index 00000000..d0276b11 --- /dev/null +++ b/tests/python/pgbench_scripts/insert_current_timestamp.pgbench @@ -0,0 +1 @@ +insert into ts_range_partitioned values (current_timestamp); From 44dce6f5e36e6940563149707f1eb5ec968a848f Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 12 Jan 2017 16:53:47 +0300 Subject: [PATCH 0139/1124] Fix assert condition in test_concurrent_detach case --- tests/python/partitioning_test.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 568be2d9..b70540a5 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -752,10 +752,15 @@ def test_concurrent_detach(self): # Obtain error log from inserts process inserts_errors = inserts.stderr.read() - - self.assertIsNone( - re.search("ERROR: constraint", inserts_errors), - msg="Race condition between detach and concurrent inserts with append partition is expired") + expected_errors = ('starting vacuum...ERROR: relation 
"pgbench_branches" does not exist\n' + '(ignoring this error and continuing anyway)\n' + 'ERROR: relation "pgbench_tellers" does not exist\n' + '(ignoring this error and continuing anyway)\n' + 'ERROR: relation "pgbench_history" does not exist\n' + '(ignoring this error and continuing anyway)\n' + 'end.\n') + self.assertEqual(inserts_errors, expected_errors, + msg="Race condition between detach and concurrent inserts with append partition is expired") # Stop instance and finish work node.stop() From fa7dab5676f4193e602644f8ce61c86868bf076c Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 12 Jan 2017 18:24:26 +0300 Subject: [PATCH 0140/1124] Fix test_concurrent_detach test --- tests/python/partitioning_test.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index b70540a5..dec4e58f 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -721,6 +721,17 @@ def test_concurrent_detach(self): inserts_advance = 1 # abvance in sec of inserts process under detachs test_interval = int(math.ceil(detach_timeout * num_detachs)) + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue(os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue(os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + # Create and start new instance node = self.start_new_pathman_cluster(allows_streaming=False) @@ -733,16 +744,21 @@ def test_concurrent_detach(self): # Run in background inserts and detachs processes FNULL = open(os.devnull, 'w') + + # init pgbench's utility tables + 
init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) + init_pgbench.wait() + inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ "-j", "%i" % num_insert_workers, "-c", "%i" % num_insert_workers, - "-f", "pgbench_scripts/insert_current_timestamp.pgbench", + "-f", insert_pgbench_script, "-T", "%i" % (test_interval+inserts_advance) ]) time.sleep(inserts_advance) detachs = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ "-D", "timeout=%f" % detach_timeout, - "-f", "pgbench_scripts/detachs_in_timeout.pgbench", + "-f", detach_pgbench_script, "-T", "%i" % test_interval ]) @@ -752,14 +768,7 @@ def test_concurrent_detach(self): # Obtain error log from inserts process inserts_errors = inserts.stderr.read() - expected_errors = ('starting vacuum...ERROR: relation "pgbench_branches" does not exist\n' - '(ignoring this error and continuing anyway)\n' - 'ERROR: relation "pgbench_tellers" does not exist\n' - '(ignoring this error and continuing anyway)\n' - 'ERROR: relation "pgbench_history" does not exist\n' - '(ignoring this error and continuing anyway)\n' - 'end.\n') - self.assertEqual(inserts_errors, expected_errors, + self.assertIsNone(re.search("ERROR|FATAL|PANIC", inserts_errors), msg="Race condition between detach and concurrent inserts with append partition is expired") # Stop instance and finish work From 2d0a3a6ab1f448b068d30929e4ce12c92c9770ee Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 16 Jan 2017 17:09:45 +0300 Subject: [PATCH 0141/1124] fix append and prepend funcs for DATE partitioning key --- range.sql | 62 +++++++++++++++++++++++++------------------------------ 1 file changed, 28 insertions(+), 34 deletions(-) diff --git a/range.sql b/range.sql index 810acfc6..3b553430 100644 --- a/range.sql +++ b/range.sql @@ -750,6 +750,7 @@ $$ DECLARE v_part_name TEXT; v_atttype REGTYPE; + v_args_format TEXT; BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN @@ -769,26 +770,22 @@ BEGIN END IF; IF 
@extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[2], - p_range[2] + p_interval::interval, - partition_name, - tablespace); + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', v_atttype::TEXT); ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[2], - p_interval, - partition_name, - tablespace - INTO - v_part_name; + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', v_atttype::TEXT); END IF; + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + RETURN v_part_name; END $$ @@ -864,6 +861,7 @@ $$ DECLARE v_part_name TEXT; v_atttype REGTYPE; + v_args_format TEXT; BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN @@ -883,26 +881,22 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[1] - p_interval::interval, - p_range[1], - partition_name, - tablespace); + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', v_atttype::TEXT); ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[1], - p_interval, - partition_name, - tablespace - INTO - v_part_name; + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', v_atttype::TEXT); END IF; + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + RETURN v_part_name; END $$ From 194f29ffafdd8710f421b3a06fe20cfd30c1b9a7 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 18 Jan 2017 18:27:19 +0300 Subject: [PATCH 0142/1124] 
added arguments to create_hash_partitions() to specify partition names and tablespaces --- hash.sql | 14 +++++--- src/pl_hash_funcs.c | 83 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 92 insertions(+), 5 deletions(-) diff --git a/hash.sql b/hash.sql index 682b145f..8d34101b 100644 --- a/hash.sql +++ b/hash.sql @@ -15,7 +15,9 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, attribute TEXT, partitions_count INTEGER, - partition_data BOOLEAN DEFAULT TRUE) + partition_data BOOLEAN DEFAULT TRUE, + relnames TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ BEGIN @@ -39,7 +41,9 @@ BEGIN /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, attribute, - partitions_count); + partitions_count, + relnames, + tablespaces); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -275,9 +279,11 @@ $$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, - partitions_count INTEGER) + partitions_count INTEGER, + relnames TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' -LANGUAGE C STRICT; +LANGUAGE C; /* * Returns hash function OID for specified type diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 882f26b0..08a5e83b 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -13,10 +13,13 @@ #include "relation_info.h" #include "utils.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" #include "utils/builtins.h" #include "utils/typcache.h" #include "utils/lsyscache.h" #include "utils/builtins.h" +#include "utils/array.h" /* Function declarations */ @@ -29,6 +32,9 @@ PG_FUNCTION_INFO_V1( get_hash_part_idx ); PG_FUNCTION_INFO_V1( build_hash_condition ); +static char **deconstruct_text_array(Datum arr, int *num_elems); + + /* * Create HASH partitions 
implementation (written in C). */ @@ -41,6 +47,13 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) uint32 part_count = PG_GETARG_INT32(2), i; + /* Partitions names and tablespaces */ + char **names = NULL, + **tablespaces = NULL; + int names_size = 0, + tablespaces_size = 0; + RangeVar **rangevars = NULL; + /* Check that there's no partitions yet */ if (get_pathman_relation_info(parent_relid)) elog(ERROR, "cannot add new HASH partitions"); @@ -49,16 +62,84 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) TextDatumGetCString(partitioned_col_name), false); + /* Get partition names and tablespaces */ + if (!PG_ARGISNULL(3)) + names = deconstruct_text_array(PG_GETARG_DATUM(3), &names_size); + + if (!PG_ARGISNULL(4)) + tablespaces = deconstruct_text_array(PG_GETARG_DATUM(4), &tablespaces_size); + + /* Convert partition names into RangeVars */ + if (names_size > 0) + { + rangevars = palloc(sizeof(RangeVar) * names_size); + for (i = 0; i < names_size; i++) + { + List *nl = stringToQualifiedNameList(names[i]); + + rangevars[i] = makeRangeVarFromNameList(nl); + } + } + for (i = 0; i < part_count; i++) { + RangeVar *rel = rangevars != NULL ? rangevars[i] : NULL; + char *tablespace = tablespaces != NULL ? 
tablespaces[i] : NULL; + /* Create a partition (copy FKs, invoke callbacks etc) */ create_single_hash_partition_internal(parent_relid, i, part_count, - partitioned_col_type, NULL, NULL); + partitioned_col_type, + rel, tablespace); } PG_RETURN_VOID(); } +/* + * Convert Datum into cstring array + */ +static char ** +deconstruct_text_array(Datum arr, int *num_elems) +{ + ArrayType *arrayval; + int16 elemlen; + bool elembyval; + char elemalign; + Datum *elem_values; + bool *elem_nulls; + int16 i; + + arrayval = DatumGetArrayTypeP(arr); + + Assert(ARR_ELEMTYPE(arrayval) == TEXTOID); + + get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), + &elemlen, &elembyval, &elemalign); + deconstruct_array(arrayval, + ARR_ELEMTYPE(arrayval), + elemlen, elembyval, elemalign, + &elem_values, &elem_nulls, num_elems); + + /* If there are actual values then convert them into cstrings */ + if (num_elems > 0) + { + char **strings = palloc(sizeof(char *) * *num_elems); + + for (i = 0; i < *num_elems; i++) + { + if (elem_nulls[i]) + elog(ERROR, + "Partition name and tablespace arrays cannot contain nulls"); + + strings[i] = TextDatumGetCString(elem_values[i]); + } + + return strings; + } + + return NULL; +} + /* * Returns hash function's OID for a specified type. 
*/ From 2b0bfaba014ffe0142af2a75943cf2cb5cb1f717 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 20 Jan 2017 15:18:02 +0300 Subject: [PATCH 0143/1124] fix #76: pass parent and partition schemas into callbacks params --- expected/pathman_callbacks.out | 18 +++++++++--------- src/partition_creation.c | 8 ++++++++ src/utils.c | 12 ++++++++++++ src/utils.h | 1 + 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index e1a29dce..6ef8bf00 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -27,7 +27,7 @@ SELECT set_init_callback('callbacks.abc', INSERT INTO callbacks.abc VALUES (123, 1); INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} SELECT set_spawn_using_bgw('callbacks.abc', true); set_spawn_using_bgw --------------------- @@ -54,21 +54,21 @@ SELECT set_spawn_using_bgw('callbacks.abc', false); (1 row) SELECT append_range_partition('callbacks.abc'); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "501", "range_min": "401"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "501", "range_min": "401", "parent_schema": "callbacks", "partition_schema": "callbacks"} append_range_partition ------------------------ callbacks.abc_5 (1 row) SELECT prepend_range_partition('callbacks.abc'); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_6", "range_max": "1", "range_min": "-99"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_6", "range_max": "1", "range_min": "-99", "parent_schema": "callbacks", 
"partition_schema": "callbacks"} prepend_range_partition ------------------------- callbacks.abc_6 (1 row) SELECT add_range_partition('callbacks.abc', 501, 602); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_7", "range_max": "602", "range_min": "501"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_7", "range_max": "602", "range_min": "501", "parent_schema": "callbacks", "partition_schema": "callbacks"} add_range_partition --------------------- callbacks.abc_7 @@ -97,11 +97,11 @@ SELECT set_init_callback('callbacks.abc', (1 row) SELECT create_hash_partitions('callbacks.abc', 'a', 5); -WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_0"} -WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_1"} -WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_2"} -WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_3"} -WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_0", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_1", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_2", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_3", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4", "parent_schema": "callbacks", "partition_schema": "callbacks"} create_hash_partitions ------------------------ 5 diff --git a/src/partition_creation.c b/src/partition_creation.c index 6b53c6f4..1d4c6627 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1444,8 
+1444,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "parent"); JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(parent_oid))); JSB_INIT_VAL(&key, WJB_KEY, "partition"); JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(partition_oid))); JSB_INIT_VAL(&key, WJB_KEY, "parttype"); JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_HASH)); @@ -1469,8 +1473,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "parent"); JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(parent_oid))); JSB_INIT_VAL(&key, WJB_KEY, "partition"); JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(partition_oid))); JSB_INIT_VAL(&key, WJB_KEY, "parttype"); JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); JSB_INIT_VAL(&key, WJB_KEY, "range_min"); diff --git a/src/utils.c b/src/utils.c index 1f086f5f..4c3067b7 100644 --- a/src/utils.c +++ b/src/utils.c @@ -234,6 +234,18 @@ get_rel_name_or_relid(Oid relid) return relname; } +char * +get_qualified_rel_name_or_relid(Oid relid) +{ + char *relname = get_rel_name(relid); + char *namespace = get_namespace_name(get_rel_namespace(relid)); + + if (!relname) + return DatumGetCString(DirectFunctionCall1(oidout, + ObjectIdGetDatum(relid))); + return psprintf("%s.%s", namespace, relname); +} + #if PG_VERSION_NUM < 90600 /* diff --git a/src/utils.h b/src/utils.h index e81f6026..4410b52b 100644 --- a/src/utils.h +++ 
b/src/utils.h @@ -39,6 +39,7 @@ List * list_reverse(List *l); */ Oid get_rel_owner(Oid relid); char * get_rel_name_or_relid(Oid relid); +char * get_qualified_rel_name_or_relid(Oid relid); Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); #if PG_VERSION_NUM < 90600 char get_rel_persistence(Oid relid); From 07ad6117c05302767f767b5e8a0714c8cf48a305 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 20 Jan 2017 15:35:41 +0300 Subject: [PATCH 0144/1124] added some tests for callbacks --- expected/pathman_callbacks.out | 24 +++++++++++++++++++++++- sql/pathman_callbacks.sql | 9 +++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 6ef8bf00..84d69d81 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -107,6 +107,28 @@ WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4", 5 (1 row) +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 5 other objects +/* create table in public schema */ +CREATE TABLE abc(a serial, b int); +SELECT set_init_callback('abc', + 'callbacks.abc_on_part_created_callback'); + set_init_callback +------------------- + +(1 row) + +SELECT create_range_partitions('abc', 'a', 1, 100, 2); +NOTICE: sequence "abc_seq" does not exist, skipping +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "public", "partition_schema": "public"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "public", "partition_schema": "public"} + create_range_partitions +------------------------- + 2 +(1 row) + +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 2 other objects DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 8 other objects +NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git 
a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 290286e6..6c406529 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -43,6 +43,15 @@ SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback'); SELECT create_hash_partitions('callbacks.abc', 'a', 5); +DROP TABLE callbacks.abc CASCADE; + +/* create table in public schema */ +CREATE TABLE abc(a serial, b int); +SELECT set_init_callback('abc', + 'callbacks.abc_on_part_created_callback'); +SELECT create_range_partitions('abc', 'a', 1, 100, 2); + +DROP TABLE abc CASCADE; DROP SCHEMA callbacks CASCADE; DROP EXTENSION pg_pathman CASCADE; From b8f179d300fbb6320ea1129f9333255d6002e79e Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 23 Jan 2017 14:17:33 +0300 Subject: [PATCH 0145/1124] Replace column type of pathman_config_params from oid to text --- expected/pathman_permissions.out | 2 +- hash.sql | 2 +- init.sql | 17 +++++++---- range.sql | 2 +- src/init.c | 1 - src/partition_creation.c | 48 ++++++++++++++++++++++++++++++-- src/pl_funcs.c | 12 +++++++- 7 files changed, 70 insertions(+), 14 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index ca95e2d1..58d4b2ae 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -41,7 +41,7 @@ SELECT * FROM pathman_config; SELECT * FROM pathman_config_params; partrel | enable_parent | auto | init_callback | spawn_using_bgw -------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | - | f + permissions.user1_table | f | t | | f (1 row) /* Should fail */ diff --git a/hash.sql b/hash.sql index 682b145f..c831bb4a 100644 --- a/hash.sql +++ b/hash.sql @@ -141,7 +141,7 @@ BEGIN /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) + SELECT coalesce(init_callback, NULL) FROM stub_callback LEFT JOIN 
@extschema@.pathman_config_params AS params ON params.partrel = parent_relid diff --git a/init.sql b/init.sql index a81af522..fb15d7c6 100644 --- a/init.sql +++ b/init.sql @@ -34,10 +34,10 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( * NOTE: this function is used in CHECK CONSTRAINT. */ CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( - callback REGPROC, + callback TEXT, raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' -LANGUAGE C STRICT; +LANGUAGE C; /* @@ -45,13 +45,14 @@ LANGUAGE C STRICT; * partrel - regclass (relation type, stored as Oid) * enable_parent - add parent table to plan * auto - enable automatic partition creation - * init_callback - cb to be executed on partition creation + * init_callback - text signature of cb to be executed on partition + * creation */ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0, + init_callback TEXT, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ @@ -118,7 +119,7 @@ BEGIN USING relation, value; END $$ -LANGUAGE plpgsql STRICT; +LANGUAGE plpgsql; /* * Include\exclude parent relation in query plan. 
@@ -157,7 +158,11 @@ CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( RETURNS VOID AS $$ BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', + CASE WHEN callback <> 0 + THEN regprocedureout(callback)::text + ELSE NULL END); + END $$ LANGUAGE plpgsql STRICT; diff --git a/range.sql b/range.sql index 396d8f30..441e8223 100644 --- a/range.sql +++ b/range.sql @@ -1051,7 +1051,7 @@ BEGIN /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) + SELECT coalesce(init_callback, NULL) FROM stub_callback LEFT JOIN @extschema@.pathman_config_params AS params ON params.partrel = parent_relid diff --git a/src/init.c b/src/init.c index cfa25f19..08df63dc 100644 --- a/src/init.c +++ b/src/init.c @@ -750,7 +750,6 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) Assert(!isnull[Anum_pathman_config_params_partrel - 1]); Assert(!isnull[Anum_pathman_config_params_enable_parent - 1]); Assert(!isnull[Anum_pathman_config_params_auto - 1]); - Assert(!isnull[Anum_pathman_config_params_init_callback - 1]); Assert(!isnull[Anum_pathman_config_params_spawn_using_bgw - 1]); } diff --git a/src/partition_creation.c b/src/partition_creation.c index 6b53c6f4..2f5a028f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -78,6 +78,8 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); static void copy_acl_privileges(Oid parent_relid, Oid partition_relid); +static Oid text2regprocedure(text *proname_args); + static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); @@ -1377,6 +1379,34 @@ make_int_value_struct(int int_val) return val; } +/* + * Utility function that converts signature of procedure into regprocedure. 
+ * + * Precondition: proname_args != NULL. + * + * Returns InvalidOid if proname_args is not found. + * Raise error if it's incorrect. + */ +static Oid +text2regprocedure(text *proname_args) +{ + FunctionCallInfoData fcinfo; + Datum result; + + InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); + +#if PG_VERSION_NUM >= 90600 + fcinfo.arg[0] = PointerGetDatum(proname_args); +#else + fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proname_args)); +#endif + fcinfo.argnull[0] = false; + + result = to_regprocedure(&fcinfo); + + return DatumGetObjectId(result); +} + /* * --------------------- @@ -1416,14 +1446,26 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Search for init_callback entry in PATHMAN_CONFIG_PARAMS */ if (read_pathman_params(parent_oid, param_values, param_isnull)) { - Datum init_cb_datum; /* Oid of init_callback */ + Datum init_cb_datum; /* signature of init_callback */ AttrNumber init_cb_attno = Anum_pathman_config_params_init_callback; - /* Extract Datum storing callback's Oid */ + /* Extract Datum storing callback's signature */ init_cb_datum = param_values[init_cb_attno - 1]; /* Cache init_callback's Oid */ - cb_params->callback = DatumGetObjectId(init_cb_datum); + if (init_cb_datum) + { + cb_params->callback = text2regprocedure( + DatumGetTextP(init_cb_datum)); + + if (!RegProcedureIsValid(cb_params->callback)) + ereport(ERROR, + (errcode(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION), + errmsg("callback function \"%s\" doesn't exist", + DatumGetCString(init_cb_datum)))); + } + else + cb_params->callback = InvalidOid; cb_params->callback_is_cached = true; } } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 154527c6..ea39cc5f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -792,7 +792,17 @@ prevent_relation_modification(PG_FUNCTION_ARGS) Datum validate_part_callback_pl(PG_FUNCTION_ARGS) { - PG_RETURN_BOOL(validate_part_callback(PG_GETARG_OID(0), PG_GETARG_BOOL(1))); + const char *cb_cstring; + Oid cb_oid; + + if 
(PG_ARGISNULL(0)) + PG_RETURN_BOOL(true); + + cb_cstring = text_to_cstring(PG_GETARG_TEXT_P(0)); + cb_oid = DatumGetObjectId(DirectFunctionCall1(regprocedurein, + CStringGetDatum(cb_cstring))); + + PG_RETURN_BOOL(validate_part_callback(cb_oid, PG_GETARG_BOOL(1))); } /* From 55321b21d528b3054fdc54b60c658d057ed6bba3 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 23 Jan 2017 18:03:27 +0300 Subject: [PATCH 0146/1124] wrote a C version of merge_range_partitions() function --- hash.sql | 8 ++ range.sql | 7 ++ src/init.c | 17 --- src/partition_creation.c | 36 ++++++ src/partition_creation.h | 2 + src/pl_funcs.c | 20 +--- src/pl_range_funcs.c | 245 +++++++++++++++++++++++++++++++++++---- src/utils.c | 1 - src/xact_handling.c | 28 +++++ src/xact_handling.h | 2 + 10 files changed, 305 insertions(+), 61 deletions(-) diff --git a/hash.sql b/hash.sql index 8d34101b..f24641e0 100644 --- a/hash.sql +++ b/hash.sql @@ -38,6 +38,14 @@ BEGIN INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) VALUES (parent_relid, attribute, 1); + IF array_length(relnames) != partitions_count THEN + RAISE EXCEPTION 'Partition names array size must be equal the partitions count'; + END IF; + + IF array_length(tablespaces) != partitions_count THEN + RAISE EXCEPTION 'Partition tablespaces array size must be equal the partitions count'; + END IF; + /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, attribute, diff --git a/range.sql b/range.sql index 3b553430..e64f3b8b 100644 --- a/range.sql +++ b/range.sql @@ -681,6 +681,13 @@ END $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + parent REGCLASS, + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + + /* * Append new partition. 
*/ diff --git a/src/init.c b/src/init.c index d0a4ca15..d033d70c 100644 --- a/src/init.c +++ b/src/init.c @@ -910,23 +910,6 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) FmgrInfo *flinfo = (FmgrInfo *) arg; return cmp_bounds(flinfo, &v1->min, &v2->min); - - // /* If range is half open */ - // if (IsInfinite(&v1->min)) - // { - // // if (IsInfinite(&v2->min)) - // // return Int32GetDatum(0); - // return Int32GetDatum(-1); - // } - // if (IsInfinite(&v2->min)) - // { - // return Int32GetDatum(1); - // } - - // /* Else if range is closed */ - // return OidFunctionCall2(cmp_proc_oid, - // BoundGetValue(&v1->min), - // BoundGetValue(&v2->min)); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index 4ad34042..6fde23d0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -83,6 +83,8 @@ static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); +static RangeVar *makeRangeVarFromRelid(Oid relid); + /* * --------------------------------------- @@ -1426,6 +1428,40 @@ make_int_value_struct(int int_val) return val; } +void +drop_check_constraint(Oid relid, AttrNumber attnum) +{ + char *constr_name; + AlterTableStmt *stmt; + AlterTableCmd *cmd; + + /* Build a correct name for this constraint */ + constr_name = build_check_constraint_name_relid_internal(relid, attnum); + + stmt = makeNode(AlterTableStmt); + stmt->relation = makeRangeVarFromRelid(relid); + stmt->relkind = OBJECT_TABLE; + + cmd = makeNode(AlterTableCmd); + cmd->subtype = AT_DropConstraint; + cmd->name = constr_name; + cmd->behavior = DROP_RESTRICT; + cmd->missing_ok = true; + + stmt->cmds = list_make1(cmd); + + AlterTable(relid, ShareUpdateExclusiveLock, stmt); +} + +static RangeVar * +makeRangeVarFromRelid(Oid relid) +{ + char *relname = get_rel_name(relid); + char *namespace = get_namespace_name(get_rel_namespace(relid)); + + return 
makeRangeVar(namespace, relname, -1); +} + /* * --------------------- diff --git a/src/partition_creation.h b/src/partition_creation.h index a901edff..8abce6b3 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -74,6 +74,8 @@ Node * build_raw_hash_check_tree(char *attname, uint32 part_idx, uint32 part_count, Oid value_type); +void drop_check_constraint(Oid relid, AttrNumber attnum); + /* Partitioning callback type */ typedef enum diff --git a/src/pl_funcs.c b/src/pl_funcs.c index edda6754..1f8a521f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -779,25 +779,7 @@ prevent_relation_modification(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - /* - * Check that isolation level is READ COMMITTED. - * Else we won't be able to see new rows - * which could slip through locks. - */ - if (!xact_is_level_read_committed()) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Expected READ COMMITTED isolation level"))); - - /* - * Check if table is being modified - * concurrently in a separate transaction. 
- */ - if (!xact_lock_rel_exclusive(relid, true)) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Table \"%s\" is being modified concurrently", - get_rel_name_or_relid(relid)))); + (void) prevent_relation_modification_internal(relid); PG_RETURN_VOID(); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index a589d3cc..be100f84 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -13,8 +13,13 @@ #include "partition_creation.h" #include "relation_info.h" #include "utils.h" +#include "xact_handling.h" +#include "access/xact.h" #include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "catalog/heap.h" +#include "executor/spi.h" #include "parser/parse_relation.h" #include "parser/parse_expr.h" #include "utils/array.h" @@ -30,6 +35,14 @@ static ArrayType *construct_infinitable_array(Bound **elems, int elmlen, bool elmbyval, char elmalign); +static void check_adjacence(Oid cmp_proc, List *ranges); +static void merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart); +static void recreate_range_constraint(Oid partition, + const char *attname, + AttrNumber attnum, + Oid atttype, + const Bound *lower, + const Bound *upper); /* Function declarations */ @@ -42,6 +55,7 @@ PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); /* @@ -59,10 +73,6 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) Oid parent_relid; /* RANGE boundaries + value type */ - // Datum start_value, - // end_value; - // bool infinite_start, - // infinite_end; Bound start, end; Oid value_type; @@ -81,10 +91,6 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch mandatory args */ parent_relid = PG_GETARG_OID(0); - // start_value = PG_GETARG_DATUM(1); - // end_value = PG_GETARG_DATUM(2); - // infinite_start = PG_ARGISNULL(1); - // infinite_end = PG_ARGISNULL(2); value_type = 
get_fn_expr_argtype(fcinfo->flinfo, 1); MakeBound(&start, PG_GETARG_DATUM(1), @@ -242,23 +248,8 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) if (ranges[i].child_oid == partition_relid) { ArrayType *arr; - // Datum elems[2] = { InfinitableGetValue(&ranges[i].min), - // InfinitableGetValue(&ranges[i].max) }; - // bool nulls[2] = { IsInfinite(&ranges[i].min), - // IsInfinite(&ranges[i].max) }; - // int dims[1] = { 2 }; - // int lbs[1] = { 1 }; - - // arr = construct_md_array(elems, nulls, 1, dims, lbs, - // prel->atttype, prel->attlen, - // prel->attbyval, prel->attalign); - - // arr = construct_array(elems, 2, prel->atttype, - // prel->attlen, prel->attbyval, - // prel->attalign); Bound *elems[2] = { &ranges[i].min, &ranges[i].max }; - arr = construct_infinitable_array(elems, 2, prel->atttype, prel->attlen, prel->attbyval, prel->attalign); @@ -286,7 +277,6 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) { Oid parent_relid = InvalidOid; int partition_idx = 0; - // Datum elems[2]; Bound *elems[2]; RangeEntry *ranges; const PartRelationInfo *prel; @@ -453,3 +443,210 @@ construct_infinitable_array(Bound **elems, return arr; } + + +Datum +merge_range_partitions(PG_FUNCTION_ARGS) +{ + Oid parent = InvalidOid; + PartParentSearch parent_search; + ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); + + Oid *partitions; + Datum *datums; + bool *nulls; + int npart; + int16 typlen; + bool typbyval; + char typalign; + int i; + + Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); + + /* Extract Oids */ + get_typlenbyvalalign(REGCLASSOID, &typlen, &typbyval, &typalign); + deconstruct_array(arr, REGCLASSOID, + typlen, typbyval, typalign, + &datums, &nulls, &npart); + + partitions = palloc(sizeof(Oid) * npart); + for (i = 0; i < npart; i++) + partitions[i] = DatumGetObjectId(datums[i]); + + if (npart < 2) + elog(ERROR, + "There must be at least two partitions to merge"); + + /* Check if all partitions are from the same parent */ + for (i = 0; i < npart; i++) + { + Oid p = 
get_parent_of_partition(partitions[i], &parent_search); + + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "Relation '%s' is not a partition", + get_rel_name(partitions[i])); + + if (parent == InvalidOid) + parent = p; + + if (p != parent) + elog(ERROR, "All relations must have the same parent"); + } + + merge_range_partitions_internal(parent, partitions, npart); + + PG_RETURN_VOID(); +} + + +static void +merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) +{ + RangeEntry *ranges; + int i, + j; + List *plist = NIL; + RangeEntry *first, *last; + const PartRelationInfo *prel; + + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + if (prel->parttype != PT_RANGE) + elog(ERROR, "Only range partitions can be merged"); + + ranges = PrelGetRangesArray(prel); + + /* Lock parent till transaction's end */ + xact_lock_partitioned_rel(parent, false); + + /* Lock partitions */ + for (i = 0; i < npart; i++) + { + prevent_relation_modification_internal(partitions[0]); + + /* Look for the specified partition */ + for (j = 0; j < PrelChildrenCount(prel); j++) + if (ranges[j].child_oid == partitions[i]) + { + plist = lappend(plist, &ranges[j]); + break; + } + } + + check_adjacence(prel->cmp_proc, plist); + + /* Create a new one */ + first = (RangeEntry *) linitial(plist); + last = (RangeEntry *) llast(plist); + recreate_range_constraint(first->child_oid, + get_relid_attribute_name(prel->key, prel->attnum), + prel->attnum, + prel->atttype, + &first->min, + &last->max); + + /* Make constraint visible */ + CommandCounterIncrement(); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* Migrate the data from all partition to the first one */ + for (i = 1; i < npart; i++) + { + char *query = psprintf("WITH part_data AS (DELETE FROM %s RETURNING *) " + "INSERT INTO %s SELECT * FROM part_data", + get_rel_name(partitions[i]), + get_rel_name(partitions[0])); + + 
SPI_exec(query, 0); + } + + /* + * Drop old partitions + * + * XXX Rewrite this in C + */ + for (i = 1; i < npart; i++) + { + char *query = psprintf("DROP TABLE %s", + get_rel_name(partitions[i])); + + SPI_exec(query, 0); + } + + SPI_finish(); +} + +/* + * Check that range entries are adjacent + */ +static void +check_adjacence(Oid cmp_proc, List *ranges) +{ + ListCell *lc; + RangeEntry *last = NULL; + FmgrInfo finfo; + + fmgr_info(cmp_proc, &finfo); + + foreach(lc, ranges) + { + RangeEntry *cur = (RangeEntry *) lfirst(lc); + + /* Skip first iteration */ + if (!last) + { + last = cur; + continue; + } + + /* + * Compare upper bound of previous range entry and lower bound + * of current + */ + if (cmp_bounds(&finfo, &last->max, &cur->min) != 0) + elog(ERROR, + "Partitions '%s' and '%s' aren't adjacent", + get_rel_name(last->child_oid), get_rel_name(cur->child_oid)); + + last = cur; + } +} + +/* + * Drops old partition constraint and creates a new one with specified + * boundaries + */ +static void +recreate_range_constraint(Oid partition, + const char *attname, + AttrNumber attnum, + Oid atttype, + const Bound *lower, + const Bound *upper) +{ + Constraint *constraint; + Relation partition_rel; + char *attname_nonconst = pstrdup(attname); + + /* Drop old constraint */ + drop_check_constraint(partition, attnum); + + /* Build a new one */ + constraint = build_range_check_constraint(partition, + attname_nonconst, + lower, + upper, + atttype); + + /* Open the relation and add new check constraint */ + partition_rel = heap_open(partition, AccessExclusiveLock); + AddRelationNewConstraints(partition_rel, NIL, + list_make1(constraint), + false, true, true); + heap_close(partition_rel, NoLock); + + pfree(attname_nonconst); +} diff --git a/src/utils.c b/src/utils.c index 1f086f5f..c763d72c 100644 --- a/src/utils.c +++ b/src/utils.c @@ -234,7 +234,6 @@ get_rel_name_or_relid(Oid relid) return relname; } - #if PG_VERSION_NUM < 90600 /* * Returns the relpersistence associated with 
a given relation. diff --git a/src/xact_handling.c b/src/xact_handling.c index 39030e6a..260110dc 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -210,3 +210,31 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) SET_LOCKTAG_RELATION(*tag, dbid, relid); } + + +/* + * Lock relation exclusively & check for current isolation level. + */ +void +prevent_relation_modification_internal(Oid relid) +{ + /* + * Check that isolation level is READ COMMITTED. + * Else we won't be able to see new rows + * which could slip through locks. + */ + if (!xact_is_level_read_committed()) + ereport(ERROR, + (errmsg("Cannot perform blocking partitioning operation"), + errdetail("Expected READ COMMITTED isolation level"))); + + /* + * Check if table is being modified + * concurrently in a separate transaction. + */ + if (!xact_lock_rel_exclusive(relid, true)) + ereport(ERROR, + (errmsg("Cannot perform blocking partitioning operation"), + errdetail("Table \"%s\" is being modified concurrently", + get_rel_name_or_relid(relid)))); +} diff --git a/src/xact_handling.h b/src/xact_handling.h index d10064a1..db7f37d8 100644 --- a/src/xact_handling.h +++ b/src/xact_handling.h @@ -35,5 +35,7 @@ bool xact_is_transaction_stmt(Node *stmt); bool xact_is_set_transaction_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); +void prevent_relation_modification_internal(Oid relid); + #endif /* XACT_HANDLING_H */ From fcc7eadcbb6f5eef13d895e1063af94a0a4d1042 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 24 Jan 2017 14:56:17 +0300 Subject: [PATCH 0147/1124] fixed some bugs and tests --- expected/pathman_domains.out | 2 +- expected/pathman_permissions.out | 4 +- hash.sql | 4 +- range.sql | 98 +++++--------------------------- sql/pathman_domains.sql | 2 +- sql/pathman_permissions.sql | 4 +- src/pl_range_funcs.c | 42 +++++++++++--- 7 files changed, 56 insertions(+), 100 deletions(-) diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 
169f66ea..6062adbc 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -115,7 +115,7 @@ SELECT create_hash_partitions('domains.dom_table', 'val', 5); (1 row) SELECT * FROM pathman_partition_list -ORDER BY partition::TEXT; +ORDER BY "partition"::TEXT; parent | partition | parttype | partattr | range_min | range_max -------------------+---------------------+----------+----------+-----------+----------- domains.dom_table | domains.dom_table_0 | 1 | val | | diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index ca95e2d1..7b89a86f 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -78,7 +78,7 @@ SELECT prepend_range_partition('permissions.user1_table'); (1 row) SELECT attname, attacl from pg_attribute -WHERE attrelid = (SELECT partition FROM pathman_partition_list +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) @@ -104,7 +104,7 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; (1 row) SELECT relname, relacl FROM pg_class -WHERE oid = ANY (SELECT partition FROM pathman_partition_list +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) diff --git a/hash.sql b/hash.sql index f24641e0..53489e1f 100644 --- a/hash.sql +++ b/hash.sql @@ -38,11 +38,11 @@ BEGIN INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) VALUES (parent_relid, attribute, 1); - IF array_length(relnames) != partitions_count THEN + IF array_length(relnames, 1) != partitions_count THEN RAISE EXCEPTION 'Partition names array size must be equal the partitions count'; END IF; - IF array_length(tablespaces) != partitions_count THEN + IF array_length(tablespaces, 1) != partitions_count THEN RAISE EXCEPTION 'Partition tablespaces array size must be 
equal the partitions count'; END IF; diff --git a/range.sql b/range.sql index e64f3b8b..1d56b545 100644 --- a/range.sql +++ b/range.sql @@ -596,98 +596,28 @@ LANGUAGE plpgsql; /* - * Merge two partitions. All data will be copied to the first one. Second - * partition will be destroyed. - * - * NOTE: dummy field is used to pass the element type to the function - * (it is necessary because of pseudo-types used in function). + * Merge multiple partitions. All data will be copied to the first one. The rest + * of partitions will be dropped */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( - parent_relid REGCLASS, +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * The special case of merging two partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( partition1 REGCLASS, - partition2 REGCLASS, - dummy ANYELEMENT, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS + partition2 REGCLASS) +RETURNS VOID AS $$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_check_name TEXT; - v_lower_bound dummy%TYPE; - v_upper_bound dummy%TYPE; - BEGIN - SELECT attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || - @extschema@.get_part_range($2, NULL::%1$s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition1, partition2 - INTO p_range; - - /* Check if ranges are adjacent */ - IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN - RAISE EXCEPTION 'merge failed, partitions must be adjacent'; - END IF; - - /* Drop constraint on first partition... 
*/ - v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition1::TEXT, - v_check_name); - - /* Determine left bound */ - IF p_range[1] IS NULL OR p_range[3] IS NULL THEN - v_lower_bound := NULL; - ELSE - v_lower_bound := least(p_range[1], p_range[3]); - END IF; - - /* Determine right bound */ - IF p_range[2] IS NULL OR p_range[4] IS NULL THEN - v_upper_bound := NULL; - ELSE - v_upper_bound := greatest(p_range[2], p_range[4]); - END IF; - - /* and create a new one */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition1::TEXT, - v_check_name, - @extschema@.build_range_condition(partition1, - v_attname, - v_lower_bound, - v_upper_bound)); - - /* Copy data from second partition to the first one */ - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition2::TEXT, - partition1::TEXT); - - /* Remove second partition */ - EXECUTE format('DROP TABLE %s', partition2::TEXT); + PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - parent REGCLASS, - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' -LANGUAGE C STRICT; - - /* * Append new partition. 
*/ diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index 250c5615..f6ee7076 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -37,7 +37,7 @@ SELECT drop_partitions('domains.dom_table'); SELECT create_hash_partitions('domains.dom_table', 'val', 5); SELECT * FROM pathman_partition_list -ORDER BY partition::TEXT; +ORDER BY "partition"::TEXT; DROP SCHEMA domains CASCADE; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 75c2f935..ac3483c8 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -63,7 +63,7 @@ GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); SELECT attname, attacl from pg_attribute -WHERE attrelid = (SELECT partition FROM pathman_partition_list +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) @@ -73,7 +73,7 @@ ORDER BY attname; /* check ACL for each column */ SET ROLE user2; INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class -WHERE oid = ANY (SELECT partition FROM pathman_partition_list +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index be100f84..461c4a77 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -43,6 +43,7 @@ static void recreate_range_constraint(Oid partition, Oid atttype, const Bound *lower, const Bound *upper); +static char *get_qualified_rel_name(Oid relid); /* Function declarations */ @@ -508,6 +509,7 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) List *plist = NIL; RangeEntry *first, *last; const PartRelationInfo *prel; + FmgrInfo finfo; prel = 
get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -536,10 +538,21 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) check_adjacence(prel->cmp_proc, plist); - /* Create a new one */ + /* Create a new constraint. To do this first determine the bounds */ first = (RangeEntry *) linitial(plist); last = (RangeEntry *) llast(plist); - recreate_range_constraint(first->child_oid, + + /* If last range is less than first one then swap them */ + fmgr_info(prel->cmp_proc, &finfo); + if (cmp_bounds(&finfo, &last->min, &first->min) < 0) + { + RangeEntry *tmp = last; + last = first; + first = tmp; + } + + /* Drop old constraint and create a new one */ + recreate_range_constraint(partitions[0], get_relid_attribute_name(prel->key, prel->attnum), prel->attnum, prel->atttype, @@ -557,8 +570,8 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) { char *query = psprintf("WITH part_data AS (DELETE FROM %s RETURNING *) " "INSERT INTO %s SELECT * FROM part_data", - get_rel_name(partitions[i]), - get_rel_name(partitions[0])); + get_qualified_rel_name(partitions[i]), + get_qualified_rel_name(partitions[0])); SPI_exec(query, 0); } @@ -571,7 +584,7 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) for (i = 1; i < npart; i++) { char *query = psprintf("DROP TABLE %s", - get_rel_name(partitions[i])); + get_qualified_rel_name(partitions[i])); SPI_exec(query, 0); } @@ -603,10 +616,10 @@ check_adjacence(Oid cmp_proc, List *ranges) } /* - * Compare upper bound of previous range entry and lower bound - * of current + * Check that last and current partitions are adjacent */ - if (cmp_bounds(&finfo, &last->max, &cur->min) != 0) + if ((cmp_bounds(&finfo, &last->max, &cur->min) != 0) + && (cmp_bounds(&finfo, &cur->max, &last->min) != 0)) elog(ERROR, "Partitions '%s' and '%s' aren't adjacent", get_rel_name(last->child_oid), get_rel_name(cur->child_oid)); @@ -650,3 +663,16 @@ 
recreate_range_constraint(Oid partition, pfree(attname_nonconst); } + +/* + * Return palloced fully qualified relation name as a cstring + */ +static char * +get_qualified_rel_name(Oid relid) +{ + Oid namespace = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(namespace)), + quote_identifier(get_rel_name(relid))); +} From cea6ea5bc9d5b281eb8c69e05f084811a7318b5f Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 25 Jan 2017 14:37:24 +0300 Subject: [PATCH 0148/1124] wrote a drop_table() function --- src/pl_range_funcs.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 461c4a77..ec924e47 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -19,6 +19,7 @@ #include "catalog/namespace.h" #include "catalog/pg_type.h" #include "catalog/heap.h" +#include "commands/tablecmds.h" #include "executor/spi.h" #include "parser/parse_relation.h" #include "parser/parse_expr.h" @@ -44,6 +45,7 @@ static void recreate_range_constraint(Oid partition, const Bound *lower, const Bound *upper); static char *get_qualified_rel_name(Oid relid); +static void drop_table(Oid relid); /* Function declarations */ @@ -565,7 +567,9 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "could not connect using SPI"); - /* Migrate the data from all partition to the first one */ + /* + * Migrate the data from all partition to the first one + */ for (i = 1; i < npart; i++) { char *query = psprintf("WITH part_data AS (DELETE FROM %s RETURNING *) " @@ -576,20 +580,14 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) SPI_exec(query, 0); } + SPI_finish(); + /* * Drop old partitions - * - * XXX Rewrite this in C */ for (i = 1; i < npart; i++) - { - char *query = psprintf("DROP TABLE %s", - get_qualified_rel_name(partitions[i])); - - SPI_exec(query, 
0); - } + drop_table(partitions[i]); - SPI_finish(); } /* @@ -676,3 +674,19 @@ get_qualified_rel_name(Oid relid) quote_identifier(get_namespace_name(namespace)), quote_identifier(get_rel_name(relid))); } + +static void +drop_table(Oid relid) +{ + DropStmt *n = makeNode(DropStmt); + const char *relname = get_qualified_rel_name(relid); + + n->removeType = OBJECT_TABLE; + n->missing_ok = false; + n->objects = list_make1(stringToQualifiedNameList(relname)); + n->arguments = NIL; + n->behavior = DROP_RESTRICT; // default behaviour + n->concurrent = false; + + RemoveRelations(n); +} From b1c67ae02349e2a721d62593a2d7d0f9936557ab Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 25 Jan 2017 16:12:06 +0300 Subject: [PATCH 0149/1124] add drop_range_partition_expand_next() function --- expected/pathman_basic.out | 35 ++++++++++++++++-- range.sql | 76 +++++++------------------------------- sql/pathman_basic.sql | 5 +++ src/pl_range_funcs.c | 60 ++++++++++++++++++++++++++++++ 4 files changed, 109 insertions(+), 67 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fbe953e2..55282421 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1409,6 +1409,35 @@ SELECT pathman.drop_range_partition('test.num_range_rel_7'); test.num_range_rel_7 (1 row) +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | partattr | range_min | range_max +--------------------+----------------------+----------+----------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + 
+SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | partattr | range_min | range_max +--------------------+----------------------+----------+----------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + SELECT pathman.append_range_partition('test.range_rel'); append_range_partition ------------------------ @@ -1711,15 +1740,13 @@ SELECT COUNT(*) FROM ONLY test.hash_rel; DROP TABLE test.hash_rel CASCADE; SELECT pathman.drop_partitions('test.num_range_rel'); -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 3 other objects NOTICE: 998 rows copied from test.num_range_rel_1 NOTICE: 1000 rows copied from test.num_range_rel_2 NOTICE: 1000 rows copied from test.num_range_rel_3 -NOTICE: 2 rows copied from test.num_range_rel_4 -NOTICE: 0 rows copied from test.num_range_rel_6 drop_partitions ----------------- - 5 + 3 (1 row) DROP TABLE test.num_range_rel CASCADE; diff --git a/range.sql b/range.sql index 1d56b545..8d899452 100644 --- a/range.sql +++ b/range.sql @@ -533,68 +533,6 @@ END $$ LANGUAGE plpgsql; - -/* - * Merge RANGE partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS -$$ -DECLARE - v_parent1 REGCLASS; - v_parent2 REGCLASS; - v_attname TEXT; - v_part_type INTEGER; - v_atttype REGTYPE; - -BEGIN - IF partition1 = partition2 THEN - RAISE EXCEPTION 'cannot merge partition with itself'; - END IF; - - v_parent1 := @extschema@.get_parent_of_partition(partition1); - v_parent2 := @extschema@.get_parent_of_partition(partition2); - - /* Acquire data 
modification locks (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition1); - PERFORM @extschema@.prevent_relation_modification(partition2); - - IF v_parent1 != v_parent2 THEN - RAISE EXCEPTION 'cannot merge partitions with different parents'; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent1); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent1 - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'specified partitions are not RANGE partitions'; - END IF; - - v_atttype := @extschema@.get_attribute_type(partition1, v_attname); - - EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING v_parent1, partition1, partition2; - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent1); -END -$$ -LANGUAGE plpgsql; - - /* * Merge multiple partitions. All data will be copied to the first one. The rest * of partitions will be dropped @@ -617,7 +555,6 @@ BEGIN END $$ LANGUAGE plpgsql; - /* * Append new partition. */ @@ -952,6 +889,19 @@ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ +/* + * Drops partition and expands the next partition so that it cover dropped + * one + * + * This function was written in order to support Oracle-like ALTER TABLE ... + * DROP PARTITION. 
In Oracle partitions only have upper bound and when + * partition is dropped the next one automatically covers freed range + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next(relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' +LANGUAGE C STRICT; + + /* * Attach range partition */ diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 69fff945..3ed24b76 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -400,6 +400,11 @@ SELECT pathman.prepend_range_partition('test.num_range_rel'); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; SELECT pathman.drop_range_partition('test.num_range_rel_7'); +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + SELECT pathman.append_range_partition('test.range_rel'); SELECT pathman.prepend_range_partition('test.range_rel'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index ec924e47..faa5798b 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -59,6 +59,7 @@ PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); /* @@ -690,3 +691,62 @@ drop_table(Oid relid) RemoveRelations(n); } + +/* + * Drops partition and expands the next partition so that it cover dropped + * one + * + * This function was written in order to support Oracle-like ALTER TABLE ... + * DROP PARTITION. 
In Oracle partitions only have upper bound and when + * partition is dropped the next one automatically covers freed range + */ +Datum +drop_range_partition_expand_next(PG_FUNCTION_ARGS) +{ + PartParentSearch parent_search; + const PartRelationInfo *prel; + RangeEntry *ranges; + Oid relid = PG_GETARG_OID(0), + parent; + int i; + + /* Get parent relid */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(relid)); + + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + ranges = PrelGetRangesArray(prel); + + /* Looking for partition in child relations */ + for (i = 0; i < prel->children_count; i++) + if (ranges[i].child_oid == relid) + break; + + /* + * It must be in ranges array because we already know that table + * is a partition + */ + Assert(i < prel->children_count); + + /* If there is next partition then expand it */ + if (i < prel->children_count - 1) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i+1]; + + recreate_range_constraint(next->child_oid, + get_relid_attribute_name(prel->key, prel->attnum), + prel->attnum, + prel->atttype, + &cur->min, + &next->max); + } + + drop_table(relid); + + PG_RETURN_VOID(); +} \ No newline at end of file From a51250462913ac5d45db63750f345f5832c10ab6 Mon Sep 17 00:00:00 2001 From: maksm90 Date: Thu, 26 Jan 2017 02:26:59 +0300 Subject: [PATCH 0150/1124] Fix expiring pathman_config_params trigger under pg_pathman is disabled and pull up pg_dump tests from pg_dump_tests branch --- src/pl_funcs.c | 5 + tests/python/partitioning_test.py | 211 ++++++++++++++++++++++++++++-- 2 files changed, 205 insertions(+), 11 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 154527c6..6ff1e9da 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -690,6 +690,10 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) Datum partrel_datum; bool 
partrel_isnull; + /* Handle pg_pathman disabled case */ + if (!OidIsValid(pathman_config_params)) + goto _return; + /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "this function should not be called directly"); @@ -719,6 +723,7 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) CacheInvalidateRelcacheByRelid(partrel); /* Return the tuple we've been given */ +_return: if (trigdata->tg_event & TRIGGER_EVENT_UPDATE) PG_RETURN_POINTER(trigdata->tg_newtuple); else diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 7f0ee753..7d2076aa 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -13,6 +13,15 @@ import threading +# Helper function for json equality +def ordered(obj): + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj + def if_fdw_enabled(func): """To run tests with FDW support set environment variable TEST_FDW=1""" def wrapper(*args, **kwargs): @@ -365,7 +374,7 @@ def test_foreign_table(self): master.psql('postgres', 'create extension postgres_fdw') # RANGE partitioning test with FDW: - # - create range partitioned table in master + # - create range partitioned table in master # - create foreign server # - create foreign table and insert some data into it # - attach foreign table to partitioned one @@ -424,7 +433,7 @@ def test_foreign_table(self): master.safe_psql('postgres', 'select drop_partitions(\'abc\')') # HASH partitioning with FDW: - # - create hash partitioned table in master + # - create hash partitioned table in master # - create foreign table # - replace local partition with foreign one # - insert data @@ -493,15 +502,6 @@ def test_parallel_nodes(self): $$ language plpgsql; """) - # Helper function for json equality - def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if 
isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj - # Test parallel select with node.connect() as con: con.execute('set max_parallel_workers_per_gather = 2') @@ -708,6 +708,195 @@ def con2_thread(): node.stop() node.cleanup() + def test_pg_dump(self): + """ + Test using dump and restore of partitioned table through pg_dump and pg_restore tools. + + Test strategy: + - test range and hash partitioned tables; + - for each partitioned table check on restorable side the following quantities: + * constraints related to partitioning; + * init callback function and enable parent flag; + * number of rows in parent and child tables; + * plan validity of simple SELECT query under partitioned table; + - check dumping using the following parameters of pg_dump: + * format = plain | custom; + * using of inserts and copy. + - all test cases are carried out on tables half-full with data located in parent part, + the rest of data - in child tables. + """ + + import subprocess + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + """) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute('insert into range_partitioned select i from generate_series(1, 500) i') + con.execute('create table hash_partitioned (i integer not null)') + con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') + + # partition table keeping data in base table + # enable_parent parameter 
automatically becames true + con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') + con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') + + # fillin child tables with remain data + con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') + con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback\')') + con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback\')') + + # turn off enable_parent option + con.execute('select set_enable_parent(\'range_partitioned\', false)') + con.execute('select set_enable_parent(\'hash_partitioned\', false)') + + con.commit() + + # compare strategies + def cmp_full(con1, con2): + """Compare selection partitions in plan and contents in partitioned tables""" + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', + 'only range_partitioned', + 'hash_partitioned', + 'only hash_partitioned' + ] + for table_ref in table_refs: + plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] + self.assertEqual(ordered(plan_initial), ordered(plan_copy)) + + content_initial = [x[0] for x in con1.execute(content_query % table_ref)] + content_copy = [x[0] for x in con2.execute(content_query % table_ref)] + self.assertEqual(content_initial, content_copy) + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter 
system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', 'alter system set pg_pathman.override_copy to off') + node.psql('copy', 'alter system set pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, + None, + [node.get_bin_path("pg_dump"), + "-p {}".format(node.port), + "initial"], + [node.get_bin_path("psql"), + "-p {}".format(node.port), + "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, + turnon_pathman, + [node.get_bin_path("pg_dump"), + "-p {}".format(node.port), + "--inserts", + "initial"], + [node.get_bin_path("psql"), + "-p {}".format(node.port), + "copy"], + cmp_full), # dump as plain text and restore via INSERTs + (None, + None, + [node.get_bin_path("pg_dump"), + "-p {}".format(node.port), + "--format=custom", + "initial"], + [node.get_bin_path("pg_restore"), + "-p {}".format(node.port), + "--dbname=copy"], + cmp_full), # dump in archive format + ] + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + p2 = subprocess.Popen(pg_restore_params, stdin=p1.stdout, stdout=subprocess.PIPE) + p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. 
+ p2.communicate() + + if (postproc != None): + postproc(node) + + # check validity of data + with node.connect('initial') as con1, node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_dbs(con1, con2) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + if __name__ == "__main__": unittest.main() + From fdfdbcbbb78f629ad5e8998dde76a76171bc6681 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 26 Jan 2017 16:12:14 +0300 Subject: [PATCH 0151/1124] generate unique names for partitions --- src/partition_creation.c | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 6fde23d0..6af546cb 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -646,6 +646,8 @@ 
choose_range_partition_name(Oid parent_relid, Oid parent_nsp) Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *relname; + int attempts_cnt = 1000; part_seq_relid = get_relname_relid(build_sequence_name_internal(parent_relid), parent_nsp); @@ -661,16 +663,34 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) save_sec_context | SECURITY_LOCAL_USERID_CHANGE); } - /* Get next integer for partition name */ - part_num = DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(part_seq_relid)); + /* Generate unique name */ + while (true) + { + /* Get next integer for partition name */ + part_num = DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(part_seq_relid)); + + relname = psprintf("%s_" UINT64_FORMAT, + get_rel_name(parent_relid), + (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ + + /* + * If we found a unique name or attemps number exceeds some reasonable + * value then we quit + * + * XXX Should we throw an exception if max attempts number is reached? 
+ */ + if (get_relname_relid(relname, parent_nsp) == InvalidOid || attempts_cnt < 0) + break; + + pfree(relname); + attempts_cnt--; + } /* Restore user's privileges */ if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); - return psprintf("%s_" UINT64_FORMAT, - get_rel_name(parent_relid), - (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ + return relname; } /* Choose a good name for a HASH partition */ From 39a227481539da8a35e396764c1839ccb30c2c35 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Feb 2017 15:38:08 +0300 Subject: [PATCH 0152/1124] rebuild tuples for children with different TupleDescs, build custom projections for RETURNING lists, introduce function prepare_rri_returning_for_insert(), fixes --- src/partition_creation.c | 2 +- src/partition_filter.c | 431 ++++++++++++++++++++++++-------- src/partition_filter.h | 26 +- src/planner_tree_modification.c | 18 +- src/utility_stmt_hooking.c | 16 +- 5 files changed, 373 insertions(+), 120 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 2074e9cc..10ef0b2a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -844,7 +844,7 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) /* Create new GUC level... */ guc_level = NewGUCNestLevel(); - /* ... and set client_min_messages = WARNING */ + /* ... 
and set client_min_messages = warning */ (void) set_config_option("client_min_messages", "WARNING", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); diff --git a/src/partition_filter.c b/src/partition_filter.c index a978acd0..0bdebae9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -15,6 +15,7 @@ #include "planner_tree_modification.h" #include "utils.h" +#include "access/htup_details.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" @@ -29,6 +30,8 @@ /* + * NOTE: 'estate->es_query_cxt' as data storage + * * We use this struct as an argument for fake * MemoryContextCallback pf_memcxt_callback() * in order to attach some additional info to @@ -65,13 +68,27 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; -static estate_mod_data * fetch_estate_mod_data(EState *estate); -static List * pfilter_build_tlist(Relation parent_rel, List *tlist); -static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); -static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); +static void prepare_rri_for_insert(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg); +static void prepare_rri_returning_for_insert(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg); static void prepare_rri_fdw_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, void *arg); +static Node *fix_returning_list_mutator(Node *node, void *state); + +static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); +static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); + +static List * pfilter_build_tlist(Relation parent_rel, List *tlist); + +static void pf_memcxt_callback(void *arg); +static estate_mod_data * fetch_estate_mod_data(EState *estate); void @@ -173,6 +190,15 @@ 
fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) heap_close(rri_holder->result_rel_info->ri_RelationDesc, parts_storage->heap_close_lock_mode); + + /* Drop TupleConversionMap as well as TupleDescs */ + if (rri_holder->tuple_map) + { + FreeTupleDesc(rri_holder->tuple_map->indesc); + FreeTupleDesc(rri_holder->tuple_map->outdesc); + + free_conversion_map(rri_holder->tuple_map); + } } } @@ -187,7 +213,7 @@ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) { #define CopyToResultRelInfo(field_name) \ - ( part_result_rel_info->field_name = parts_storage->saved_rel_info->field_name ) + ( child_result_rel_info->field_name = parts_storage->saved_rel_info->field_name ) ResultRelInfoHolder *rri_holder; bool found; @@ -199,11 +225,14 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* If not found, create & cache new ResultRelInfo */ if (!found) { - Relation child_rel; + Relation child_rel, + parent_rel = parts_storage->saved_rel_info->ri_RelationDesc; RangeTblEntry *child_rte, *parent_rte; Index child_rte_idx; - ResultRelInfo *part_result_rel_info; + ResultRelInfo *child_result_rel_info; + TupleDesc child_tupdesc, + parent_tupdesc; /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); @@ -223,13 +252,13 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); - child_rte->rtekind = RTE_RELATION; - child_rte->relid = partid; - child_rte->relkind = child_rel->rd_rel->relkind; - child_rte->eref = parent_rte->eref; - child_rte->requiredPerms = parent_rte->requiredPerms; - child_rte->checkAsUser = parent_rte->checkAsUser; - child_rte->insertedCols = parent_rte->insertedCols; + child_rte->rtekind = RTE_RELATION; + child_rte->relid = partid; + child_rte->relkind = child_rel->rd_rel->relkind; + child_rte->eref = parent_rte->eref; + 
child_rte->requiredPerms = parent_rte->requiredPerms; + child_rte->checkAsUser = parent_rte->checkAsUser; + child_rte->insertedCols = parent_rte->insertedCols; /* Check permissions for partition */ ExecCheckRTPerms(list_make1(child_rte), true); @@ -238,19 +267,19 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte); /* Create ResultRelInfo for partition */ - part_result_rel_info = makeNode(ResultRelInfo); + child_result_rel_info = makeNode(ResultRelInfo); /* Check that 'saved_rel_info' is set */ if (!parts_storage->saved_rel_info) elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); - InitResultRelInfo(part_result_rel_info, + InitResultRelInfo(child_result_rel_info, child_rel, child_rte_idx, parts_storage->estate->es_instrument); if (parts_storage->command_type != CMD_DELETE) - ExecOpenIndices(part_result_rel_info, parts_storage->speculative_inserts); + ExecOpenIndices(child_result_rel_info, parts_storage->speculative_inserts); /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); @@ -261,20 +290,40 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CopyToResultRelInfo(ri_onConflictSetWhere); /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ - part_result_rel_info->ri_ConstraintExprs = NULL; + child_result_rel_info->ri_ConstraintExprs = NULL; - /* Finally fill the ResultRelInfo holder */ + /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; - rri_holder->result_rel_info = part_result_rel_info; + rri_holder->result_rel_info = child_result_rel_info; + + /* Use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ + child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc->tdtypeid = InvalidOid; + + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); + parent_tupdesc->tdtypeid = InvalidOid; + + /* Generate tuple transformation 
map and some other stuff */ + rri_holder->tuple_map = convert_tuples_by_name(parent_tupdesc, + child_tupdesc, + "could not convert row type"); + + /* If map is one-to-one, free unused TupleDescs */ + if (!rri_holder->tuple_map) + { + FreeTupleDesc(child_tupdesc); + FreeTupleDesc(parent_tupdesc); + } /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) parts_storage->on_new_rri_holder_callback(parts_storage->estate, rri_holder, + parts_storage, parts_storage->callback_arg); - /* Append ResultRelInfo to storage->es_alloc_result_rels */ - append_rri_to_estate(parts_storage->estate, part_result_rel_info); + /* Finally append ResultRelInfo to storage->es_alloc_result_rels */ + append_rri_to_estate(parts_storage->estate, child_result_rel_info); } return rri_holder; @@ -319,7 +368,8 @@ find_partitions_for_value(Datum value, Oid value_type, Plan * make_partition_filter(Plan *subplan, Oid parent_relid, - OnConflictAction conflict_action) + OnConflictAction conflict_action, + List *returning_list) { CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; @@ -342,7 +392,9 @@ make_partition_filter(Plan *subplan, Oid parent_relid, cscan->custom_scan_tlist = subplan->targetlist; /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make2_int(parent_relid, conflict_action); + cscan->custom_private = list_make3(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list); return &cscan->scan.plan; } @@ -360,8 +412,9 @@ partition_filter_create_scan_state(CustomScan *node) /* Extract necessary variables */ state->subplan = (Plan *) linitial(node->custom_plans); - state->partitioned_table = linitial_int(node->custom_private); - state->on_conflict_action = lsecond_int(node->custom_private); + state->partitioned_table = intVal(linitial(node->custom_private)); + state->on_conflict_action = intVal(lsecond(node->custom_private)); + state->returning_list = lthird(node->custom_private); /* 
Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -384,7 +437,9 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) /* Init ResultRelInfo cache */ init_result_parts_storage(&state->result_parts, estate, state->on_conflict_action != ONCONFLICT_NONE, - ResultPartsStorageStandard, prepare_rri_fdw_for_insert, NULL); + ResultPartsStorageStandard, + prepare_rri_for_insert, + (void *) state); state->warning_triggered = false; } @@ -438,12 +493,35 @@ partition_filter_exec(CustomScanState *node) /* Search for a matching partition */ rri_holder = select_partition_for_insert(prel, &state->result_parts, value, prel->atttype, estate); - estate->es_result_relation_info = rri_holder->result_rel_info; /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_cxt); ResetExprContext(econtext); + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = rri_holder->result_rel_info; + + /* If there's a transform map, rebuild the tuple */ + if (rri_holder->tuple_map) + { + HeapTuple htup_old, + htup_new; + Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; + + htup_old = ExecMaterializeSlot(slot); + htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + + /* Allocate new slot if needed */ + if (!state->tup_convert_slot) + state->tup_convert_slot = MakeTupleTableSlot(); + + ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); + ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); + + /* Now replace the original slot */ + slot = state->tup_convert_slot; + } + return slot; } @@ -460,6 +538,10 @@ partition_filter_end(CustomScanState *node) Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); + + /* Free slot for tuple conversion */ + if (state->tup_convert_slot) + ExecDropSingleTupleTableSlot(state->tup_convert_slot); } void @@ -475,6 +557,7 @@ 
partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e /* Nothing to do here now */ } + /* * Smart wrapper for scan_result_parts_storage(). */ @@ -518,12 +601,134 @@ select_partition_for_insert(const PartRelationInfo *prel, return rri_holder; } + /* - * Callback to be executed on FDW partitions. + * Build partition filter's target list pointing to subplan tuple's elements. */ +static List * +pfilter_build_tlist(Relation parent_rel, List *tlist) +{ + List *result_tlist = NIL; + ListCell *lc; + int i = 1; + + foreach (lc, tlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + Expr *col_expr; + Form_pg_attribute attr; + + /* Make sure that this attribute exists */ + if (i > RelationGetDescr(parent_rel)->natts) + elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); + + /* Fetch pg_attribute entry for this column */ + attr = RelationGetDescr(parent_rel)->attrs[i - 1]; + + /* If this column is dropped, create a placeholder Const */ + if (attr->attisdropped) + { + /* Insert NULL for dropped column */ + col_expr = (Expr *) makeConst(INT4OID, + -1, + InvalidOid, + sizeof(int32), + (Datum) 0, + true, + true); + } + /* Otherwise we should create a Var referencing subplan's output */ + else + { + col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ + i, /* direct attribute mapping */ + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + } + + result_tlist = lappend(result_tlist, + makeTargetEntry(col_expr, + i, + NULL, + tle->resjunk)); + i++; /* next resno */ + } + + return result_tlist; +} + + +/* + * ---------------------------------------------- + * Additional init steps for ResultPartsStorage + * ---------------------------------------------- + */ + +/* Main trigger */ +static void +prepare_rri_for_insert(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg) +{ + 
prepare_rri_returning_for_insert(estate, rri_holder, rps_storage, arg); + prepare_rri_fdw_for_insert(estate, rri_holder, rps_storage, arg); +} + +/* Prepare 'RETURNING *' tlist & projection */ +static void +prepare_rri_returning_for_insert(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg) +{ + PartitionFilterState *pfstate; + List *returning_list; + ResultRelInfo *child_rri, + *parent_rri; + Index parent_rt_idx; + + /* We don't need to do anything ff there's no map */ + if (!rri_holder->tuple_map) + return; + + pfstate = (PartitionFilterState *) arg; + returning_list = pfstate->returning_list; + + /* Exit if there's no RETURNING list */ + if (!returning_list) + return; + + child_rri = rri_holder->result_rel_info; + parent_rri = rps_storage->saved_rel_info; + parent_rt_idx = parent_rri->ri_RangeTableIndex; + + /* Create ExprContext for tuple projections */ + if (!pfstate->tup_convert_econtext) + pfstate->tup_convert_econtext = CreateExprContext(estate); + + /* Replace parent's varattnos with child's */ + returning_list = (List *) + fix_returning_list_mutator((Node *) returning_list, + list_make2(makeInteger(parent_rt_idx), + rri_holder)); + + /* Build new projection info */ + child_rri->ri_projectReturning = + ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) returning_list, + /* HACK: no PlanState */ NULL), + pfstate->tup_convert_econtext, + parent_rri->ri_projectReturning->pi_slot, + RelationGetDescr(child_rri->ri_RelationDesc)); +} + +/* Prepare FDW access structs */ static void prepare_rri_fdw_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, void *arg) { ResultRelInfo *rri = rri_holder->result_rel_info; @@ -660,46 +865,74 @@ prepare_rri_fdw_for_insert(EState *estate, } } -/* - * Used by fetch_estate_mod_data() to find estate_mod_data. 
- */ -static void -pf_memcxt_callback(void *arg) { elog(DEBUG1, "EState is destroyed"); } - -/* - * Fetch (or create) a estate_mod_data structure we've hidden inside es_query_cxt. - */ -static estate_mod_data * -fetch_estate_mod_data(EState *estate) +/* Make parent's Vars of returninig list point to child's tuple */ +static Node * +fix_returning_list_mutator(Node *node, void *state) { - MemoryContext estate_mcxt = estate->es_query_cxt; - estate_mod_data *emd_struct; - MemoryContextCallback *cb = estate_mcxt->reset_cbs; + if (node == NULL) + return NULL; - /* Go through callback list */ - while (cb != NULL) + if (IsA(node, Var)) { - /* This is the dummy callback we're looking for! */ - if (cb->func == pf_memcxt_callback) - return (estate_mod_data *) cb->arg; + /* Extract packed args */ + List *state_args = (List *) state; + Index parent_idx = intVal(linitial(state_args)); + ResultRelInfoHolder *rri_holder = (ResultRelInfoHolder *) lsecond(state_args); + Var *var; + + /* Copy base fields of Var */ + var = (Var *) palloc(sizeof(Var)); + *var = *(Var *) node; + + /* Make Var point to child's attribute */ + if (var->varno == parent_idx && + var->varattno >= 0) /* don't change sysattrs! 
*/ + { + int i; + bool found_mapping = false; - cb = estate_mcxt->reset_cbs->next; - } + /* WHOLEROW reference, change row type */ + if (var->varattno == 0) + { + Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; - /* Have to create a new one */ - emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); - emd_struct->estate_not_modified = true; - emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; + /* Assign var->vartype a TupleDesc's type */ + var->vartype = RelationGetDescr(child_rel)->tdtypeid; - cb = MemoryContextAlloc(estate_mcxt, sizeof(MemoryContextCallback)); - cb->func = pf_memcxt_callback; - cb->arg = emd_struct; + return (Node *) var; + } - MemoryContextRegisterResetCallback(estate_mcxt, cb); + /* Map: child_att => parent_att, so we have to run through it */ + for (i = 0; i < rri_holder->tuple_map->outdesc->natts; i++) + { + /* Good, 'varattno' of parent is child's 'i+1' */ + if (var->varattno == rri_holder->tuple_map->attrMap[i]) + { + var->varattno = i + 1; /* attnos begin with 1 */ + found_mapping = true; + break; + } + } - return emd_struct; + /* Swear if we couldn't find mapping for this attribute */ + if (!found_mapping) + elog(ERROR, "could not bind attribute %d for returning statement", + var->varattno); + } + + return (Node *) var; + } + + return expression_tree_mutator(node, fix_returning_list_mutator, state); } + +/* + * ------------------------------------- + * ExecutorState-related modifications + * ------------------------------------- + */ + /* * Append RangeTblEntry 'rte' to estate->es_range_table. 
*/ @@ -756,59 +989,49 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) return estate->es_num_result_relations++; } + /* - * Build partition filter's target list pointing to subplan tuple's elements + * -------------------------------------- + * Store data in 'estate->es_query_cxt' + * -------------------------------------- */ -static List * -pfilter_build_tlist(Relation parent_rel, List *tlist) + +/* + * Used by fetch_estate_mod_data() to find estate_mod_data. + */ +static void +pf_memcxt_callback(void *arg) { elog(DEBUG1, "EState is destroyed"); } + +/* + * Fetch (or create) a estate_mod_data structure we've hidden inside es_query_cxt. + */ +static estate_mod_data * +fetch_estate_mod_data(EState *estate) { - List *result_tlist = NIL; - ListCell *lc; - int i = 1; + MemoryContext estate_mcxt = estate->es_query_cxt; + estate_mod_data *emd_struct; + MemoryContextCallback *cb = estate_mcxt->reset_cbs; - foreach (lc, tlist) + /* Go through callback list */ + while (cb != NULL) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - Expr *col_expr; - Form_pg_attribute attr; + /* This is the dummy callback we're looking for! 
*/ + if (cb->func == pf_memcxt_callback) + return (estate_mod_data *) cb->arg; - /* Make sure that this attribute exists */ - if (i > RelationGetDescr(parent_rel)->natts) - elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); + cb = estate_mcxt->reset_cbs->next; + } - /* Fetch pg_attribute entry for this column */ - attr = RelationGetDescr(parent_rel)->attrs[i - 1]; + /* Have to create a new one */ + emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); + emd_struct->estate_not_modified = true; + emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; - /* If this column is dropped, create a placeholder Const */ - if (attr->attisdropped) - { - /* Insert NULL for dropped column */ - col_expr = (Expr *) makeConst(INT4OID, - -1, - InvalidOid, - sizeof(int32), - (Datum) 0, - true, - true); - } - /* Otherwise we should create a Var referencing subplan's output */ - else - { - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - } + cb = MemoryContextAlloc(estate_mcxt, sizeof(MemoryContextCallback)); + cb->func = pf_memcxt_callback; + cb->arg = emd_struct; - result_tlist = lappend(result_tlist, - makeTargetEntry(col_expr, - i, - NULL, - tle->resjunk)); - i++; /* next resno */ - } + MemoryContextRegisterResetCallback(estate_mcxt, cb); - return result_tlist; + return emd_struct; } diff --git a/src/partition_filter.h b/src/partition_filter.h index df9a175b..c366abb6 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -16,6 +16,7 @@ #include "utils.h" #include "postgres.h" +#include "access/tupconvert.h" #include "commands/explain.h" #include "optimizer/planner.h" @@ -34,21 +35,28 @@ */ typedef struct { - Oid partid; /* partition's relid */ - ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + Oid partid; /* partition's relid */ + 
ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ } ResultRelInfoHolder; + +/* Forward declaration (for on_new_rri_holder()) */ +struct ResultPartsStorage; +typedef struct ResultPartsStorage ResultPartsStorage; + /* * Callback to be fired at rri_holder creation. */ typedef void (*on_new_rri_holder)(EState *estate, ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, void *arg); /* * Cached ResultRelInfos of partitions. */ -typedef struct +struct ResultPartsStorage { ResultRelInfo *saved_rel_info; /* original ResultRelInfo (parent) */ HTAB *result_rels_table; @@ -64,7 +72,7 @@ typedef struct CmdType command_type; /* currenly we only allow INSERT */ LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; -} ResultPartsStorage; +}; /* * Standard size of ResultPartsStorage entry. @@ -77,11 +85,16 @@ typedef struct Oid partitioned_table; OnConflictAction on_conflict_action; + List *returning_list; Plan *subplan; /* proxy variable to store subplan */ ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ bool warning_triggered; /* warning message counter */ + + TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + + ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; @@ -112,8 +125,9 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, int *nparts); Plan * make_partition_filter(Plan *subplan, - Oid partitioned_table, - OnConflictAction conflict_action); + Oid parent_relid, + OnConflictAction conflict_action, + List *returning_list); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index c541ef3f..7f14e99a 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -373,7 +373,8 @@ partition_filter_visitor(Plan *plan, void *context) List *rtable = (List *) context; 
ModifyTable *modify_table = (ModifyTable *) plan; ListCell *lc1, - *lc2; + *lc2, + *lc3; /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) @@ -381,6 +382,7 @@ partition_filter_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); + lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { Index rindex = lfirst_int(lc2); @@ -389,9 +391,21 @@ partition_filter_visitor(Plan *plan, void *context) /* Check that table is partitioned */ if (prel) + { + List *returning_list = NIL; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); + lc3 = lnext(lc3); + } + lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, - modify_table->onConflictAction); + modify_table->onConflictAction, + returning_list); + } } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1c0e9eea..7d29972f 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -58,9 +58,10 @@ static uint64 PathmanCopyFrom(CopyState cstate, List *range_table, bool old_protocol); -static void prepare_rri_fdw_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - void *arg); +static void prepare_rri_for_copy(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg); /* @@ -500,7 +501,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, estate, false, ResultPartsStorageStandard, - prepare_rri_fdw_for_copy, NULL); + prepare_rri_for_copy, NULL); parts_storage.saved_rel_info = parent_result_rel; /* Set up a tuple slot too */ @@ -656,9 +657,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * COPY FROM does not support FDWs, emit ERROR. 
*/ static void -prepare_rri_fdw_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - void *arg) +prepare_rri_for_copy(EState *estate, + ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage, + void *arg) { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; From a7b9ea38b34ac4c67e0be9f6498015a0610479c9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Feb 2017 16:29:59 +0300 Subject: [PATCH 0153/1124] make check constraint machinery aware of different TupleDescs (part_attno) --- src/init.c | 42 +++++++++++++++++++++++++++++------------- src/init.h | 1 + src/relation_info.c | 4 +++- 3 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/init.c b/src/init.c index cfa25f19..76108e77 100644 --- a/src/init.c +++ b/src/init.c @@ -77,15 +77,18 @@ static int cmp_range_entries(const void *p1, const void *p2, void *arg); static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, + const AttrNumber part_attno, Datum *min, Datum *max); static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, + const AttrNumber part_attno, uint32 *part_hash); static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, + const AttrNumber part_attno, Datum *val); static int oid_cmp(const void *p1, const void *p2); @@ -347,6 +350,7 @@ init_shmem_config(void) void fill_prel_with_partitions(const Oid *partitions, const uint32 parts_count, + const char *part_column_name, PartRelationInfo *prel) { uint32 i; @@ -361,7 +365,18 @@ fill_prel_with_partitions(const Oid *partitions, for (i = 0; i < PrelChildrenCount(prel); i++) { - con_expr = get_partition_constraint_expr(partitions[i], prel->attnum); + AttrNumber part_attno; + + /* NOTE: Partitions may have different TupleDescs */ + part_attno = get_attnum(partitions[i], part_column_name); + + /* Raise ERROR if there's no such column */ + if (part_attno == InvalidAttrNumber) + elog(ERROR, 
"partition \"%s\" has no column \"%s\"", + get_rel_name_or_relid(partitions[i]), + part_column_name); + + con_expr = get_partition_constraint_expr(partitions[i], part_attno); /* Perform a partitioning_type-dependent task */ switch (prel->parttype) @@ -370,13 +385,13 @@ fill_prel_with_partitions(const Oid *partitions, { uint32 hash; /* hash value < parts_count */ - if (validate_hash_constraint(con_expr, prel, &hash)) + if (validate_hash_constraint(con_expr, prel, part_attno, &hash)) prel->children[hash] = partitions[i]; else { DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, - (errmsg("Wrong constraint format for HASH partition \"%s\"", + (errmsg("wrong constraint format for HASH partition \"%s\"", get_rel_name_or_relid(partitions[i])), errhint(INIT_ERROR_HINT))); } @@ -387,7 +402,7 @@ fill_prel_with_partitions(const Oid *partitions, { Datum range_min, range_max; - if (validate_range_constraint(con_expr, prel, + if (validate_range_constraint(con_expr, prel, part_attno, &range_min, &range_max)) { prel->ranges[i].child_oid = partitions[i]; @@ -398,7 +413,7 @@ fill_prel_with_partitions(const Oid *partitions, { DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, - (errmsg("Wrong constraint format for RANGE partition \"%s\"", + (errmsg("wrong constraint format for RANGE partition \"%s\"", get_rel_name_or_relid(partitions[i])), errhint(INIT_ERROR_HINT))); } @@ -905,6 +920,7 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, + const AttrNumber part_attno, Datum *min, Datum *max) { @@ -928,11 +944,10 @@ validate_range_constraint(const Expr *expr, if (strategy == BTGreaterEqualStrategyNumber) { - if (!read_opexpr_const(opexpr, prel, min)) + if (!read_opexpr_const(opexpr, prel, part_attno, min)) return false; } - else - return false; + else return false; /* check that right operand is < operator */ opexpr = (OpExpr *) 
lsecond(boolexpr->args); @@ -940,11 +955,10 @@ validate_range_constraint(const Expr *expr, if (strategy == BTLessStrategyNumber) { - if (!read_opexpr_const(opexpr, prel, max)) + if (!read_opexpr_const(opexpr, prel, part_attno, max)) return false; } - else - return false; + else return false; return true; } @@ -957,6 +971,7 @@ validate_range_constraint(const Expr *expr, static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, + const AttrNumber part_attno, Datum *val) { const Node *left; @@ -990,7 +1005,7 @@ read_opexpr_const(const OpExpr *opexpr, else return false; /* VAR.attno == partitioned attribute number */ - if (part_attr->varoattno != prel->attnum) + if (part_attr->varoattno != part_attno) return false; /* CONST is NOT NULL */ @@ -1026,6 +1041,7 @@ read_opexpr_const(const OpExpr *opexpr, static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, + const AttrNumber part_attno, uint32 *part_hash) { const TypeCacheEntry *tce; @@ -1079,7 +1095,7 @@ validate_hash_constraint(const Expr *expr, var = (Var *) linitial(type_hash_proc_expr->args); /* Check that 'var' is the partitioning key attribute */ - if (var->varoattno != prel->attnum) + if (var->varoattno != part_attno) return false; /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ diff --git a/src/init.h b/src/init.h index c769937e..09f574a8 100644 --- a/src/init.h +++ b/src/init.h @@ -122,6 +122,7 @@ void unload_config(void); void fill_prel_with_partitions(const Oid *partitions, const uint32 parts_count, + const char *part_column_name, PartRelationInfo *prel); /* Result of find_inheritance_children_array() */ diff --git a/src/relation_info.c b/src/relation_info.c index 681711c4..471ca1aa 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -197,7 +197,9 @@ refresh_pathman_relation_info(Oid relid, * will try to refresh it again (and again), until the error is fixed * by user manually (i.e. invalid check constraints etc). 
*/ - fill_prel_with_partitions(prel_children, prel_children_count, prel); + fill_prel_with_partitions(prel_children, + prel_children_count, + part_column_name, prel); /* Peform some actions for each child */ for (i = 0; i < prel_children_count; i++) From 6cbe2735b3029e6e6c6001e2098ceaafd74e1396 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Feb 2017 17:21:30 +0300 Subject: [PATCH 0154/1124] fix test 'pathman_calamity', add test 'pathman_inserts' for RETURNING lists etc --- Makefile | 1 + expected/pathman_calamity.out | 6 +- expected/pathman_inserts.out | 461 ++++++++++++++++++++++++++++++++++ sql/pathman_inserts.sql | 104 ++++++++ 4 files changed, 569 insertions(+), 3 deletions(-) create mode 100644 expected/pathman_inserts.out create mode 100644 sql/pathman_inserts.sql diff --git a/Makefile b/Makefile index 04f77da3..d06b2298 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,7 @@ DATA = pg_pathman--1.0.sql \ PGFILEDESC = "pg_pathman - partitioning tool" REGRESS = pathman_basic \ + pathman_inserts \ pathman_runtime_nodes \ pathman_callbacks \ pathman_domains \ diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ca22b1c1..0d061419 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -420,7 +420,7 @@ ALTER TABLE calamity.wrong_partition ADD CONSTRAINT pathman_wrong_partition_1_check CHECK (val < 10); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: Wrong constraint format for RANGE partition "wrong_partition" +ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- @@ -436,7 +436,7 @@ ALTER TABLE calamity.wrong_partition ADD CONSTRAINT pathman_wrong_partition_1_check CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: Wrong constraint format 
for RANGE partition "wrong_partition" +ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- @@ -452,7 +452,7 @@ ALTER TABLE calamity.wrong_partition ADD CONSTRAINT pathman_wrong_partition_1_check CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: Wrong constraint format for RANGE partition "wrong_partition" +ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out new file mode 100644 index 00000000..f61d2af0 --- /dev/null +++ b/expected/pathman_inserts.out @@ -0,0 +1,461 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); +NOTICE: sequence "storage_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. 
+(1 row) + +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 0 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 0 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. 
| 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; + b | d +-----+------------- + 121 | DROP_COL_2. +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT attname + FROM pathman_config + WHERE partrel = 'test_inserts.storage'::regclass); + attname +--------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; + ?column? | ?column? | ?column? 
| ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage SELECT i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | 
test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 
| test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +DROP SCHEMA test_inserts CASCADE; +NOTICE: drop cascades to 16 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql new file mode 100644 index 00000000..29ff3746 --- /dev/null +++ b/sql/pathman_inserts.sql @@ -0,0 +1,104 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; + + +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + + +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +SELECT * FROM test_inserts.storage_11; + +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +SELECT * FROM test_inserts.storage_11; + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +SELECT * FROM 
test_inserts.storage_11; + + +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; + + +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +SELECT * FROM test_inserts.storage_12; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +SELECT * FROM test_inserts.storage_13; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; + + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; + + +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +SELECT * FROM test_inserts.storage_14; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; + + + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT attname + FROM pathman_config + WHERE partrel = 'test_inserts.storage'::regclass); + 
+INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; + + + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + + +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* one more time! */ +INSERT INTO test_inserts.storage SELECT i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + + + +DROP SCHEMA test_inserts CASCADE; +DROP EXTENSION pg_pathman CASCADE; From 21ea8b531a4d01f8c279d9382abfe467ebbe83b1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Feb 2017 17:52:10 +0300 Subject: [PATCH 0155/1124] more tests for RETURNING (add new column) --- expected/pathman_inserts.out | 173 ++++++++++++++++++++++++++++++++++- sql/pathman_inserts.sql | 24 ++++- 2 files changed, 194 insertions(+), 3 deletions(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index f61d2af0..5f5e5307 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -327,7 +327,7 @@ SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; /* drop data */ TRUNCATE test_inserts.storage; /* one more time! 
*/ -INSERT INTO test_inserts.storage SELECT i, i FROM generate_series(-2, 120) i; +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; b | d | tableoid -----+-----+------------------------- @@ -456,6 +456,177 @@ SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; 120 | 120 | test_inserts.storage_13 (123 rows) +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 
| 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 
| 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM 
generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; + ?column? | b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + DROP SCHEMA test_inserts CASCADE; NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 29ff3746..f50e9009 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -89,15 +89,35 @@ ORDER BY range_min::INT4; /* check the data */ SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; - /* drop data */ TRUNCATE test_inserts.storage; /* one more time! */ -INSERT INTO test_inserts.storage SELECT i, i FROM generate_series(-2, 120) i; +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; + + +/* one more time! 
x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; + DROP SCHEMA test_inserts CASCADE; From 967d13fcd77bb1ae73f8ab8f8fb3fe63962a974d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Feb 2017 14:33:19 +0300 Subject: [PATCH 0156/1124] show error message if ON CONFLICT clause is present, improve comments --- src/partition_filter.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 0bdebae9..45efb9c7 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -374,20 +374,28 @@ make_partition_filter(Plan *subplan, Oid parent_relid, CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; + /* Currenly we don't support ON CONFLICT clauses */ + if (conflict_action != ONCONFLICT_NONE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ON CONFLICT clause is not supported with partitioned tables"))); + + /* Copy costs etc */ cscan->scan.plan.startup_cost = subplan->startup_cost; cscan->scan.plan.total_cost = subplan->total_cost; cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; + /* Setup methods and child plan */ cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); + /* Build an appropriate target list using a cached Relation entry */ parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, - subplan->targetlist); + cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan->targetlist); RelationClose(parent_rel); - /* No relation will be 
scanned */ + /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; cscan->custom_scan_tlist = subplan->targetlist; From db3508e14257293d8d0bcfba58739d0c9de026c6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Feb 2017 16:20:19 +0300 Subject: [PATCH 0157/1124] add tests for ON CONFLICT (unsupported), fix {incr, decr}_refcount_parenthood_statuses() and pathman_planner_hook() --- expected/pathman_basic.out | 2 +- expected/pathman_inserts.out | 17 ++++++++-- sql/pathman_inserts.sql | 13 +++++++- src/hooks.c | 59 ++++++++++++++++++++------------- src/planner_tree_modification.c | 26 ++++++--------- src/planner_tree_modification.h | 2 +- 6 files changed, 75 insertions(+), 44 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index edaaa435..fc354547 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -494,7 +494,7 @@ UNION SELECT * FROM test.from_only_test; /* not ok, ONLY|non-ONLY in one query */ EXPLAIN (COSTS OFF) SELECT * FROM test.from_only_test a JOIN ONLY test.from_only_test b USING(val); -ERROR: It is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY EXPLAIN (COSTS OFF) WITH q1 AS (SELECT * FROM test.from_only_test), q2 AS (SELECT * FROM ONLY test.from_only_test) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 5f5e5307..d959a79c 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -5,6 +5,7 @@ CREATE SCHEMA test_inserts; /* create a partitioned table */ CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); NOTICE: sequence 
"storage_seq" does not exist, skipping create_range_partitions @@ -12,6 +13,13 @@ NOTICE: sequence "storage_seq" does not exist, skipping 10 (1 row) +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables /* implicitly prepend a partition (no columns have been dropped yet) */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; a | b | c | d @@ -25,7 +33,7 @@ SELECT * FROM test_inserts.storage_11; 0 | 0 | 0 | PREPEND. (1 row) -INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; tableoid ------------------------- test_inserts.storage_11 @@ -35,7 +43,7 @@ SELECT * FROM test_inserts.storage_11; a | b | c | d ---+---+---+----------- 0 | 0 | 0 | PREPEND. - 0 | 0 | 0 | PREPEND.. + 1 | 0 | 0 | PREPEND.. (2 rows) INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; @@ -48,10 +56,13 @@ SELECT * FROM test_inserts.storage_11; a | b | c | d ---+---+---+------------ 0 | 0 | 0 | PREPEND. - 0 | 0 | 0 | PREPEND.. + 1 | 0 | 0 | PREPEND.. 3 | 0 | 0 | PREPEND... 
(3 rows) +/* cause a conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" /* drop first column */ ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; /* will have 3 columns (b, c, d) */ diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index f50e9009..64bd191f 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -8,19 +8,30 @@ CREATE SCHEMA test_inserts; /* create a partitioned table */ CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; + + /* implicitly prepend a partition (no columns have been dropped yet) */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; SELECT * FROM test_inserts.storage_11; -INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; SELECT * FROM test_inserts.storage_11; INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; SELECT * FROM test_inserts.storage_11; +/* cause a conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; + /* drop first column */ ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 19688ad6..6e27c0f7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -466,39 +466,52 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo 
boundParams) proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ } while (0) - PlannedStmt *result; - uint32 query_id = parse->queryId; + PlannedStmt *result; + uint32 query_id = parse->queryId; - if (IsPathmanReady()) + PG_TRY(); { - /* Increment parenthood_statuses refcount */ - incr_refcount_parenthood_statuses(); + if (IsPathmanReady()) + { + /* Increment parenthood_statuses refcount */ + incr_refcount_parenthood_statuses(); - /* Modify query tree if needed */ - pathman_transform_query(parse); - } + /* Modify query tree if needed */ + pathman_transform_query(parse); + } - /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); - else - result = standard_planner(parse, cursorOptions, boundParams); + /* Invoke original hook if needed */ + if (planner_hook_next) + result = planner_hook_next(parse, cursorOptions, boundParams); + else + result = standard_planner(parse, cursorOptions, boundParams); - if (IsPathmanReady()) - { - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); + if (IsPathmanReady()) + { + /* Give rowmark-related attributes correct names */ + ExecuteForPlanTree(result, postprocess_lock_rows); + + /* Add PartitionFilter node for INSERT queries */ + ExecuteForPlanTree(result, add_partition_filters); - /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + /* Decrement parenthood_statuses refcount */ + decr_refcount_parenthood_statuses(); - /* Decrement parenthood_statuses refcount */ - decr_refcount_parenthood_statuses(false); + /* HACK: restore queryId set by pg_stat_statements */ + result->queryId = query_id; + } + } + /* We must decrease parenthood statuses refcount on ERROR */ + PG_CATCH(); + { + /* Caught an ERROR, decrease refcount */ + decr_refcount_parenthood_statuses(); - /* HACK: restore queryId set by pg_stat_statements */ - result->queryId = query_id; + 
PG_RE_THROW(); } + PG_END_TRY(); + /* Finally return the Plan */ return result; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 7f14e99a..4e032c67 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -43,7 +43,7 @@ static List *get_tableoids_list(List *tlist); * cant't be used with both and without ONLY modifiers. */ static HTAB *per_table_parenthood_mapping = NULL; -static uint32 per_table_parenthood_mapping_refcount = 0; +static int per_table_parenthood_mapping_refcount = 0; /* * We have to mark each Query with a unique id in order @@ -564,10 +564,7 @@ assign_rel_parenthood_status(uint32 query_id, /* Saved status conflicts with 'new_status' */ if (status_entry->parenthood_status != new_status) { - /* Don't forget to clear ALL tracked statuses! */ - decr_refcount_parenthood_statuses(true); - - elog(ERROR, "It is prohibited to apply ONLY modifier to partitioned " + elog(ERROR, "it is prohibited to apply ONLY modifier to partitioned " "tables which have already been mentioned without ONLY"); } } @@ -612,8 +609,10 @@ get_rel_parenthood_status(uint32 query_id, Oid relid) void incr_refcount_parenthood_statuses(void) { - Assert(per_table_parenthood_mapping_refcount >= 0); - per_table_parenthood_mapping_refcount++; + /* Increment reference counter */ + if (++per_table_parenthood_mapping_refcount <= 0) + elog(WARNING, "imbalanced %s", + CppAsString(incr_refcount_parenthood_statuses)); } /* Return current value of usage counter */ @@ -626,15 +625,12 @@ get_refcount_parenthood_statuses(void) /* Reset all cached statuses if needed (query end) */ void -decr_refcount_parenthood_statuses(bool entirely) +decr_refcount_parenthood_statuses(void) { - Assert(per_table_parenthood_mapping_refcount > 0); - - /* Should we destroy the table right now? 
*/ - if (entirely) - per_table_parenthood_mapping_refcount = 0; - else - per_table_parenthood_mapping_refcount--; + /* Decrement reference counter */ + if (--per_table_parenthood_mapping_refcount < 0) + elog(WARNING, "imbalanced %s", + CppAsString(decr_refcount_parenthood_statuses)); /* Free resources if no one is using them */ if (per_table_parenthood_mapping_refcount == 0) diff --git a/src/planner_tree_modification.h b/src/planner_tree_modification.h index 80485cc2..ddf546ac 100644 --- a/src/planner_tree_modification.h +++ b/src/planner_tree_modification.h @@ -46,7 +46,7 @@ void assign_rel_parenthood_status(uint32 query_id, Oid relid, rel_parenthood_status get_rel_parenthood_status(uint32 query_id, Oid relid); void incr_refcount_parenthood_statuses(void); uint32 get_refcount_parenthood_statuses(void); -void decr_refcount_parenthood_statuses(bool entirely); +void decr_refcount_parenthood_statuses(void); #endif /* PLANNER_TREE_MODIFICATION_H */ From 89b13a011a76eb0c8707404094424bf028d3f314 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 3 Feb 2017 20:12:24 +0300 Subject: [PATCH 0158/1124] added set_interval() function --- init.sql | 16 ++++++++++- range.sql | 24 ++++++++++++++++ src/partition_creation.c | 62 ---------------------------------------- src/pl_range_funcs.c | 25 +++++++++++++++- src/utils.c | 58 +++++++++++++++++++++++++++++++++++++ src/utils.h | 3 ++ 6 files changed, 124 insertions(+), 64 deletions(-) diff --git a/init.sql b/init.sql index 3733e398..9ed807bc 100644 --- a/init.sql +++ b/init.sql @@ -8,6 +8,19 @@ * ------------------------------------------------------------------------ */ + +/* + * Takes text representation of interval value and checks if it is corresponds + * to partitioning key. 
The function throws an error if it fails to convert + * text to Datum + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + parent REGCLASS, + interval_value TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C STRICT; + + /* * Pathman config * partrel - regclass (relation type, stored as Oid) @@ -23,7 +36,8 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( parttype INTEGER NOT NULL, range_interval TEXT, - CHECK (parttype IN (1, 2)) /* check for allowed part types */ + CHECK (parttype IN (1, 2)), /* check for allowed part types */ + CHECK (@extschema@.validate_interval_value(partrel, range_interval)) ); diff --git a/range.sql b/range.sql index 8d899452..5755a20d 100644 --- a/range.sql +++ b/range.sql @@ -435,6 +435,30 @@ BEGIN END $$ LANGUAGE plpgsql; + +/* + * Set (or reset) default interval for auto created partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.set_interval(parent REGCLASS, value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + affected INTEGER; +BEGIN + UPDATE @extschema@.pathman_config + SET range_interval = value::text + WHERE partrel = parent; + + GET DIAGNOSTICS affected = ROW_COUNT; + + IF affected = 0 THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent; + END IF; +END +$$ +LANGUAGE plpgsql; + + /* * Split RANGE partition */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 01851370..5d185ee6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -43,10 +43,6 @@ #include "utils/typcache.h" -static Datum extract_binary_interval_from_text(Datum interval_text, - Oid part_atttype, - Oid *interval_type); - static void extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, Oid *move_bound_op_func, Oid *move_bound_op_ret_type); @@ -427,64 +423,6 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) return partid; } -/* - * Convert interval from TEXT to binary form using partitioned column's type. 
- */ -static Datum -extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ - Oid part_atttype, /* partitioned column's type */ - Oid *interval_type) /* returned value */ -{ - Datum interval_binary; - const char *interval_cstring; - - interval_cstring = TextDatumGetCString(interval_text); - - /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ - if (is_date_type_internal(part_atttype)) - { - int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; - - /* Convert interval from CSTRING to internal form */ - interval_binary = DirectFunctionCall3(interval_in, - CStringGetDatum(interval_cstring), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(interval_typmod)); - if (interval_type) - *interval_type = INTERVALOID; - } - /* Otherwise cast it to the partitioned column's type */ - else - { - HeapTuple htup; - Oid typein_proc = InvalidOid; - - htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); - if (HeapTupleIsValid(htup)) - { - typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; - ReleaseSysCache(htup); - } - else - elog(ERROR, "Cannot find input function for type %u", part_atttype); - - /* - * Convert interval from CSTRING to 'prel->atttype'. - * - * Note: We pass 3 arguments in case - * 'typein_proc' also takes Oid & typmod. - */ - interval_binary = OidFunctionCall3(typein_proc, - CStringGetDatum(interval_cstring), - ObjectIdGetDatum(part_atttype), - Int32GetDatum(-1)); - if (interval_type) - *interval_type = part_atttype; - } - - return interval_binary; -} - /* * Fetch binary operator by name and return it's function and ret type. 
*/ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index faa5798b..e897f040 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -60,6 +60,7 @@ PG_FUNCTION_INFO_V1( build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); PG_FUNCTION_INFO_V1( merge_range_partitions ); PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); +PG_FUNCTION_INFO_V1( validate_interval_value ); /* @@ -749,4 +750,26 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) drop_table(relid); PG_RETURN_VOID(); -} \ No newline at end of file +} + +/* + * Takes text representation of interval value and checks if it is corresponds + * to partitioning key. The function throws an error if it fails to convert + * text to Datum + */ +Datum +validate_interval_value(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + Oid parent = PG_GETARG_OID(0); + Datum interval = PG_GETARG_DATUM(1); + + /* TODO!!! */ + prel = get_pathman_relation_info(parent); + if (!prel) + PG_RETURN_BOOL(true); + + extract_binary_interval_from_text(interval, prel->atttype, NULL); + + PG_RETURN_BOOL(true); +} diff --git a/src/utils.c b/src/utils.c index c763d72c..c061ae09 100644 --- a/src/utils.c +++ b/src/utils.c @@ -410,3 +410,61 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) } } } + +/* + * Convert interval from TEXT to binary form using partitioned column's type. 
+ */ +Datum +extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ + Oid part_atttype, /* partitioned column's type */ + Oid *interval_type) /* returned value */ +{ + Datum interval_binary; + const char *interval_cstring; + + interval_cstring = TextDatumGetCString(interval_text); + + /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ + if (is_date_type_internal(part_atttype)) + { + int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; + + /* Convert interval from CSTRING to internal form */ + interval_binary = DirectFunctionCall3(interval_in, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(interval_typmod)); + if (interval_type) + *interval_type = INTERVALOID; + } + /* Otherwise cast it to the partitioned column's type */ + else + { + HeapTuple htup; + Oid typein_proc = InvalidOid; + + htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); + if (HeapTupleIsValid(htup)) + { + typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; + ReleaseSysCache(htup); + } + else + elog(ERROR, "Cannot find input function for type %u", part_atttype); + + /* + * Convert interval from CSTRING to 'prel->atttype'. + * + * Note: We pass 3 arguments in case + * 'typein_proc' also takes Oid & typmod. 
+ */ + interval_binary = OidFunctionCall3(typein_proc, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(part_atttype), + Int32GetDatum(-1)); + if (interval_type) + *interval_type = part_atttype; + } + + return interval_binary; +} diff --git a/src/utils.h b/src/utils.h index e81f6026..e00cd582 100644 --- a/src/utils.h +++ b/src/utils.h @@ -56,6 +56,9 @@ void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); */ char * datum_to_cstring(Datum datum, Oid typid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); +Datum extract_binary_interval_from_text(Datum interval_text, + Oid part_atttype, + Oid *interval_type); #endif /* PATHMAN_UTILS_H */ From ab61e1eab3fd87ec3a1dcd4d79b16938d25625d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 16:51:39 +0300 Subject: [PATCH 0159/1124] make functions get_pathman_config_params_relid() & get_pathman_config_relid() safer --- src/hooks.c | 2 +- src/init.c | 6 +++--- src/pathman.h | 4 ++-- src/pg_pathman.c | 20 ++++++++++++++++++-- src/pl_funcs.c | 17 ++++++++++------- src/relation_info.c | 2 +- 6 files changed, 35 insertions(+), 16 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 19688ad6..61627612 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -592,7 +592,7 @@ pathman_relcache_hook(Datum arg, Oid relid) return; /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ - if (relid == get_pathman_config_relid()) + if (relid == get_pathman_config_relid(false)) delay_pathman_shutdown(); /* Invalidate PartParentInfo cache if needed */ diff --git a/src/init.c b/src/init.c index cfa25f19..f4fc5292 100644 --- a/src/init.c +++ b/src/init.c @@ -661,7 +661,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, ObjectIdGetDatum(relid)); /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(), AccessShareLock); + rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 
'partrel' column is if regclass type */ Assert(RelationGetDescr(rel)-> @@ -735,7 +735,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - rel = heap_open(get_pathman_config_params_relid(), AccessShareLock); + rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); scan = heap_beginscan(rel, snapshot, 1, key); @@ -774,7 +774,7 @@ read_pathman_config(void) HeapTuple htup; /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(), AccessShareLock); + rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is if regclass type */ Assert(RelationGetDescr(rel)-> diff --git a/src/pathman.h b/src/pathman.h index 32e059b3..bf910219 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -85,8 +85,8 @@ extern Oid pathman_config_params_relid; /* * Just to clarify our intentions (return the corresponding relid). */ -Oid get_pathman_config_relid(void); -Oid get_pathman_config_params_relid(void); +Oid get_pathman_config_relid(bool invalid_is_ok); +Oid get_pathman_config_params_relid(bool invalid_is_ok); /* * pg_pathman's global state structure. diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 10769666..be92a98d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1884,8 +1884,16 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, * Get cached PATHMAN_CONFIG relation Oid. */ Oid -get_pathman_config_relid(void) +get_pathman_config_relid(bool invalid_is_ok) { + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, + (!IsPathmanInitialized() ? 
+ "pg_pathman is not initialized yet" : + "unexpected error in function " + CppAsString(get_pathman_config_relid))); + return pathman_config_relid; } @@ -1893,7 +1901,15 @@ get_pathman_config_relid(void) * Get cached PATHMAN_CONFIG_PARAMS relation Oid. */ Oid -get_pathman_config_params_relid(void) +get_pathman_config_params_relid(bool invalid_is_ok) { + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, + (!IsPathmanInitialized() ? + "pg_pathman is not initialized yet" : + "unexpected error in function " + CppAsString(get_pathman_config_params_relid))); + return pathman_config_params_relid; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 6ff1e9da..0d4d7df7 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -285,7 +285,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) palloc(sizeof(show_partition_list_cxt)); /* Open PATHMAN_CONFIG with latest snapshot available */ - usercxt->pathman_config = heap_open(get_pathman_config_relid(), + usercxt->pathman_config = heap_open(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); usercxt->pathman_config_scan = heap_beginscan(usercxt->pathman_config, @@ -637,7 +637,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); /* Insert new row into PATHMAN_CONFIG */ - pathman_config = heap_open(get_pathman_config_relid(), RowExclusiveLock); + pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); simple_heap_insert(pathman_config, htup); indstate = CatalogOpenIndexes(pathman_config); @@ -685,14 +685,17 @@ Datum pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; - Oid pathman_config_params = get_pathman_config_params_relid(); + Oid pathman_config_params; Oid partrel; 
Datum partrel_datum; bool partrel_isnull; - /* Handle pg_pathman disabled case */ - if (!OidIsValid(pathman_config_params)) - goto _return; + /* Fetch Oid of PATHMAN_CONFIG_PARAMS */ + pathman_config_params = get_pathman_config_params_relid(true); + + /* Handle "pg_pathman.enabled = t" case */ + if (!OidIsValid(pathman_config_params)) + goto pathman_config_params_trigger_func_return; /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) @@ -722,8 +725,8 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) if (check_relation_exists(partrel)) CacheInvalidateRelcacheByRelid(partrel); +pathman_config_params_trigger_func_return: /* Return the tuple we've been given */ -_return: if (trigdata->tg_event & TRIGGER_EVENT_UPDATE) PG_RETURN_POINTER(trigdata->tg_newtuple); else diff --git a/src/relation_info.c b/src/relation_info.c index 681711c4..57c3f688 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -415,7 +415,7 @@ finish_delayed_invalidation(void) /* Check that PATHMAN_CONFIG table has indeed been dropped */ if (cur_pathman_config_relid == InvalidOid || - cur_pathman_config_relid != get_pathman_config_relid()) + cur_pathman_config_relid != get_pathman_config_relid(true)) { /* Ok, let's unload pg_pathman's config */ unload_config(); From 7813e85b724e6e228f304261410b2a681193e1c1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 17:45:06 +0300 Subject: [PATCH 0160/1124] small adjustments for recursive CTE test --- expected/pathman_basic.out | 26 ++++++++++++++++++-------- sql/pathman_basic.sql | 26 +++++++++++++++++++------- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index acff6c92..f8ada1d1 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2310,17 +2310,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 (12 rows) /* Test recursive CTE */ -create table test.recursive_cte_test_tbl(id int not null, name 
text not null); -select * from create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); +CREATE TABLE test.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT * FROM create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); create_hash_partitions ------------------------ 2 (1 row) -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||id from generate_series(1,100) f(id); -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 1) from generate_series(1,100) f(id); -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 2) from generate_series(1,100) f(id); -select * from test.recursive_cte_test_tbl where id = 5; +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test.recursive_cte_test_tbl WHERE id = 5; id | name ----+------- 5 | name5 @@ -2328,7 +2328,17 @@ select * from test.recursive_cte_test_tbl where id = 5; 5 | name7 (3 rows) -with recursive test as (select min(name) as name from test.recursive_cte_test_tbl where id = 5 union all select (select min(name) from test.recursive_cte_test_tbl where id = 5 and name > test.name) from test where name is not null) select * from test; +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; name ------- name5 @@ -2338,6 +2348,6 @@ with recursive test as (select min(name) as name from test.recursive_cte_test_tb (4 rows) DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 51 other objects +NOTICE: drop cascades to 54 other 
objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 1b7710d1..e4c53663 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -661,13 +661,25 @@ VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; /* Test recursive CTE */ -create table test.recursive_cte_test_tbl(id int not null, name text not null); -select * from create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||id from generate_series(1,100) f(id); -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 1) from generate_series(1,100) f(id); -insert into test.recursive_cte_test_tbl (id, name) select id, 'name'||(id + 2) from generate_series(1,100) f(id); -select * from test.recursive_cte_test_tbl where id = 5; -with recursive test as (select min(name) as name from test.recursive_cte_test_tbl where id = 5 union all select (select min(name) from test.recursive_cte_test_tbl where id = 5 and name > test.name) from test where name is not null) select * from test; +CREATE TABLE test.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT * FROM create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test.recursive_cte_test_tbl WHERE id = 5; + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE 
name IS NOT NULL) +SELECT * FROM test; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; From 75d67e09a9c76efe73bbaf76354cf0214914b587 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 7 Feb 2017 18:09:16 +0300 Subject: [PATCH 0161/1124] devnull pg_restore sdterr and stdout and augment descriptions of errors in test_pg_dump --- tests/python/partitioning_test.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 7d2076aa..b0e92a67 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -780,6 +780,7 @@ def test_pg_dump(self): con.commit() # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) def cmp_full(con1, con2): """Compare selection partitions in plan and contents in partitioned tables""" @@ -794,11 +795,15 @@ def cmp_full(con1, con2): for table_ref in table_refs: plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - self.assertEqual(ordered(plan_initial), ordered(plan_copy)) + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH content_initial = [x[0] for x in con1.execute(content_query % table_ref)] content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - self.assertEqual(content_initial, content_copy) + if content_initial != content_copy: + return CONTENTS_MISMATCH + + return CMP_OK def turnoff_pathman(node): node.psql('initial', 'alter system set pg_pathman.enable to off') @@ -845,12 +850,15 @@ def turnon_pathman(node): ] for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) + if (preproc != None): preproc(node) # transfer and restore data + FNULL = open(os.devnull, 'w') p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - 
p2 = subprocess.Popen(pg_restore_params, stdin=p1.stdout, stdout=subprocess.PIPE) + p2 = subprocess.Popen(pg_restore_params, stdin=p1.stdout, stdout=FNULL, stderr=FNULL) p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. p2.communicate() @@ -861,7 +869,11 @@ def turnon_pathman(node): with node.connect('initial') as con1, node.connect('copy') as con2: # compare plans and contents of initial and copy - cmp_dbs(con1, con2) + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual(cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) + self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) # compare enable_parent flag and callback function config_params_query = """ @@ -872,7 +884,8 @@ def turnon_pathman(node): config_params_initial[row[0]] = row[1:] for row in con2.execute(config_params_query): config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy) + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) # compare constraints on each partition constraints_query = """ @@ -885,7 +898,8 @@ def turnon_pathman(node): constraints_initial[row[0]] = row[1:] for row in con2.execute(constraints_query): constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy) + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) # clear copy database node.psql('copy', 'drop schema public cascade') From 39795af4614dd26d15f6c24f3b942a3fb2bbe6d4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 18:41:50 +0300 Subject: [PATCH 0162/1124] fix repo language detection --- .gitattributes | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .gitattributes diff 
--git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..88f6baf4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +*.h linguist-language=C +*.c linguist-language=C +*.spec linguist-vendored=true From 3d6fcb756d2539be2c39e1bbb4a3a9182759d3c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 18:51:00 +0300 Subject: [PATCH 0163/1124] remove obsolete extension scripts --- pg_pathman--1.0.sql | 2406 ------------------------------------------- pg_pathman--1.1.sql | 2406 ------------------------------------------- 2 files changed, 4812 deletions(-) delete mode 100644 pg_pathman--1.0.sql delete mode 100644 pg_pathman--1.1.sql diff --git a/pg_pathman--1.0.sql b/pg_pathman--1.0.sql deleted file mode 100644 index 20cb62a5..00000000 --- a/pg_pathman--1.0.sql +++ /dev/null @@ -1,2406 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * init.sql - * Creates config table and provides common utility functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Pathman config - * partrel - regclass (relation type, stored as Oid) - * attname - partitioning key - * parttype - partitioning type: - * 1 - HASH - * 2 - RANGE - * range_interval - base interval for RANGE partitioning as string - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( - partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, - parttype INTEGER NOT NULL, - range_interval TEXT, - - CHECK (parttype IN (1, 2)) /* check for allowed part types */ -); - -/* - * Optional parameters for partitioned tables. 
- * partrel - regclass (relation type, stored as Oid) - * enable_parent - add parent table to plan - * auto - enable automatic partition creation - * init_callback - cb to be executed on partition creation - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( - partrel REGCLASS NOT NULL PRIMARY KEY, - enable_parent BOOLEAN NOT NULL DEFAULT FALSE, - auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0 -); -CREATE UNIQUE INDEX i_pathman_config_params -ON @extschema@.pathman_config_params(partrel); - -GRANT SELECT, INSERT, UPDATE, DELETE -ON @extschema@.pathman_config, @extschema@.pathman_config_params -TO public; - -/* - * Check if current user can alter/drop specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) -RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; - -/* - * Row security policy to restrict partitioning operations to owner and - * superusers only - */ -CREATE POLICY deny_modification ON @extschema@.pathman_config -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY deny_modification ON @extschema@.pathman_config_params -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); - -CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); - -ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; -ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; - -/* - * Invalidate relcache every time someone changes parameters config. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() -RETURNS TRIGGER AS -$$ -BEGIN - IF TG_OP IN ('INSERT', 'UPDATE') THEN - PERFORM @extschema@.invalidate_relcache(NEW.partrel); - END IF; - - IF TG_OP IN ('UPDATE', 'DELETE') THEN - PERFORM @extschema@.invalidate_relcache(OLD.partrel); - END IF; - - IF TG_OP = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$ -LANGUAGE plpgsql; - -CREATE TRIGGER pathman_config_params_trigger -BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params -FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); - -/* - * Enable dump of config tables with pg_dump. - */ -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); - - -CREATE OR REPLACE FUNCTION @extschema@.partitions_count(relation REGCLASS) -RETURNS INT AS -$$ -BEGIN - RETURN count(*) FROM pg_inherits WHERE inhparent = relation; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Add a row describing the optional parameter to pathman_config_params. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( - relation REGCLASS, - param TEXT, - value ANYELEMENT) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params - (partrel, %1$s) VALUES ($1, $2) - ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) - USING relation, value; -END -$$ -LANGUAGE plpgsql; - -/* - * Include\exclude parent relation in query plan. - */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Enable\disable automatic partition creation. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'auto', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Set partition creation callback - */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( - relation REGCLASS, - callback REGPROC DEFAULT 0) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_on_partition_created_callback(callback); - PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); -END -$$ -LANGUAGE plpgsql; - -/* - * Show all existing parents and partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() -RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - partattr TEXT, - range_min TEXT, - range_max TEXT) -AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; - -/* - * View for show_partition_list(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list -AS SELECT * FROM @extschema@.show_partition_list(); - -GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; - -/* - * Show all existing concurrent partitioning tasks. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() -RETURNS TABLE ( - userid REGROLE, - pid INT, - dbid OID, - relid REGCLASS, - processed INT, - status TEXT) -AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; - -/* - * View for show_concurrent_part_tasks(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks -AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); - -GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; - -/* - * Partition table using ConcurrentPartWorker. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( - relation REGCLASS, - batch_size INTEGER DEFAULT 1000, - sleep_time FLOAT8 DEFAULT 1.0) -RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' -LANGUAGE C STRICT; - -/* - * Stop concurrent partitioning task. - */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( - relation REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' -LANGUAGE C STRICT; - - -/* - * Copy rows to partitions concurrently. - */ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( - relation REGCLASS, - p_min ANYELEMENT DEFAULT NULL::text, - p_max ANYELEMENT DEFAULT NULL::text, - p_limit INT DEFAULT NULL, - OUT p_total BIGINT) -AS -$$ -DECLARE - v_attr TEXT; - v_limit_clause TEXT := ''; - v_where_clause TEXT := ''; - ctids TID[]; - -BEGIN - SELECT attname INTO v_attr - FROM @extschema@.pathman_config WHERE partrel = relation; - - p_total := 0; - - /* Format LIMIT clause if needed */ - IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); - END IF; - - /* Format WHERE clause if needed */ - IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', v_attr); - END IF; - - IF NOT p_max IS NULL THEN - IF NOT p_min IS NULL THEN - v_where_clause := v_where_clause || ' AND '; - END IF; - v_where_clause := v_where_clause || format('%1$s < $2', v_attr); - END IF; - - IF v_where_clause != '' THEN - v_where_clause := 'WHERE ' || v_where_clause; - END IF; - - /* Lock rows and copy data */ - RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', - relation, v_where_clause, v_limit_clause) - USING p_min, p_max - INTO ctids; - - EXECUTE format(' - WITH data AS ( - DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) - INSERT INTO %1$s SELECT * FROM data', - relation) - USING ctids; - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; 
-END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Old school way to distribute rows to partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( - parent_relid REGCLASS, - OUT p_total BIGINT) -AS -$$ -DECLARE - relname TEXT; - rec RECORD; - cnt BIGINT := 0; - -BEGIN - p_total := 0; - - /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) - INSERT INTO %1$s SELECT * FROM part_data', - parent_relid::TEXT); - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; -END -$$ -LANGUAGE plpgsql STRICT -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Disable pathman partitioning for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; - PERFORM @extschema@.drop_triggers(parent_relid); - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Validates relation name. It must be schema qualified. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( - cls REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - relname TEXT; - -BEGIN - relname = @extschema@.get_schema_qualified_name(cls); - - IF relname IS NULL THEN - RAISE EXCEPTION 'relation %s does not exist', cls; - END IF; - - RETURN relname; -END -$$ -LANGUAGE plpgsql; - -/* - * Aggregates several common relation checks before partitioning. - * Suitable for every partitioning type. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( - relation REGCLASS, - p_attribute TEXT) -RETURNS BOOLEAN AS -$$ -DECLARE - v_rec RECORD; - is_referenced BOOLEAN; - rel_persistence CHAR; - -BEGIN - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = relation INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be partitioned', - relation::TEXT; - END IF; - - IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = relation) THEN - RAISE EXCEPTION 'relation "%" has already been partitioned', relation; - END IF; - - IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key ''%'' must be NOT NULL', p_attribute; - END IF; - - /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint - WHERE confrelid = relation::REGCLASS::OID) - LOOP - is_referenced := TRUE; - RAISE WARNING 'foreign key "%" references relation "%"', - v_rec.conname, relation; - END LOOP; - - IF is_referenced THEN - RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; - END IF; - - RETURN TRUE; -END -$$ -LANGUAGE plpgsql; - -/* - * Returns relname without quotes or something. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( - cls REGCLASS, - OUT schema TEXT, - OUT relname TEXT) -AS -$$ -BEGIN - SELECT pg_catalog.pg_class.relnamespace::regnamespace, - pg_catalog.pg_class.relname - FROM pg_catalog.pg_class WHERE oid = cls::oid - INTO schema, relname; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Returns the schema-qualified name of table. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_schema_qualified_name( - cls REGCLASS, - delimiter TEXT DEFAULT '.', - suffix TEXT DEFAULT '') -RETURNS TEXT AS -$$ -BEGIN - RETURN (SELECT quote_ident(relnamespace::regnamespace::text) || - delimiter || - quote_ident(relname || suffix) - FROM pg_catalog.pg_class - WHERE oid = cls::oid); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Check if two relations have equal structures. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( - relation1 OID, relation2 OID) -RETURNS BOOLEAN AS -$$ -DECLARE - rec RECORD; - -BEGIN - FOR rec IN ( - WITH - a1 AS (select * from pg_catalog.pg_attribute - where attrelid = relation1 and attnum > 0), - a2 AS (select * from pg_catalog.pg_attribute - where attrelid = relation2 and attnum > 0) - SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 - FROM a1 - FULL JOIN a2 ON a1.attnum = a2.attnum - ) - LOOP - IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN - RETURN false; - END IF; - END LOOP; - - RETURN true; -END -$$ -LANGUAGE plpgsql; - -/* - * DDL trigger that removes entry from pathman_config table. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() -RETURNS event_trigger AS -$$ -DECLARE - obj record; - pg_class_oid oid; - relids regclass[]; -BEGIN - pg_class_oid = 'pg_catalog.pg_class'::regclass; - - /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events - JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid; - - /* Cleanup pathman_config */ - DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); - - /* Cleanup params table too */ - DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); -END -$$ -LANGUAGE plpgsql; - -/* - * Drop triggers. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', - @extschema@.build_update_trigger_func_name(parent_relid)); -END -$$ LANGUAGE plpgsql STRICT; - -/* - * Drop partitions. If delete_data set to TRUE, partitions - * will be dropped with all the data. - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( - parent_relid REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) -RETURNS INTEGER AS -$$ -DECLARE - v_rec RECORD; - v_rows BIGINT; - v_part_count INTEGER := 0; - conf_num_del INTEGER; - v_relkind CHAR; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Drop trigger first */ - PERFORM @extschema@.drop_triggers(parent_relid); - - WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config - WHERE partrel = parent_relid - RETURNING *) - SELECT count(*) from config_num_deleted INTO conf_num_del; - - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - - IF conf_num_del = 0 THEN - RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; - END IF; - - FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl - FROM pg_catalog.pg_inherits - WHERE inhparent::regclass = parent_relid - ORDER BY inhrelid ASC) - LOOP - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - v_rec.tbl::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = v_rec.tbl - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. 
- */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); - END IF; - - v_part_count := v_part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); - - RETURN v_part_count; -END -$$ LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Copy all of parent's foreign keys. - */ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( - parent_relid REGCLASS, - partition REGCLASS) -RETURNS VOID AS -$$ -DECLARE - rec RECORD; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint - WHERE conrelid = parent_relid AND contype = 'f') - LOOP - EXECUTE format('ALTER TABLE %s ADD %s', - partition::TEXT, - pg_catalog.pg_get_constraintdef(rec.conid)); - END LOOP; -END -$$ LANGUAGE plpgsql STRICT; - - -/* - * Create DDL trigger to call pathman_ddl_trigger_func(). - */ -CREATE EVENT TRIGGER pathman_ddl_trigger -ON sql_drop -EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); - - - -CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_created' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' -LANGUAGE C STRICT; - - -/* - * Get parent of pg_pathman's partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) -RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' -LANGUAGE C STRICT; - -/* - * Extract basic type of a domain. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type(REGTYPE) -RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' -LANGUAGE C STRICT; - -/* - * Returns attribute type name for relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( - REGCLASS, TEXT) -RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' -LANGUAGE C STRICT; - -/* - * Return tablespace name for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'get_rel_tablespace_name' -LANGUAGE C STRICT; - - -/* - * Checks if attribute is nullable - */ -CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - REGCLASS, TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' -LANGUAGE C STRICT; - -/* - * Check if regclass is date or timestamp. - */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( - typid REGTYPE) -RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' -LANGUAGE C STRICT; - - -/* - * Build check constraint name for a specified relation's column. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, INT2) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, TEXT) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' -LANGUAGE C STRICT; - -/* - * Build update trigger and its underlying function's names. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' -LANGUAGE C STRICT; - - -/* - * Attach a previously partitioned table. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL) -RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache(relid OID) -RETURNS VOID AS 'pg_pathman' -LANGUAGE C STRICT; - - -/* - * Lock partitioned relation to restrict concurrent - * modification of partitioning scheme. - */ - CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' - LANGUAGE C STRICT; - -/* - * Lock relation to restrict concurrent modification of data. - */ - CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' - LANGUAGE C STRICT; - - -/* - * DEBUG: Place this inside some plpgsql fuction and set breakpoint. - */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() -RETURNS VOID AS 'pg_pathman', 'debug_capture' -LANGUAGE C STRICT; - -/* - * Checks that callback function meets specific requirements. Particularly it - * must have the only JSONB argument and VOID return type. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_on_partition_created_callback( - callback REGPROC) -RETURNS VOID AS 'pg_pathman', 'validate_on_part_init_callback_pl' -LANGUAGE C STRICT; - - -/* - * Invoke init_callback on RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; - -/* - * Invoke init_callback on HASH partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; -/* ------------------------------------------------------------------------ - * - * hash.sql - * HASH partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Creates hash partitions for specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( - parent_relid REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_child_relname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_atttype REGTYPE; - v_hashfunc REGPROC; - v_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Fetch atttype and its hash function */ - v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); - v_hashfunc := @extschema@.get_type_hash_func(v_atttype); - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) - VALUES (parent_relid, attribute, 1); - - /* Create partitions and update pg_pathman configuration */ - FOR partnum IN 0..partitions_count-1 - LOOP - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_relname || 
'_' || partnum)); - - EXECUTE format( - 'CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s) TABLESPACE %s', - v_child_relname, - parent_relid::TEXT, - @extschema@.get_rel_tablespace_name(parent_relid)); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s - CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - attribute), - v_hashfunc::TEXT, - attribute, - partitions_count, - partnum); - - PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback); - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Copy data */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN partitions_count; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( - parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_idx INTEGER; /* partition indices */ - new_idx INTEGER; - - BEGIN - old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); - new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); - - IF old_idx = new_idx THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) - 
USING %5$s; - - EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) - USING %7$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE %s()'; - - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - plain_schema TEXT; - plain_relname TEXT; - child_relname_format TEXT; - funcname TEXT; - triggername TEXT; - atttype REGTYPE; - partitions_count INTEGER; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_catalog.pg_attribute - WHERE attrelid = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - partitions_count := COUNT(*) FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid::oid; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Build partition name template */ - SELECT * INTO plain_schema, plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - child_relname_format := quote_ident(plain_schema) || '.' 
|| - quote_ident(plain_relname || '_%s'); - - /* Fetch base hash function for atttype */ - atttype := @extschema@.get_attribute_type(parent_relid, attr); - - /* Format function definition and execute it */ - EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, - old_fields, att_fmt, new_fields, child_relname_format, - @extschema@.get_type_hash_func(atttype)::TEXT); - - /* Create trigger on every partition */ - FOR num IN 0..partitions_count-1 - LOOP - EXECUTE format(trigger, - triggername, - format(child_relname_format, num), - funcname); - END LOOP; - - return funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Returns hash function OID for specified type - */ -CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) -RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' -LANGUAGE C STRICT; - -/* - * Calculates hash for integer value - */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) -RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' -LANGUAGE C STRICT; -/* ------------------------------------------------------------------------ - * - * range.sql - * RANGE partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -CREATE OR REPLACE FUNCTION @extschema@.get_sequence_name( - plain_schema TEXT, - plain_relname TEXT) -RETURNS TEXT AS -$$ -BEGIN - RETURN format('%s.%s', - quote_ident(plain_schema), - quote_ident(format('%s_seq', plain_relname))); -END -$$ -LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - plain_schema TEXT, - plain_relname TEXT, - OUT seq_name TEXT) -AS $$ -BEGIN - seq_name := @extschema@.get_sequence_name(plain_schema, plain_relname); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); -END -$$ -LANGUAGE plpgsql; - -/* - * Check RANGE partition boundaries. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS -$$ -DECLARE - v_min start_value%TYPE; - v_max start_value%TYPE; - v_count BIGINT; - -BEGIN - /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) - FROM %2$s WHERE NOT %1$s IS NULL', - attribute, parent_relid::TEXT) - INTO v_count, v_min, v_max; - - /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION '''%'' column contains NULL values', attribute; - END IF; - - /* Check lower boundary */ - IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than minimum value of ''%''', - attribute; - END IF; - - /* Check upper boundary */ - IF end_value <= v_max THEN - RAISE EXCEPTION 'not enough partitions to fit all values of ''%''', - attribute; - END IF; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION '''p_count'' must not be less than 0'; - END IF; - - /* Try to 
determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; - FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', - parent_relid, - attribute, - start_value, - end_value, - v_atttype::TEXT); - END IF; - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create first partition */ - FOR i IN 1..p_count - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', - v_atttype::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); - - start_value := start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, 
false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on numerical attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION 'partitions count must not be less than zero'; - END IF; - - /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - IF v_max IS NULL THEN - RAISE EXCEPTION '''%'' column has NULL values', attribute; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - /* - * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; 
- FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - END IF; - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* create first partition */ - FOR i IN 1..p_count - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); - - start_value := start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM 
@extschema@.common_relation_checks(parent_relid, attribute); - - IF p_interval <= 0 THEN - RAISE EXCEPTION 'interval must be positive'; - END IF; - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on 
parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates new RANGE partition. Returns partition name. - * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_num INT; - v_child_relname TEXT; - v_plain_child_relname TEXT; - v_attname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_child_relname_exists BOOL; - v_seq_name TEXT; - v_init_callback REGPROCEDURE; - -BEGIN - v_attname := attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname); - - IF partition_name IS NULL THEN - /* Get next value from sequence */ - LOOP - v_part_num := nextval(v_seq_name); - v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num); - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_child_relname)); - - v_child_relname_exists := count(*) > 0 - FROM pg_class - WHERE relname = v_plain_child_relname AND - relnamespace = v_plain_schema::regnamespace - LIMIT 1; - - EXIT WHEN v_child_relname_exists = false; - END LOOP; - ELSE - v_child_relname := partition_name; - END IF; - - IF tablespace IS NULL THEN - tablespace := @extschema@.get_rel_tablespace_name(parent_relid); - END IF; - - EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) - INHERITS (%2$s) TABLESPACE %3$s', - v_child_relname, - parent_relid::TEXT, - tablespace); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - v_attname), - @extschema@.build_range_condition(v_attname, - start_value, - end_value)); - - PERFORM 
@extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback, - start_value, - end_value); - - RETURN v_child_relname; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_parent REGCLASS; - v_attname TEXT; - v_atttype REGTYPE; - v_cond TEXT; - v_new_partition TEXT; - v_part_type INTEGER; - v_check_name TEXT; - -BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END 
IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - v_new_partition := @extschema@.create_single_range_partition(v_parent, - split_value, - p_range[2], - partition_name); - - /* Copy data */ - v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, - v_cond, - v_new_partition); - - /* Alter original partition */ - v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - v_check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - v_check_name, - v_cond); - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge RANGE partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS -$$ -DECLARE - v_parent1 REGCLASS; - v_parent2 REGCLASS; - v_attname TEXT; - v_part_type INTEGER; - v_atttype REGTYPE; - -BEGIN - IF partition1 = partition2 THEN - RAISE EXCEPTION 'cannot merge partition with itself'; - END IF; - - v_parent1 := @extschema@.get_parent_of_partition(partition1); - v_parent2 := @extschema@.get_parent_of_partition(partition2); - - /* Acquire data modification locks (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition1); - PERFORM @extschema@.prevent_relation_modification(partition2); - - IF v_parent1 != v_parent2 THEN - RAISE EXCEPTION 'cannot merge partitions with different 
parents'; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent1); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent1 - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'specified partitions aren''t RANGE partitions'; - END IF; - - v_atttype := @extschema@.get_attribute_type(partition1, v_attname); - - EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING v_parent1, partition1, partition2; - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent1); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge two partitions. All data will be copied to the first one. Second - * partition will be destroyed. - * - * NOTE: dummy field is used to pass the element type to the function - * (it is necessary because of pseudo-types used in function). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( - parent_relid REGCLASS, - partition1 REGCLASS, - partition2 REGCLASS, - dummy ANYELEMENT, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_check_name TEXT; - -BEGIN - SELECT attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || - @extschema@.get_part_range($2, NULL::%1$s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition1, partition2 - INTO p_range; - - /* Check if ranges are adjacent */ - IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN - RAISE EXCEPTION 'merge failed, partitions must be adjacent'; - END IF; - - /* Drop constraint on first partition... */ - v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition1::TEXT, - v_check_name); - - /* and create a new one */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition1::TEXT, - v_check_name, - @extschema@.build_range_condition(v_attname, - least(p_range[1], p_range[3]), - greatest(p_range[2], p_range[4]))); - - /* Copy data from second partition to the first one */ - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition2::TEXT, - partition1::TEXT); - - /* Remove second partition */ - EXECUTE format('DROP TABLE %s', partition2::TEXT); -END -$$ LANGUAGE plpgsql; - - -/* - * Append new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for append_partition(). We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot append to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[2], - p_range[2] + p_interval::interval, - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[2], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Prepend new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for prepend_partition(). We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot prepend to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[1] - p_interval::interval, - p_range[1], - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[1], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Add new partition - */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF start_value >= end_value THEN - RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; - END IF; - - /* check range overlap */ - IF @extschema@.partitions_count(parent_relid) > 0 - AND @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; 
- END IF; - - /* Create new partition */ - v_part_name := @extschema@.create_single_range_partition(parent_relid, - start_value, - end_value, - partition_name, - tablespace); - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Drop range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition REGCLASS, - delete_data BOOLEAN DEFAULT TRUE) -RETURNS TEXT AS -$$ -DECLARE - parent_relid REGCLASS; - part_name TEXT; - v_relkind CHAR; - v_rows BIGINT; - v_part_type INTEGER; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - part_name := partition::TEXT; /* save the name to be returned */ - - SELECT parttype - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_part_type; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - partition::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. 
- */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', partition::TEXT); - END IF; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN part_name; -END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Attach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( - parent_relid REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - rel_persistence CHAR; - v_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; - END IF; - - IF @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; - END IF; - - IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); - - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - 
@extschema@.build_range_condition(v_attname, - start_value, - end_value)); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, - v_init_callback, - start_value, - end_value); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Detach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - parent_relid REGCLASS; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - v_attname := attname - FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, - parent_relid::TEXT); - - /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_oid Oid; - new_oid Oid; - - BEGIN - old_oid := TG_RELID; - new_oid := 
@extschema@.find_or_create_range_partition( - ''%2$s''::regclass, NEW.%3$s); - - IF old_oid = new_oid THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %%s WHERE %5$s'', - old_oid::regclass::text) - USING %6$s; - - EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', - new_oid::regclass::text) - USING %8$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s ' || - 'BEFORE UPDATE ON %s ' || - 'FOR EACH ROW EXECUTE PROCEDURE %s()'; - - triggername TEXT; - funcname TEXT; - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - rec RECORD; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_attribute - WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create function for trigger */ - EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, - old_fields, att_fmt, new_fields); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT, - funcname); - END LOOP; - - RETURN funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Construct CHECK constraint condition for a range partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - p_attname TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS 'pg_pathman', 'build_range_condition' -LANGUAGE C; - -/* - * Returns N-th range (as an array of two elements). - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - parent_relid REGCLASS, - partition_idx INTEGER, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' -LANGUAGE C; - -/* - * Returns min and max values for specified RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition_relid REGCLASS, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' -LANGUAGE C; - -/* - * Checks if range overlaps with existing partitions. - * Returns TRUE if overlaps and FALSE otherwise. - */ -CREATE OR REPLACE FUNCTION @extschema@.check_overlap( - parent_relid REGCLASS, - range_min ANYELEMENT, - range_max ANYELEMENT) -RETURNS BOOLEAN AS 'pg_pathman', 'check_overlap' -LANGUAGE C; - -/* - * Needed for an UPDATE trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( - parent_relid REGCLASS, - value ANYELEMENT) -RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' -LANGUAGE C; diff --git a/pg_pathman--1.1.sql b/pg_pathman--1.1.sql deleted file mode 100644 index 20cb62a5..00000000 --- a/pg_pathman--1.1.sql +++ /dev/null @@ -1,2406 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * init.sql - * Creates config table and provides common utility functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Pathman config - * partrel - regclass (relation type, stored as Oid) - * attname - partitioning key - * parttype - partitioning type: - * 1 - HASH - * 2 - RANGE - * range_interval - base interval for RANGE partitioning as string - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( - partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, - parttype INTEGER NOT NULL, - range_interval TEXT, - - CHECK (parttype IN (1, 2)) /* check for allowed part types */ -); - -/* - * Optional parameters for partitioned tables. 
- * partrel - regclass (relation type, stored as Oid) - * enable_parent - add parent table to plan - * auto - enable automatic partition creation - * init_callback - cb to be executed on partition creation - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( - partrel REGCLASS NOT NULL PRIMARY KEY, - enable_parent BOOLEAN NOT NULL DEFAULT FALSE, - auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0 -); -CREATE UNIQUE INDEX i_pathman_config_params -ON @extschema@.pathman_config_params(partrel); - -GRANT SELECT, INSERT, UPDATE, DELETE -ON @extschema@.pathman_config, @extschema@.pathman_config_params -TO public; - -/* - * Check if current user can alter/drop specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) -RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; - -/* - * Row security policy to restrict partitioning operations to owner and - * superusers only - */ -CREATE POLICY deny_modification ON @extschema@.pathman_config -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY deny_modification ON @extschema@.pathman_config_params -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); - -CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); - -ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; -ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; - -/* - * Invalidate relcache every time someone changes parameters config. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() -RETURNS TRIGGER AS -$$ -BEGIN - IF TG_OP IN ('INSERT', 'UPDATE') THEN - PERFORM @extschema@.invalidate_relcache(NEW.partrel); - END IF; - - IF TG_OP IN ('UPDATE', 'DELETE') THEN - PERFORM @extschema@.invalidate_relcache(OLD.partrel); - END IF; - - IF TG_OP = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$ -LANGUAGE plpgsql; - -CREATE TRIGGER pathman_config_params_trigger -BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params -FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); - -/* - * Enable dump of config tables with pg_dump. - */ -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); - - -CREATE OR REPLACE FUNCTION @extschema@.partitions_count(relation REGCLASS) -RETURNS INT AS -$$ -BEGIN - RETURN count(*) FROM pg_inherits WHERE inhparent = relation; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Add a row describing the optional parameter to pathman_config_params. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( - relation REGCLASS, - param TEXT, - value ANYELEMENT) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params - (partrel, %1$s) VALUES ($1, $2) - ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) - USING relation, value; -END -$$ -LANGUAGE plpgsql; - -/* - * Include\exclude parent relation in query plan. - */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Enable\disable automatic partition creation. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'auto', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Set partition creation callback - */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( - relation REGCLASS, - callback REGPROC DEFAULT 0) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_on_partition_created_callback(callback); - PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); -END -$$ -LANGUAGE plpgsql; - -/* - * Show all existing parents and partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() -RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - partattr TEXT, - range_min TEXT, - range_max TEXT) -AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; - -/* - * View for show_partition_list(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list -AS SELECT * FROM @extschema@.show_partition_list(); - -GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; - -/* - * Show all existing concurrent partitioning tasks. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() -RETURNS TABLE ( - userid REGROLE, - pid INT, - dbid OID, - relid REGCLASS, - processed INT, - status TEXT) -AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; - -/* - * View for show_concurrent_part_tasks(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks -AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); - -GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; - -/* - * Partition table using ConcurrentPartWorker. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( - relation REGCLASS, - batch_size INTEGER DEFAULT 1000, - sleep_time FLOAT8 DEFAULT 1.0) -RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' -LANGUAGE C STRICT; - -/* - * Stop concurrent partitioning task. - */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( - relation REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' -LANGUAGE C STRICT; - - -/* - * Copy rows to partitions concurrently. - */ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( - relation REGCLASS, - p_min ANYELEMENT DEFAULT NULL::text, - p_max ANYELEMENT DEFAULT NULL::text, - p_limit INT DEFAULT NULL, - OUT p_total BIGINT) -AS -$$ -DECLARE - v_attr TEXT; - v_limit_clause TEXT := ''; - v_where_clause TEXT := ''; - ctids TID[]; - -BEGIN - SELECT attname INTO v_attr - FROM @extschema@.pathman_config WHERE partrel = relation; - - p_total := 0; - - /* Format LIMIT clause if needed */ - IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); - END IF; - - /* Format WHERE clause if needed */ - IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', v_attr); - END IF; - - IF NOT p_max IS NULL THEN - IF NOT p_min IS NULL THEN - v_where_clause := v_where_clause || ' AND '; - END IF; - v_where_clause := v_where_clause || format('%1$s < $2', v_attr); - END IF; - - IF v_where_clause != '' THEN - v_where_clause := 'WHERE ' || v_where_clause; - END IF; - - /* Lock rows and copy data */ - RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', - relation, v_where_clause, v_limit_clause) - USING p_min, p_max - INTO ctids; - - EXECUTE format(' - WITH data AS ( - DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) - INSERT INTO %1$s SELECT * FROM data', - relation) - USING ctids; - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; 
-END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Old school way to distribute rows to partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( - parent_relid REGCLASS, - OUT p_total BIGINT) -AS -$$ -DECLARE - relname TEXT; - rec RECORD; - cnt BIGINT := 0; - -BEGIN - p_total := 0; - - /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) - INSERT INTO %1$s SELECT * FROM part_data', - parent_relid::TEXT); - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; -END -$$ -LANGUAGE plpgsql STRICT -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Disable pathman partitioning for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; - PERFORM @extschema@.drop_triggers(parent_relid); - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Validates relation name. It must be schema qualified. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( - cls REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - relname TEXT; - -BEGIN - relname = @extschema@.get_schema_qualified_name(cls); - - IF relname IS NULL THEN - RAISE EXCEPTION 'relation %s does not exist', cls; - END IF; - - RETURN relname; -END -$$ -LANGUAGE plpgsql; - -/* - * Aggregates several common relation checks before partitioning. - * Suitable for every partitioning type. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( - relation REGCLASS, - p_attribute TEXT) -RETURNS BOOLEAN AS -$$ -DECLARE - v_rec RECORD; - is_referenced BOOLEAN; - rel_persistence CHAR; - -BEGIN - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = relation INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be partitioned', - relation::TEXT; - END IF; - - IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = relation) THEN - RAISE EXCEPTION 'relation "%" has already been partitioned', relation; - END IF; - - IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key ''%'' must be NOT NULL', p_attribute; - END IF; - - /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint - WHERE confrelid = relation::REGCLASS::OID) - LOOP - is_referenced := TRUE; - RAISE WARNING 'foreign key "%" references relation "%"', - v_rec.conname, relation; - END LOOP; - - IF is_referenced THEN - RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; - END IF; - - RETURN TRUE; -END -$$ -LANGUAGE plpgsql; - -/* - * Returns relname without quotes or something. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( - cls REGCLASS, - OUT schema TEXT, - OUT relname TEXT) -AS -$$ -BEGIN - SELECT pg_catalog.pg_class.relnamespace::regnamespace, - pg_catalog.pg_class.relname - FROM pg_catalog.pg_class WHERE oid = cls::oid - INTO schema, relname; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Returns the schema-qualified name of table. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_schema_qualified_name( - cls REGCLASS, - delimiter TEXT DEFAULT '.', - suffix TEXT DEFAULT '') -RETURNS TEXT AS -$$ -BEGIN - RETURN (SELECT quote_ident(relnamespace::regnamespace::text) || - delimiter || - quote_ident(relname || suffix) - FROM pg_catalog.pg_class - WHERE oid = cls::oid); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Check if two relations have equal structures. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( - relation1 OID, relation2 OID) -RETURNS BOOLEAN AS -$$ -DECLARE - rec RECORD; - -BEGIN - FOR rec IN ( - WITH - a1 AS (select * from pg_catalog.pg_attribute - where attrelid = relation1 and attnum > 0), - a2 AS (select * from pg_catalog.pg_attribute - where attrelid = relation2 and attnum > 0) - SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 - FROM a1 - FULL JOIN a2 ON a1.attnum = a2.attnum - ) - LOOP - IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN - RETURN false; - END IF; - END LOOP; - - RETURN true; -END -$$ -LANGUAGE plpgsql; - -/* - * DDL trigger that removes entry from pathman_config table. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() -RETURNS event_trigger AS -$$ -DECLARE - obj record; - pg_class_oid oid; - relids regclass[]; -BEGIN - pg_class_oid = 'pg_catalog.pg_class'::regclass; - - /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events - JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid; - - /* Cleanup pathman_config */ - DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); - - /* Cleanup params table too */ - DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); -END -$$ -LANGUAGE plpgsql; - -/* - * Drop triggers. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', - @extschema@.build_update_trigger_func_name(parent_relid)); -END -$$ LANGUAGE plpgsql STRICT; - -/* - * Drop partitions. If delete_data set to TRUE, partitions - * will be dropped with all the data. - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( - parent_relid REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) -RETURNS INTEGER AS -$$ -DECLARE - v_rec RECORD; - v_rows BIGINT; - v_part_count INTEGER := 0; - conf_num_del INTEGER; - v_relkind CHAR; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Drop trigger first */ - PERFORM @extschema@.drop_triggers(parent_relid); - - WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config - WHERE partrel = parent_relid - RETURNING *) - SELECT count(*) from config_num_deleted INTO conf_num_del; - - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - - IF conf_num_del = 0 THEN - RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; - END IF; - - FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl - FROM pg_catalog.pg_inherits - WHERE inhparent::regclass = parent_relid - ORDER BY inhrelid ASC) - LOOP - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - v_rec.tbl::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = v_rec.tbl - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. 
- */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); - END IF; - - v_part_count := v_part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); - - RETURN v_part_count; -END -$$ LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Copy all of parent's foreign keys. - */ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( - parent_relid REGCLASS, - partition REGCLASS) -RETURNS VOID AS -$$ -DECLARE - rec RECORD; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint - WHERE conrelid = parent_relid AND contype = 'f') - LOOP - EXECUTE format('ALTER TABLE %s ADD %s', - partition::TEXT, - pg_catalog.pg_get_constraintdef(rec.conid)); - END LOOP; -END -$$ LANGUAGE plpgsql STRICT; - - -/* - * Create DDL trigger to call pathman_ddl_trigger_func(). - */ -CREATE EVENT TRIGGER pathman_ddl_trigger -ON sql_drop -EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); - - - -CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_created' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' -LANGUAGE C STRICT; - - -/* - * Get parent of pg_pathman's partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) -RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' -LANGUAGE C STRICT; - -/* - * Extract basic type of a domain. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type(REGTYPE) -RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' -LANGUAGE C STRICT; - -/* - * Returns attribute type name for relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( - REGCLASS, TEXT) -RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' -LANGUAGE C STRICT; - -/* - * Return tablespace name for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'get_rel_tablespace_name' -LANGUAGE C STRICT; - - -/* - * Checks if attribute is nullable - */ -CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - REGCLASS, TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' -LANGUAGE C STRICT; - -/* - * Check if regclass is date or timestamp. - */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( - typid REGTYPE) -RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' -LANGUAGE C STRICT; - - -/* - * Build check constraint name for a specified relation's column. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, INT2) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, TEXT) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' -LANGUAGE C STRICT; - -/* - * Build update trigger and its underlying function's names. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' -LANGUAGE C STRICT; - - -/* - * Attach a previously partitioned table. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL) -RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache(relid OID) -RETURNS VOID AS 'pg_pathman' -LANGUAGE C STRICT; - - -/* - * Lock partitioned relation to restrict concurrent - * modification of partitioning scheme. - */ - CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' - LANGUAGE C STRICT; - -/* - * Lock relation to restrict concurrent modification of data. - */ - CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' - LANGUAGE C STRICT; - - -/* - * DEBUG: Place this inside some plpgsql fuction and set breakpoint. - */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() -RETURNS VOID AS 'pg_pathman', 'debug_capture' -LANGUAGE C STRICT; - -/* - * Checks that callback function meets specific requirements. Particularly it - * must have the only JSONB argument and VOID return type. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_on_partition_created_callback( - callback REGPROC) -RETURNS VOID AS 'pg_pathman', 'validate_on_part_init_callback_pl' -LANGUAGE C STRICT; - - -/* - * Invoke init_callback on RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; - -/* - * Invoke init_callback on HASH partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; -/* ------------------------------------------------------------------------ - * - * hash.sql - * HASH partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Creates hash partitions for specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( - parent_relid REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_child_relname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_atttype REGTYPE; - v_hashfunc REGPROC; - v_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Fetch atttype and its hash function */ - v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); - v_hashfunc := @extschema@.get_type_hash_func(v_atttype); - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) - VALUES (parent_relid, attribute, 1); - - /* Create partitions and update pg_pathman configuration */ - FOR partnum IN 0..partitions_count-1 - LOOP - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_relname || 
'_' || partnum)); - - EXECUTE format( - 'CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s) TABLESPACE %s', - v_child_relname, - parent_relid::TEXT, - @extschema@.get_rel_tablespace_name(parent_relid)); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s - CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - attribute), - v_hashfunc::TEXT, - attribute, - partitions_count, - partnum); - - PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback); - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Copy data */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN partitions_count; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( - parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_idx INTEGER; /* partition indices */ - new_idx INTEGER; - - BEGIN - old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); - new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); - - IF old_idx = new_idx THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) - 
USING %5$s; - - EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) - USING %7$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE %s()'; - - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - plain_schema TEXT; - plain_relname TEXT; - child_relname_format TEXT; - funcname TEXT; - triggername TEXT; - atttype REGTYPE; - partitions_count INTEGER; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_catalog.pg_attribute - WHERE attrelid = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - partitions_count := COUNT(*) FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid::oid; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Build partition name template */ - SELECT * INTO plain_schema, plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - child_relname_format := quote_ident(plain_schema) || '.' 
|| - quote_ident(plain_relname || '_%s'); - - /* Fetch base hash function for atttype */ - atttype := @extschema@.get_attribute_type(parent_relid, attr); - - /* Format function definition and execute it */ - EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, - old_fields, att_fmt, new_fields, child_relname_format, - @extschema@.get_type_hash_func(atttype)::TEXT); - - /* Create trigger on every partition */ - FOR num IN 0..partitions_count-1 - LOOP - EXECUTE format(trigger, - triggername, - format(child_relname_format, num), - funcname); - END LOOP; - - return funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Returns hash function OID for specified type - */ -CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) -RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' -LANGUAGE C STRICT; - -/* - * Calculates hash for integer value - */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) -RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' -LANGUAGE C STRICT; -/* ------------------------------------------------------------------------ - * - * range.sql - * RANGE partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -CREATE OR REPLACE FUNCTION @extschema@.get_sequence_name( - plain_schema TEXT, - plain_relname TEXT) -RETURNS TEXT AS -$$ -BEGIN - RETURN format('%s.%s', - quote_ident(plain_schema), - quote_ident(format('%s_seq', plain_relname))); -END -$$ -LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - plain_schema TEXT, - plain_relname TEXT, - OUT seq_name TEXT) -AS $$ -BEGIN - seq_name := @extschema@.get_sequence_name(plain_schema, plain_relname); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); -END -$$ -LANGUAGE plpgsql; - -/* - * Check RANGE partition boundaries. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS -$$ -DECLARE - v_min start_value%TYPE; - v_max start_value%TYPE; - v_count BIGINT; - -BEGIN - /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) - FROM %2$s WHERE NOT %1$s IS NULL', - attribute, parent_relid::TEXT) - INTO v_count, v_min, v_max; - - /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION '''%'' column contains NULL values', attribute; - END IF; - - /* Check lower boundary */ - IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than minimum value of ''%''', - attribute; - END IF; - - /* Check upper boundary */ - IF end_value <= v_max THEN - RAISE EXCEPTION 'not enough partitions to fit all values of ''%''', - attribute; - END IF; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION '''p_count'' must not be less than 0'; - END IF; - - /* Try to 
determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; - FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', - parent_relid, - attribute, - start_value, - end_value, - v_atttype::TEXT); - END IF; - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create first partition */ - FOR i IN 1..p_count - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', - v_atttype::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); - - start_value := start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, 
false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on numerical attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION 'partitions count must not be less than zero'; - END IF; - - /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - IF v_max IS NULL THEN - RAISE EXCEPTION '''%'' column has NULL values', attribute; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - /* - * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; 
- FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - END IF; - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* create first partition */ - FOR i IN 1..p_count - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); - - start_value := start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM 
@extschema@.common_relation_checks(parent_relid, attribute); - - IF p_interval <= 0 THEN - RAISE EXCEPTION 'interval must be positive'; - END IF; - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on 
parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_rel_tablespace_name(parent_relid); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates new RANGE partition. Returns partition name. - * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_num INT; - v_child_relname TEXT; - v_plain_child_relname TEXT; - v_attname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_child_relname_exists BOOL; - v_seq_name TEXT; - v_init_callback REGPROCEDURE; - -BEGIN - v_attname := attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname); - - IF partition_name IS NULL THEN - /* Get next value from sequence */ - LOOP - v_part_num := nextval(v_seq_name); - v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num); - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_child_relname)); - - v_child_relname_exists := count(*) > 0 - FROM pg_class - WHERE relname = v_plain_child_relname AND - relnamespace = v_plain_schema::regnamespace - LIMIT 1; - - EXIT WHEN v_child_relname_exists = false; - END LOOP; - ELSE - v_child_relname := partition_name; - END IF; - - IF tablespace IS NULL THEN - tablespace := @extschema@.get_rel_tablespace_name(parent_relid); - END IF; - - EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) - INHERITS (%2$s) TABLESPACE %3$s', - v_child_relname, - parent_relid::TEXT, - tablespace); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - v_attname), - @extschema@.build_range_condition(v_attname, - start_value, - end_value)); - - PERFORM 
@extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - v_child_relname::REGCLASS, - v_init_callback, - start_value, - end_value); - - RETURN v_child_relname; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_parent REGCLASS; - v_attname TEXT; - v_atttype REGTYPE; - v_cond TEXT; - v_new_partition TEXT; - v_part_type INTEGER; - v_check_name TEXT; - -BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END 
IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - v_new_partition := @extschema@.create_single_range_partition(v_parent, - split_value, - p_range[2], - partition_name); - - /* Copy data */ - v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, - v_cond, - v_new_partition); - - /* Alter original partition */ - v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - v_check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - v_check_name, - v_cond); - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge RANGE partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS -$$ -DECLARE - v_parent1 REGCLASS; - v_parent2 REGCLASS; - v_attname TEXT; - v_part_type INTEGER; - v_atttype REGTYPE; - -BEGIN - IF partition1 = partition2 THEN - RAISE EXCEPTION 'cannot merge partition with itself'; - END IF; - - v_parent1 := @extschema@.get_parent_of_partition(partition1); - v_parent2 := @extschema@.get_parent_of_partition(partition2); - - /* Acquire data modification locks (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition1); - PERFORM @extschema@.prevent_relation_modification(partition2); - - IF v_parent1 != v_parent2 THEN - RAISE EXCEPTION 'cannot merge partitions with different 
parents'; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent1); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent1 - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'specified partitions aren''t RANGE partitions'; - END IF; - - v_atttype := @extschema@.get_attribute_type(partition1, v_attname); - - EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING v_parent1, partition1, partition2; - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent1); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge two partitions. All data will be copied to the first one. Second - * partition will be destroyed. - * - * NOTE: dummy field is used to pass the element type to the function - * (it is necessary because of pseudo-types used in function). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( - parent_relid REGCLASS, - partition1 REGCLASS, - partition2 REGCLASS, - dummy ANYELEMENT, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_check_name TEXT; - -BEGIN - SELECT attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || - @extschema@.get_part_range($2, NULL::%1$s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition1, partition2 - INTO p_range; - - /* Check if ranges are adjacent */ - IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN - RAISE EXCEPTION 'merge failed, partitions must be adjacent'; - END IF; - - /* Drop constraint on first partition... */ - v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition1::TEXT, - v_check_name); - - /* and create a new one */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition1::TEXT, - v_check_name, - @extschema@.build_range_condition(v_attname, - least(p_range[1], p_range[3]), - greatest(p_range[2], p_range[4]))); - - /* Copy data from second partition to the first one */ - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition2::TEXT, - partition1::TEXT); - - /* Remove second partition */ - EXECUTE format('DROP TABLE %s', partition2::TEXT); -END -$$ LANGUAGE plpgsql; - - -/* - * Append new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for append_partition(). We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot append to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[2], - p_range[2] + p_interval::interval, - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[2], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Prepend new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for prepend_partition(). We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot prepend to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[1] - p_interval::interval, - p_range[1], - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[1], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Add new partition - */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF start_value >= end_value THEN - RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; - END IF; - - /* check range overlap */ - IF @extschema@.partitions_count(parent_relid) > 0 - AND @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; 
- END IF; - - /* Create new partition */ - v_part_name := @extschema@.create_single_range_partition(parent_relid, - start_value, - end_value, - partition_name, - tablespace); - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Drop range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition REGCLASS, - delete_data BOOLEAN DEFAULT TRUE) -RETURNS TEXT AS -$$ -DECLARE - parent_relid REGCLASS; - part_name TEXT; - v_relkind CHAR; - v_rows BIGINT; - v_part_type INTEGER; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - part_name := partition::TEXT; /* save the name to be returned */ - - SELECT parttype - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_part_type; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - partition::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. 
- */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', partition::TEXT); - END IF; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN part_name; -END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Attach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( - parent_relid REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - rel_persistence CHAR; - v_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; - END IF; - - IF @extschema@.check_overlap(parent_relid, start_value, end_value) THEN - RAISE EXCEPTION 'specified range overlaps with existing partitions'; - END IF; - - IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); - - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - 
@extschema@.build_range_condition(v_attname, - start_value, - end_value)); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, - v_init_callback, - start_value, - end_value); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Detach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - parent_relid REGCLASS; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - v_attname := attname - FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, - parent_relid::TEXT); - - /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_oid Oid; - new_oid Oid; - - BEGIN - old_oid := TG_RELID; - new_oid := 
@extschema@.find_or_create_range_partition( - ''%2$s''::regclass, NEW.%3$s); - - IF old_oid = new_oid THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %%s WHERE %5$s'', - old_oid::regclass::text) - USING %6$s; - - EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', - new_oid::regclass::text) - USING %8$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s ' || - 'BEFORE UPDATE ON %s ' || - 'FOR EACH ROW EXECUTE PROCEDURE %s()'; - - triggername TEXT; - funcname TEXT; - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - rec RECORD; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_attribute - WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create function for trigger */ - EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, - old_fields, att_fmt, new_fields); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT, - funcname); - END LOOP; - - RETURN funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Construct CHECK constraint condition for a range partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - p_attname TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS 'pg_pathman', 'build_range_condition' -LANGUAGE C; - -/* - * Returns N-th range (as an array of two elements). - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - parent_relid REGCLASS, - partition_idx INTEGER, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' -LANGUAGE C; - -/* - * Returns min and max values for specified RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition_relid REGCLASS, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' -LANGUAGE C; - -/* - * Checks if range overlaps with existing partitions. - * Returns TRUE if overlaps and FALSE otherwise. - */ -CREATE OR REPLACE FUNCTION @extschema@.check_overlap( - parent_relid REGCLASS, - range_min ANYELEMENT, - range_max ANYELEMENT) -RETURNS BOOLEAN AS 'pg_pathman', 'check_overlap' -LANGUAGE C; - -/* - * Needed for an UPDATE trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( - parent_relid REGCLASS, - value ANYELEMENT) -RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' -LANGUAGE C; From 91ee25e404aadb9bd1768414587595eacf5ae8e3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 18:57:00 +0300 Subject: [PATCH 0164/1124] don't count migration scripts as PLpgSQL code --- .gitattributes | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitattributes b/.gitattributes index 88f6baf4..5ea3a003 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,4 @@ +pg_pathman*.sql linguist-vendored=true *.h linguist-language=C *.c linguist-language=C *.spec linguist-vendored=true From 4f1ad04a777fb492eaee889e016e906ba18a60c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2017 19:00:50 +0300 Subject: [PATCH 0165/1124] fix Makefile (DATA) --- Makefile | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 04f77da3..c685aa53 100644 --- a/Makefile +++ b/Makefile @@ -11,10 +11,8 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ EXTENSION = pg_pathman EXTVERSION = 1.2 DATA_built = pg_pathman--$(EXTVERSION).sql -DATA = pg_pathman--1.0.sql \ - pg_pathman--1.0--1.1.sql \ - pg_pathman--1.1.sql \ - pg_pathman--1.1--1.2.sql +DATA = pg_pathman--1.0--1.1.sql \ + pg_pathman--1.1--1.2.sql PGFILEDESC = "pg_pathman - partitioning tool" REGRESS = pathman_basic \ From cff45b574fb4ad23abcacbf50be0b995c2b5d450 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 8 Feb 2017 15:24:20 +0300 Subject: [PATCH 0166/1124] Add triggers on tested partitioned table in pathman_inserts test suite --- expected/pathman_inserts.out | 222 ++++++++++++++++++++++++++++++++++- sql/pathman_inserts.sql | 32 ++++- 2 files changed, 249 insertions(+), 5 deletions(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index d959a79c..334fcfd2 100644 --- 
a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -13,6 +13,39 @@ NOTICE: sequence "storage_seq" does not exist, skipping 10 (1 row) +/* + * attach before and after insertion triggers to partitioned table + */ +/* prepare trigger functions */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s for each row execute procedure test_inserts.print_cols_before_change();', args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers'); + set_init_callback +------------------- + +(1 row) + /* we don't support ON CONLICT */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') ON CONFLICT (a) DO UPDATE SET a = 3; @@ -22,6 +55,8 @@ ON CONFLICT (a) DO NOTHING; ERROR: ON CONFLICT 
clause is not supported with partitioned tables /* implicitly prepend a partition (no columns have been dropped yet) */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) a | b | c | d ---+---+---+---------- 0 | 0 | 0 | PREPEND. @@ -34,6 +69,8 @@ SELECT * FROM test_inserts.storage_11; (1 row) INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) tableoid ------------------------- test_inserts.storage_11 @@ -47,6 +84,8 @@ SELECT * FROM test_inserts.storage_11; (2 rows) INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) ?column? ---------- 3 @@ -60,8 +99,9 @@ SELECT * FROM test_inserts.storage_11; 3 | 0 | 0 | PREPEND... (3 rows) -/* cause a conflict (a = 0) */ +/* cause an unique index conflict (a = 0) */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,CONFLICT) ERROR: duplicate key value violates unique constraint "storage_11_a_idx" /* drop first column */ ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; @@ -73,6 +113,8 @@ SELECT append_range_partition('test_inserts.storage'); (1 row) INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") SELECT * FROM test_inserts.storage_12; /* direct access */ b | c | d -----+----+--------- @@ -87,6 +129,8 @@ SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ /* spawn a new partition (b, c, d) */ INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") SELECT * FROM test_inserts.storage_13; /* direct access */ b | c | d -----+----+----------------- @@ -101,18 +145,24 @@ SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ /* column 'a' has been dropped */ INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) b | c | d | ?column? -----+---+-------------+---------- 111 | 0 | DROP_COL_1. | 17 (1 row) INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) tableoid ------------------------- test_inserts.storage_13 (1 row) INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) ?column? 
| b ----------+----- 222 | 111 @@ -120,7 +170,7 @@ INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2 /* drop third column */ ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; -/* will have 3 columns (b, c, d) */ +/* will have 2 columns (b, d) */ SELECT append_range_partition('test_inserts.storage'); append_range_partition ------------------------- @@ -128,6 +178,8 @@ SELECT append_range_partition('test_inserts.storage'); (1 row) INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") SELECT * FROM test_inserts.storage_14; /* direct access */ b | d -----+--------- @@ -142,18 +194,24 @@ SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ /* column 'c' has been dropped */ INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) b | d -----+------------- 121 | DROP_COL_2. (1 row) INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) tableoid ------------------------- test_inserts.storage_14 (1 row) INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) ?column? | ?column? 
------------------+---------- DROP_COL_2...0_0 | 363 @@ -161,6 +219,8 @@ INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_ INSERT INTO test_inserts.storage VALUES(121, 'query_1') RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) ?column? ---------- 1 @@ -168,6 +228,8 @@ RETURNING (SELECT 1); INSERT INTO test_inserts.storage VALUES(121, 'query_2') RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) generate_series ----------------- 1 @@ -177,6 +239,8 @@ INSERT INTO test_inserts.storage VALUES(121, 'query_3') RETURNING (SELECT attname FROM pathman_config WHERE partrel = 'test_inserts.storage'::regclass); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) attname --------- b @@ -184,6 +248,8 @@ RETURNING (SELECT attname INSERT INTO test_inserts.storage VALUES(121, 'query_4') RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) ?column? | ?column? | ?column? | ?column? ----------+----------+----------+---------- 1 | 2 | 3 | 4 @@ -339,6 +405,72 @@ SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; TRUNCATE test_inserts.storage; /* one more time! */ INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. 
INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120) SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; b | d | tableoid -----+-----+------------------------- @@ -473,6 +605,72 @@ TRUNCATE test_inserts.storage; ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; /* one more time! x2 */ INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; b | d | e | tableoid -----+-----+-----+------------------------- @@ -607,6 +805,24 @@ TRUNCATE test_inserts.storage; INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 130, 5) i RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) ?column? | b | tableoid ----------+-----+------------------------- -4 | -2 | test_inserts.storage_11 @@ -639,5 +855,5 @@ RETURNING e * 2, b, tableoid::regclass; (27 rows) DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 16 other objects +NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 64bd191f..5eac38f4 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -11,6 +11,34 @@ INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_serie CREATE UNIQUE INDEX ON test_inserts.storage(a); SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); +/* + * attach before and after insertion triggers to partitioned table + */ +/* prepare trigger functions */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s for each row execute procedure test_inserts.print_cols_before_change();', args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers'); /* we don't support ON CONLICT */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') @@ -29,7 +57,7 @@ SELECT * FROM test_inserts.storage_11; INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; SELECT * FROM test_inserts.storage_11; -/* cause a conflict (a = 0) */ +/* cause an unique index conflict (a = 0) */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; @@ -59,7 +87,7 @@ INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2 ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; -/* will have 3 columns (b, c, d) */ +/* will have 2 columns (b, d) */ SELECT append_range_partition('test_inserts.storage'); INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); SELECT * FROM test_inserts.storage_14; /* direct access */ From d98e1af63a7aa0b74ddda780cf673fe2d32a1c2d Mon Sep 17 00:00:00 2001 From: Dmitry 
Ivanov Date: Wed, 8 Feb 2017 18:10:56 +0300 Subject: [PATCH 0167/1124] refactor Bound-related macros, fix spawn_partitions_val() --- range.sql | 4 +- src/init.c | 41 +++++------- src/partition_creation.c | 103 ++++++++++++++--------------- src/pl_funcs.c | 44 ++++++------- src/pl_range_funcs.c | 135 +++++++++++++++++++++------------------ src/relation_info.h | 75 ++++++++++++++-------- 6 files changed, 210 insertions(+), 192 deletions(-) diff --git a/range.sql b/range.sql index da1f9ae3..7ae728c4 100644 --- a/range.sql +++ b/range.sql @@ -558,8 +558,8 @@ $$ LANGUAGE plpgsql; /* - * Merge multiple partitions. All data will be copied to the first one. The rest - * of partitions will be dropped + * Merge multiple partitions. All data will be copied to the first one. + * The rest of partitions will be dropped. */ CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( partitions REGCLASS[]) diff --git a/src/init.c b/src/init.c index 77527e8b..041fa7ca 100644 --- a/src/init.c +++ b/src/init.c @@ -407,13 +407,15 @@ fill_prel_with_partitions(const Oid *partitions, &lower, &upper, &lower_null, &upper_null)) { - prel->ranges[i].child_oid = partitions[i]; - MakeBound(&prel->ranges[i].min, - lower, - lower_null ? MINUS_INFINITY : FINITE); - MakeBound(&prel->ranges[i].max, - upper, - upper_null ? PLUS_INFINITY : FINITE); + prel->ranges[i].child_oid = partitions[i]; + + prel->ranges[i].min = lower_null ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(lower); + + prel->ranges[i].max = upper_null ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(upper); } else { @@ -459,24 +461,15 @@ fill_prel_with_partitions(const Oid *partitions, old_mcxt = MemoryContextSwitchTo(TopMemoryContext); for (i = 0; i < PrelChildrenCount(prel); i++) { - // prel->ranges[i].max = datumCopy(prel->ranges[i].max, - // prel->attbyval, - // prel->attlen); - CopyBound(&(prel->ranges[i].max), - &(prel->ranges[i].max), - prel->attbyval, - prel->attlen); - - // prel->ranges[i].min = datumCopy(prel->ranges[i].min, - // prel->attbyval, - // prel->attlen); - CopyBound(&prel->ranges[i].min, - &prel->ranges[i].min, - prel->attbyval, - prel->attlen); + prel->ranges[i].min = CopyBound(&prel->ranges[i].min, + prel->attbyval, + prel->attlen); + + prel->ranges[i].max = CopyBound(&prel->ranges[i].max, + prel->attbyval, + prel->attlen); } MemoryContextSwitchTo(old_mcxt); - } #ifdef USE_ASSERT_CHECKING @@ -847,7 +840,7 @@ read_pathman_config(void) { DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, - (errmsg("Table \"%s\" contains nonexistent relation %u", + (errmsg("table \"%s\" contains nonexistent relation %u", PATHMAN_CONFIG, relid), errhint(INIT_ERROR_HINT))); } diff --git a/src/partition_creation.c b/src/partition_creation.c index 0986260e..c8550456 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -48,8 +48,8 @@ static void extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, Oid *move_bound_op_ret_type); static Oid spawn_partitions_val(Oid parent_relid, - Datum range_bound_min, - Datum range_bound_max, + const Bound *range_bound_min, + const Bound *range_bound_max, Oid range_bound_type, Datum interval_binary, Oid interval_type, @@ -359,26 +359,27 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) if (partid == InvalidOid) { RangeEntry *ranges = PrelGetRangesArray(prel); - Datum bound_min, /* absolute MIN */ + Bound bound_min, /* absolute MIN */ bound_max; /* absolute MAX */ Oid interval_type = InvalidOid; 
Datum interval_binary, /* assigned 'width' of one partition */ interval_text; - /* Read max & min range values from PartRelationInfo */ - bound_min = BoundGetValue(&ranges[0].min); - bound_max = BoundGetValue(&ranges[PrelLastChild(prel)].max); + /* Copy datums in order to protect them from cache invalidation */ + bound_min = CopyBound(&ranges[0].min, + prel->attbyval, + prel->attlen); - /* Copy datums on order to protect them from cache invalidation */ - bound_min = datumCopy(bound_min, prel->attbyval, prel->attlen); - bound_max = datumCopy(bound_max, prel->attbyval, prel->attlen); + bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, + prel->attbyval, + prel->attlen); /* Check if interval is set */ if (isnull[Anum_pathman_config_range_interval - 1]) { elog(ERROR, - "Could not find appropriate partition for key '%s'", + "cannot find appropriate partition for key '%s'", datum_to_cstring(value, value_type)); } @@ -392,7 +393,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) /* At last, spawn partitions to store the value */ partid = spawn_partitions_val(PrelParentRelid(prel), - bound_min, bound_max, base_bound_type, + &bound_min, &bound_max, base_bound_type, interval_binary, interval_type, value, base_value_type); } @@ -455,14 +456,14 @@ extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, * it into account while searching for the 'cmp_proc'. 
*/ static Oid -spawn_partitions_val(Oid parent_relid, /* parent's Oid */ - Datum range_bound_min, /* parent's MIN boundary */ - Datum range_bound_max, /* parent's MAX boundary */ - Oid range_bound_type, /* type of boundary's value */ - Datum interval_binary, /* interval in binary form */ - Oid interval_type, /* INTERVALOID or prel->atttype */ - Datum value, /* value to be INSERTed */ - Oid value_type) /* type of value */ +spawn_partitions_val(Oid parent_relid, /* parent's Oid */ + const Bound *range_bound_min, /* parent's MIN boundary */ + const Bound *range_bound_max, /* parent's MAX boundary */ + Oid range_bound_type, /* type of boundary's value */ + Datum interval_binary, /* interval in binary form */ + Oid interval_type, /* INTERVALOID or prel->atttype */ + Datum value, /* value to be INSERTed */ + Oid value_type) /* type of value */ { bool should_append; /* append or prepend? */ @@ -475,27 +476,37 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ Datum cur_leading_bound, /* boundaries of a new partition */ cur_following_bound; + Bound value_bound = MakeBound(value); + Oid last_partition = InvalidOid; fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, value_type, range_bound_type); + /* Is it possible to append\prepend a partition? 
*/ + if (IsInfinite(range_bound_min) && IsInfinite(range_bound_max)) + ereport(ERROR, (errmsg("cannot spawn a partition"), + errdetail("both bounds are infinite"))); + /* value >= MAX_BOUNDARY */ - if (check_ge(&cmp_value_bound_finfo, value, range_bound_max)) + else if (cmp_bounds(&cmp_value_bound_finfo, + &value_bound, range_bound_max) >= 0) { should_append = true; - cur_leading_bound = range_bound_max; + cur_leading_bound = BoundGetValue(range_bound_max); } /* value < MIN_BOUNDARY */ - else if (check_lt(&cmp_value_bound_finfo, value, range_bound_min)) + else if (cmp_bounds(&cmp_value_bound_finfo, + &value_bound, range_bound_min) < 0) { should_append = false; - cur_leading_bound = range_bound_min; + cur_leading_bound = BoundGetValue(range_bound_min); } /* There's a gap, halt and emit ERROR */ - else elog(ERROR, "cannot spawn a partition inside a gap"); + else ereport(ERROR, (errmsg("cannot spawn a partition"), + errdetail("there is a gap"))); /* Fetch operator's underlying function and ret type */ extract_op_func_and_ret_type(should_append ? "+" : "-", @@ -541,7 +552,6 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_ge(&cmp_value_bound_finfo, value, cur_leading_bound) : check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) { - Datum args[2]; Bound bounds[2]; /* Assign the 'following' boundary to current 'leading' value */ @@ -552,11 +562,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ cur_leading_bound, interval_binary); - args[0] = should_append ? cur_following_bound : cur_leading_bound; - args[1] = should_append ? cur_leading_bound : cur_following_bound; - - MakeBound(&bounds[0], args[0], FINITE); - MakeBound(&bounds[1], args[1], FINITE); + bounds[0] = MakeBound(should_append ? cur_following_bound : cur_leading_bound); + bounds[1] = MakeBound(should_append ? 
cur_leading_bound : cur_following_bound); last_partition = create_single_range_partition_internal(parent_relid, &bounds[0], &bounds[1], @@ -1143,7 +1150,7 @@ build_raw_range_check_tree(char *attname, } if (and_oper->args == NIL) - elog(ERROR, "Cannot create infinite range constraint"); + elog(ERROR, "cannot create infinite range constraint"); return (Node *) and_oper; } @@ -1207,39 +1214,23 @@ check_range_available(Oid parent_relid, { int c1, c2; - /* - * If the range we're checking starts with minus infinity or current - * range ends in plus infinity then the left boundary of the first - * range is on the left. Otherwise compare specific values - */ - // c1 = (IsInfinite(start) || IsInfinite(&ranges[i].max)) ? - // -1 : - // FunctionCall2(&cmp_func, - // BoundGetValue(start), - // BoundGetValue(&ranges[i].max)); - /* - * Similary check that right boundary of the range we're checking is on - * the right of the beginning of the current one - */ - // c2 = (IsInfinite(end) || IsInfinite(&ranges[i].min)) ? - // 1 : - // FunctionCall2(&cmp_func, - // BoundGetValue(end), - // BoundGetValue(&ranges[i].min)); - c1 = cmp_bounds(&cmp_func, start, &ranges[i].max); c2 = cmp_bounds(&cmp_func, end, &ranges[i].min); - /* There's someone! */ + /* There's something! */ if (c1 < 0 && c2 > 0) { if (raise_error) elog(ERROR, "specified range [%s, %s) overlaps " "with existing partitions", - !IsInfinite(start) ? datum_to_cstring(BoundGetValue(start), value_type) : "NULL", - !IsInfinite(end) ? datum_to_cstring(BoundGetValue(end), value_type) : "NULL"); - else - return false; + !IsInfinite(start) ? + datum_to_cstring(BoundGetValue(start), value_type) : + "NULL", + !IsInfinite(end) ? 
+ datum_to_cstring(BoundGetValue(end), value_type) : + "NULL"); + + else return false; } } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1f8a521f..eeb3cad4 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -235,13 +235,11 @@ get_attribute_type_pl(PG_FUNCTION_ARGS) Datum get_partition_key_type(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - const PartRelationInfo *prel = get_pathman_relation_info(relid); + Oid relid = PG_GETARG_OID(0); + const PartRelationInfo *prel; - if (!prel) - elog(ERROR, - "Relation '%s' isn't partitioned by pg_pathman", - get_rel_name(relid)); + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_INDIFFERENT); PG_RETURN_OID(prel->atttype); } @@ -420,15 +418,17 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Lower bound text */ rmin = !IsInfinite(&re->min) ? - CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->min), prel->atttype)) : - CStringGetTextDatum("NULL"); + CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->min), + prel->atttype)) : + CStringGetTextDatum("NULL"); /* Upper bound text */ rmax = !IsInfinite(&re->max) ? 
- CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->max), prel->atttype)) : - CStringGetTextDatum("NULL"); + CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->max), + prel->atttype)) : + CStringGetTextDatum("NULL"); values[Anum_pathman_pl_partition - 1] = re->child_oid; values[Anum_pathman_pl_range_min - 1] = rmin; @@ -843,8 +843,6 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) case 5: { - // Datum sv_datum, - // ev_datum; Bound start, end; Oid value_type; @@ -853,15 +851,15 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) elog(ERROR, "both bounds must be provided for RANGE partition"); /* Fetch start & end values for RANGE + their type */ - // sv_datum = PG_GETARG_DATUM(ARG_RANGE_START); - // ev_datum = PG_GETARG_DATUM(ARG_RANGE_END); - MakeBound(&start, - PG_GETARG_DATUM(ARG_RANGE_START), - PG_ARGISNULL(ARG_RANGE_START) ? MINUS_INFINITY : FINITE); - MakeBound(&end, - PG_GETARG_DATUM(ARG_RANGE_END), - PG_ARGISNULL(ARG_RANGE_END) ? PLUS_INFINITY : FINITE); - value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); + start = PG_ARGISNULL(ARG_RANGE_START) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(ARG_RANGE_START)); + + end = PG_ARGISNULL(ARG_RANGE_END) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(ARG_RANGE_END)); + + value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); MakeInitCallbackRangeParams(&callback_params, callback_oid, diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index e897f040..9a2898b1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -95,14 +95,16 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) elog(ERROR, "'parent_relid' should not be NULL"); /* Fetch mandatory args */ - parent_relid = PG_GETARG_OID(0); - value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - MakeBound(&start, - PG_GETARG_DATUM(1), - PG_ARGISNULL(1) ? MINUS_INFINITY : FINITE); - MakeBound(&end, - PG_GETARG_DATUM(2), - PG_ARGISNULL(2) ? 
PLUS_INFINITY : FINITE); + parent_relid = PG_GETARG_OID(0); + value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + start = PG_ARGISNULL(1) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(1)); + + end = PG_ARGISNULL(2) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); /* Fetch 'partition_name' */ if (!PG_ARGISNULL(3)) @@ -189,21 +191,22 @@ Datum check_range_available_pl(PG_FUNCTION_ARGS) { Oid parent_relid = PG_GETARG_OID(0); - Bound start_value, - end_value; + Bound start, + end; Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - MakeBound(&start_value, - PG_GETARG_DATUM(1), - PG_ARGISNULL(1) ? MINUS_INFINITY : FINITE); - MakeBound(&end_value, - PG_GETARG_DATUM(2), - PG_ARGISNULL(2) ? PLUS_INFINITY : FINITE); + start = PG_ARGISNULL(1) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(1)); + + end = PG_ARGISNULL(2) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); /* Raise ERROR if range overlaps with any partition */ check_range_available(parent_relid, - &start_value, - &end_value, + &start, + &end, value_type, true); @@ -257,7 +260,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) arr = construct_infinitable_array(elems, 2, prel->atttype, prel->attlen, - prel->attbyval, prel->attalign); + prel->attbyval, prel->attalign); PG_RETURN_ARRAYTYPE_P(arr); } @@ -338,8 +341,8 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Datum build_range_condition(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); + Oid relid = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); Bound min, max; @@ -347,12 +350,13 @@ build_range_condition(PG_FUNCTION_ARGS) Constraint *con; char *result; - MakeBound(&min, - PG_GETARG_DATUM(2), - PG_ARGISNULL(2) ? MINUS_INFINITY : FINITE); - MakeBound(&max, - PG_GETARG_DATUM(3), - PG_ARGISNULL(3) ? PLUS_INFINITY : FINITE); + min = PG_ARGISNULL(2) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); + + max = PG_ARGISNULL(3) ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(3)); con = build_range_check_constraint(relid, text_to_cstring(attname), &min, &max, @@ -415,62 +419,70 @@ build_sequence_name(PG_FUNCTION_ARGS) /* * Build an 1d array of Bound elements * - * The main difference from construct_array() is that it will substitute - * infinite values with NULL's + * The main difference from construct_array() is that + * it will substitute infinite values with NULLs */ static ArrayType * construct_infinitable_array(Bound **elems, uint32_t nelems, - Oid elmtype, - int elmlen, - bool elmbyval, - char elmalign) + Oid elemtype, + int elemlen, + bool elembyval, + char elemalign) { ArrayType *arr; - Datum *data; + Datum *datums; bool *nulls; int dims[1] = { nelems }; int lbs[1] = { 1 }; int i; - data = palloc(sizeof(Datum) * nelems); + datums = palloc(sizeof(Datum) * nelems); nulls = palloc(sizeof(bool) * nelems); for (i = 0; i < nelems; i++) { - data[i] = BoundGetValue(elems[i]); + datums[i] = IsInfinite(elems[i]) ? + (Datum) 0 : + BoundGetValue(elems[i]); nulls[i] = IsInfinite(elems[i]); } - arr = construct_md_array(data, nulls, 1, dims, lbs, - elmtype, elmlen, - elmbyval, elmalign); + arr = construct_md_array(datums, nulls, 1, + dims, lbs, + elemtype, elemlen, + elembyval, elemalign); return arr; } +/* + * Merge multiple partitions. All data will be copied to the first one. + * The rest of partitions will be dropped. 
+ */ Datum merge_range_partitions(PG_FUNCTION_ARGS) { - Oid parent = InvalidOid; - PartParentSearch parent_search; - ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); - - Oid *partitions; - Datum *datums; - bool *nulls; - int npart; - int16 typlen; - bool typbyval; - char typalign; - int i; - + Oid parent = InvalidOid; + PartParentSearch parent_search; + ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); + + Oid *partitions; + Datum *datums; + bool *nulls; + int npart; + int16 typlen; + bool typbyval; + char typalign; + int i; + + /* Validate array type */ Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); /* Extract Oids */ get_typlenbyvalalign(REGCLASSOID, &typlen, &typbyval, &typalign); - deconstruct_array(arr, REGCLASSOID, + deconstruct_array(arr, REGCLASSOID, typlen, typbyval, typalign, &datums, &nulls, &npart); @@ -479,8 +491,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) partitions[i] = DatumGetObjectId(datums[i]); if (npart < 2) - elog(ERROR, - "There must be at least two partitions to merge"); + elog(ERROR, "there must be at least two partitions to merge"); /* Check if all partitions are from the same parent */ for (i = 0; i < npart; i++) @@ -488,14 +499,14 @@ merge_range_partitions(PG_FUNCTION_ARGS) Oid p = get_parent_of_partition(partitions[i], &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "Relation '%s' is not a partition", - get_rel_name(partitions[i])); + elog(ERROR, "relation '%s' is not a partition", + get_rel_name_or_relid(partitions[i])); if (parent == InvalidOid) parent = p; if (p != parent) - elog(ERROR, "All relations must have the same parent"); + elog(ERROR, "all relations must share the same parent"); } merge_range_partitions_internal(parent, partitions, npart); @@ -629,8 +640,8 @@ check_adjacence(Oid cmp_proc, List *ranges) } /* - * Drops old partition constraint and creates a new one with specified - * boundaries + * Drop old partition constraint and create a new one + * with specified boundaries */ static void 
recreate_range_constraint(Oid partition, @@ -737,7 +748,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) if (i < prel->children_count - 1) { RangeEntry *cur = &ranges[i], - *next = &ranges[i+1]; + *next = &ranges[i + 1]; recreate_range_constraint(next->child_oid, get_relid_attribute_name(prel->key, prel->attnum), @@ -749,7 +760,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) drop_table(relid); - PG_RETURN_VOID(); + PG_RETURN_VOID(); } /* diff --git a/src/relation_info.h b/src/relation_info.h index 02f18d9d..b0cdd5f2 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -14,12 +14,11 @@ #include "postgres.h" #include "access/attnum.h" +#include "fmgr.h" #include "port/atomics.h" #include "storage/lock.h" -#include "fmgr.h" +#include "utils/datum.h" -#define BOUND_INFINITY_MASK 0x01 -#define BOUND_NEGATIVE_MASK 0x02 /* Range bound */ typedef struct @@ -31,34 +30,57 @@ typedef struct is negative */ } Bound; -#define FINITE 0 -#define PLUS_INFINITY (0 | BOUND_INFINITY_MASK) -#define MINUS_INFINITY (0 | BOUND_INFINITY_MASK | BOUND_NEGATIVE_MASK) -#define MakeBound(inf, _value, _infinity_type) \ - do \ - { \ - (inf)->value = (_value); \ - (inf)->is_infinite = (_infinity_type); \ - } while (0) +#define BOUND_INFINITY_MASK 0x01 +#define BOUND_NEGATIVE_MASK 0x02 + +#define FINITE 0 +#define PLUS_INFINITY (BOUND_INFINITY_MASK) +#define MINUS_INFINITY (BOUND_INFINITY_MASK | BOUND_NEGATIVE_MASK) #define IsInfinite(i) ((i)->is_infinite & BOUND_INFINITY_MASK) #define IsPlusInfinity(i) (IsInfinite(i) && !((i)->is_infinite & BOUND_NEGATIVE_MASK)) #define IsMinusInfinity(i) (IsInfinite(i) && ((i)->is_infinite & BOUND_NEGATIVE_MASK)) -#define BoundGetValue(i) ((i)->value) -#define CopyBound(i_to, i_from, by_val, len) \ - do \ - { \ - (i_to)->value = !IsInfinite(i_from) ? 
\ - datumCopy((i_from)->value, (by_val), (len)) : \ - (Datum) 0; \ - (i_to)->is_infinite = (i_from)->is_infinite; \ - } while (0) -/* - * Comparison macros for bounds - */ -inline static int8_t + +inline static Bound +CopyBound(const Bound *src, bool byval, int typlen) +{ + Bound bound = { + IsInfinite(src) ? + src->value : + datumCopy(src->value, byval, typlen), + src->is_infinite + }; + + return bound; +} + +inline static Bound +MakeBound(Datum value) +{ + Bound bound = { value, FINITE }; + + return bound; +} + +inline static Bound +MakeBoundInf(uint8 infinity_type) +{ + Bound bound = { (Datum) 0, infinity_type }; + + return bound; +} + +inline static Datum +BoundGetValue(const Bound *bound) +{ + Assert(!IsInfinite(bound)); + + return bound->value; +} + +inline static int cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) @@ -66,9 +88,12 @@ cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) if (IsMinusInfinity(b2) || IsPlusInfinity(b1)) return 1; + Assert(cmp_func); + return FunctionCall2(cmp_func, BoundGetValue(b1), BoundGetValue(b2)); } + /* * Partitioning type. 
*/ From 05caed24a9895c3c49045dfa24cb4d39bee5498d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 8 Feb 2017 19:37:53 +0300 Subject: [PATCH 0168/1124] clean code, remove function get_attribute_type_pl(), small fixes --- expected/pathman_basic.out | 4 +- expected/pathman_calamity.out | 26 +- hash.sql | 2 +- init.sql | 9 - range.sql | 34 +-- sql/pathman_calamity.sql | 9 +- src/partition_creation.c | 7 +- src/pg_pathman.c | 10 +- src/pl_funcs.c | 12 - src/pl_range_funcs.c | 532 ++++++++++++++++++---------------- 10 files changed, 309 insertions(+), 336 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 6f328018..d3319dba 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1610,9 +1610,9 @@ NOTICE: sequence "zero_seq" does not exist, skipping (1 row) SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); -ERROR: cannot append to empty partitions set +ERROR: relation "zero" has no partitions SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); -ERROR: cannot prepend to empty partitions set +ERROR: relation "zero" has no partitions SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); add_range_partition --------------------- diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 499f28b9..82ffb42a 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -230,26 +230,12 @@ SELECT get_base_type(NULL) IS NULL; t (1 row) -/* check function get_attribute_type() */ -SELECT get_attribute_type('calamity.part_test', 'val'); - get_attribute_type --------------------- - integer -(1 row) - -SELECT get_attribute_type('calamity.part_test', NULL) IS NULL; - ?column? ----------- - t -(1 row) - -SELECT get_attribute_type(NULL, 'val') IS NULL; - ?column? 
----------- - t -(1 row) - -SELECT get_attribute_type(NULL, NULL) IS NULL; +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; ?column? ---------- t diff --git a/hash.sql b/hash.sql index 53489e1f..ef136f6a 100644 --- a/hash.sql +++ b/hash.sql @@ -261,7 +261,7 @@ BEGIN quote_ident(plain_relname || '_%s'); /* Fetch base hash function for atttype */ - atttype := @extschema@.get_attribute_type(parent_relid, attr); + atttype := @extschema@.get_partition_key_type(parent_relid); /* Format function definition and execute it */ EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, diff --git a/init.sql b/init.sql index 9ed807bc..166ddeb1 100644 --- a/init.sql +++ b/init.sql @@ -704,15 +704,6 @@ CREATE OR REPLACE FUNCTION @extschema@.get_base_type( RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; -/* - * Returns attribute type name for relation. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( - relid REGCLASS, - attname TEXT) -RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' -LANGUAGE C STRICT; - /* * Return partition key type */ diff --git a/range.sql b/range.sql index 7ae728c4..a2c0ce45 100644 --- a/range.sql +++ b/range.sql @@ -488,22 +488,18 @@ BEGIN /* Acquire data modification lock (prevent further modifications) */ PERFORM @extschema@.prevent_relation_modification(partition_relid); + v_atttype = @extschema@.get_partition_key_type(v_parent); + SELECT attname, parttype FROM @extschema@.pathman_config WHERE partrel = v_parent INTO v_attname, v_part_type; - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; - END IF; - /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; - v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); - /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', @extschema@.get_base_type(v_atttype)::TEXT) @@ -589,7 +585,6 @@ CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( RETURNS TEXT AS $$ DECLARE - v_attname TEXT; v_atttype REGTYPE; v_part_name TEXT; v_interval TEXT; @@ -600,16 +595,12 @@ BEGIN /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); - SELECT attname, range_interval + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + INTO v_interval; EXECUTE format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @@ -700,7 +691,6 @@ CREATE OR REPLACE FUNCTION 
@extschema@.prepend_range_partition( RETURNS TEXT AS $$ DECLARE - v_attname TEXT; v_atttype REGTYPE; v_part_name TEXT; v_interval TEXT; @@ -711,16 +701,12 @@ BEGIN /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); - SELECT attname, range_interval + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + INTO v_interval; EXECUTE format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index cb66c753..d14141b8 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -77,11 +77,10 @@ SELECT get_base_type('int4'::regtype); SELECT get_base_type('calamity.test_domain'::regtype); SELECT get_base_type(NULL) IS NULL; -/* check function get_attribute_type() */ -SELECT get_attribute_type('calamity.part_test', 'val'); -SELECT get_attribute_type('calamity.part_test', NULL) IS NULL; -SELECT get_attribute_type(NULL, 'val') IS NULL; -SELECT get_attribute_type(NULL, NULL) IS NULL; +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +SELECT get_partition_key_type(0::regclass); +SELECT get_partition_key_type(NULL) IS NULL; /* check function build_check_constraint_name_attnum() */ SELECT build_check_constraint_name('calamity.part_test', 1::int2); diff --git a/src/partition_creation.c b/src/partition_creation.c index c8550456..d66844be 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1511,10 +1511,6 @@ invoke_init_callback_internal(init_callback_params *cb_params) ev_datum = cb_params->params.range_params.end_value; Oid type = 
cb_params->params.range_params.value_type; - /* Convert min & max to CSTRING */ - // start_value = datum_to_cstring(sv_datum, type); - // end_value = datum_to_cstring(ev_datum, type); - pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); JSB_INIT_VAL(&key, WJB_KEY, "parent"); @@ -1532,6 +1528,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "range_min"); if (!IsInfinite(&sv_datum)) { + /* Convert min to CSTRING */ start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); JSB_INIT_VAL(&val, WJB_VALUE, start_value); } @@ -1542,12 +1539,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) JSB_INIT_VAL(&key, WJB_KEY, "range_max"); if (!IsInfinite(&ev_datum)) { + /* Convert max to CSTRING */ end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); JSB_INIT_VAL(&val, WJB_VALUE, end_value); } else JSB_INIT_NULL_VAL(&val, WJB_VALUE); - // JSB_INIT_VAL(&val, WJB_VALUE, end_value); result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 09a4da03..4e748a58 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -536,12 +536,14 @@ select_range_partitions(const Datum value, current_re = &ranges[i]; - // cmp_min = FunctionCall2(cmp_func, value, current_re->min); - // cmp_max = FunctionCall2(cmp_func, value, current_re->max); cmp_min = IsInfinite(¤t_re->min) ? - 1 : FunctionCall2(cmp_func, value, BoundGetValue(¤t_re->min)); + 1 : + FunctionCall2(cmp_func, value, + BoundGetValue(¤t_re->min)); cmp_max = IsInfinite(¤t_re->max) ? 
- -1 : FunctionCall2(cmp_func, value, BoundGetValue(¤t_re->max)); + -1 : + FunctionCall2(cmp_func, value, + BoundGetValue(¤t_re->max)); is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index eeb3cad4..b48d67e9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -217,18 +217,6 @@ get_base_type_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); } -/* - * Get type (as REGTYPE) of a given attribute. - */ -Datum -get_attribute_type_pl(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - - PG_RETURN_OID(get_attribute_type(relid, text_to_cstring(attname), false)); -} - /* * Return partition key type */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 9a2898b1..05c36d27 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -30,22 +30,25 @@ static char *deparse_constraint(Oid relid, Node *expr); -static ArrayType *construct_infinitable_array(Bound **elems, - uint32_t nelems, +static ArrayType *construct_infinitable_array(Bound *elems, + int nelems, Oid elmtype, int elmlen, bool elmbyval, char elmalign); -static void check_adjacence(Oid cmp_proc, List *ranges); -static void merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart); -static void recreate_range_constraint(Oid partition, - const char *attname, - AttrNumber attnum, - Oid atttype, - const Bound *lower, - const Bound *upper); +static void check_range_adjacence(Oid cmp_proc, List *ranges); +static void merge_range_partitions_internal(Oid parent, + Oid *parts, + uint32 nparts); +static void modify_range_constraint(Oid child_relid, + const char *attname, + AttrNumber attnum, + Oid atttype, + const Bound *lower, + const Bound *upper); static char *get_qualified_rel_name(Oid relid); -static void drop_table(Oid relid); +static void drop_table_by_oid(Oid relid); + /* 
Function declarations */ @@ -256,7 +259,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) if (ranges[i].child_oid == partition_relid) { ArrayType *arr; - Bound *elems[2] = { &ranges[i].min, &ranges[i].max }; + Bound elems[2] = { ranges[i].min, ranges[i].max }; arr = construct_infinitable_array(elems, 2, prel->atttype, prel->attlen, @@ -285,7 +288,7 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) { Oid parent_relid = InvalidOid; int partition_idx = 0; - Bound *elems[2]; + Bound elems[2]; RangeEntry *ranges; const PartRelationInfo *prel; @@ -319,15 +322,14 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); - elems[0] = &ranges[partition_idx].min; - elems[1] = &ranges[partition_idx].max; + elems[0] = ranges[partition_idx].min; + elems[1] = ranges[partition_idx].max; - PG_RETURN_ARRAYTYPE_P( - construct_infinitable_array(elems, 2, - prel->atttype, - prel->attlen, - prel->attbyval, - prel->attalign)); + PG_RETURN_ARRAYTYPE_P(construct_infinitable_array(elems, 2, + prel->atttype, + prel->attlen, + prel->attbyval, + prel->attalign)); } @@ -367,39 +369,6 @@ build_range_condition(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } -/* - * Transform constraint into cstring - */ -static char * -deparse_constraint(Oid relid, Node *expr) -{ - Relation rel; - RangeTblEntry *rte; - Node *cooked_expr; - ParseState *pstate; - List *context; - char *result; - - context = deparse_context_for(get_rel_name(relid), relid); - - rel = heap_open(relid, NoLock); - - /* Initialize parse state */ - pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); - addRTEtoQuery(pstate, rte, true, true, true); - - /* Transform constraint into executable expression (i.e. 
cook it) */ - cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); - - /* Transform expression into string */ - result = deparse_expression(cooked_expr, context, false, false); - - heap_close(rel, NoLock); - - return result; -} - Datum build_sequence_name(PG_FUNCTION_ARGS) { @@ -416,49 +385,10 @@ build_sequence_name(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } -/* - * Build an 1d array of Bound elements - * - * The main difference from construct_array() is that - * it will substitute infinite values with NULLs - */ -static ArrayType * -construct_infinitable_array(Bound **elems, - uint32_t nelems, - Oid elemtype, - int elemlen, - bool elembyval, - char elemalign) -{ - ArrayType *arr; - Datum *datums; - bool *nulls; - int dims[1] = { nelems }; - int lbs[1] = { 1 }; - int i; - - datums = palloc(sizeof(Datum) * nelems); - nulls = palloc(sizeof(bool) * nelems); - - for (i = 0; i < nelems; i++) - { - datums[i] = IsInfinite(elems[i]) ? - (Datum) 0 : - BoundGetValue(elems[i]); - nulls[i] = IsInfinite(elems[i]); - } - - arr = construct_md_array(datums, nulls, 1, - dims, lbs, - elemtype, elemlen, - elembyval, elemalign); - - return arr; -} - /* - * Merge multiple partitions. All data will be copied to the first one. + * Merge multiple partitions. + * All data will be copied to the first one. * The rest of partitions will be dropped. 
*/ Datum @@ -471,7 +401,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) Oid *partitions; Datum *datums; bool *nulls; - int npart; + int nparts; int16 typlen; bool typbyval; char typalign; @@ -484,95 +414,106 @@ merge_range_partitions(PG_FUNCTION_ARGS) get_typlenbyvalalign(REGCLASSOID, &typlen, &typbyval, &typalign); deconstruct_array(arr, REGCLASSOID, typlen, typbyval, typalign, - &datums, &nulls, &npart); + &datums, &nulls, &nparts); - partitions = palloc(sizeof(Oid) * npart); - for (i = 0; i < npart; i++) + /* Extract partition Oids from array */ + partitions = palloc(sizeof(Oid) * nparts); + for (i = 0; i < nparts; i++) partitions[i] = DatumGetObjectId(datums[i]); - if (npart < 2) - elog(ERROR, "there must be at least two partitions to merge"); + if (nparts < 2) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("there must be at least two partitions"))); /* Check if all partitions are from the same parent */ - for (i = 0; i < npart; i++) + for (i = 0; i < nparts; i++) { - Oid p = get_parent_of_partition(partitions[i], &parent_search); + Oid cur_parent = get_parent_of_partition(partitions[i], &parent_search); + /* If we couldn't find a parent, it's not a partition */ if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation '%s' is not a partition", - get_rel_name_or_relid(partitions[i])); + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("relation \"%s\" is not a partition", + get_rel_name_or_relid(partitions[i])))); + /* 'parent' is not initialized */ if (parent == InvalidOid) - parent = p; + parent = cur_parent; /* save parent */ - if (p != parent) - elog(ERROR, "all relations must share the same parent"); + /* Oops, parent mismatch! 
*/ + if (cur_parent != parent) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("all relations must share the same parent"))); } - merge_range_partitions_internal(parent, partitions, npart); + /* Now merge partitions */ + merge_range_partitions_internal(parent, partitions, nparts); PG_RETURN_VOID(); } - static void -merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) +merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) { - RangeEntry *ranges; - int i, - j; - List *plist = NIL; - RangeEntry *first, *last; - const PartRelationInfo *prel; - FmgrInfo finfo; + const PartRelationInfo *prel; + List *rentry_list = NIL; + RangeEntry *ranges, + *first, + *last; + FmgrInfo cmp_proc; + int i; prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); - if (prel->parttype != PT_RANGE) - elog(ERROR, "Only range partitions can be merged"); - + /* Fetch ranges array */ ranges = PrelGetRangesArray(prel); /* Lock parent till transaction's end */ xact_lock_partitioned_rel(parent, false); - /* Lock partitions */ - for (i = 0; i < npart; i++) + /* Process partitions */ + for (i = 0; i < nparts; i++) { - prevent_relation_modification_internal(partitions[0]); + int j; + + /* Lock partition in ACCESS EXCLUSIVE mode */ + prevent_relation_modification_internal(parts[0]); /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) - if (ranges[j].child_oid == partitions[i]) + if (ranges[j].child_oid == parts[i]) { - plist = lappend(plist, &ranges[j]); + rentry_list = lappend(rentry_list, &ranges[j]); break; } } - check_adjacence(prel->cmp_proc, plist); + /* Check that partitions are adjacent */ + check_range_adjacence(prel->cmp_proc, rentry_list); - /* Create a new constraint. 
To do this first determine the bounds */ - first = (RangeEntry *) linitial(plist); - last = (RangeEntry *) llast(plist); + /* First determine the bounds of a new constraint */ + first = (RangeEntry *) linitial(rentry_list); + last = (RangeEntry *) llast(rentry_list); - /* If last range is less than first one then swap them */ - fmgr_info(prel->cmp_proc, &finfo); - if (cmp_bounds(&finfo, &last->min, &first->min) < 0) + /* Swap ranges if 'last' < 'first' */ + fmgr_info(prel->cmp_proc, &cmp_proc); + if (cmp_bounds(&cmp_proc, &last->min, &first->min) < 0) { RangeEntry *tmp = last; + last = first; first = tmp; } /* Drop old constraint and create a new one */ - recreate_range_constraint(partitions[0], - get_relid_attribute_name(prel->key, prel->attnum), - prel->attnum, - prel->atttype, - &first->min, - &last->max); + modify_range_constraint(parts[0], + get_relid_attribute_name(prel->key, + prel->attnum), + prel->attnum, + prel->atttype, + &first->min, + &last->max); /* Make constraint visible */ CommandCounterIncrement(); @@ -580,129 +521,27 @@ merge_range_partitions_internal(Oid parent, Oid *partitions, uint32 npart) if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "could not connect using SPI"); - /* - * Migrate the data from all partition to the first one - */ - for (i = 1; i < npart; i++) + /* Migrate the data from all partition to the first one */ + for (i = 1; i < nparts; i++) { - char *query = psprintf("WITH part_data AS (DELETE FROM %s RETURNING *) " + char *query = psprintf("WITH part_data AS ( " + "DELETE FROM %s RETURNING " + "*) " "INSERT INTO %s SELECT * FROM part_data", - get_qualified_rel_name(partitions[i]), - get_qualified_rel_name(partitions[0])); + get_qualified_rel_name(parts[i]), + get_qualified_rel_name(parts[0])); SPI_exec(query, 0); + pfree(query); } SPI_finish(); - /* - * Drop old partitions - */ - for (i = 1; i < npart; i++) - drop_table(partitions[i]); - + /* Drop obsolete partitions */ + for (i = 1; i < nparts; i++) + 
drop_table_by_oid(parts[i]); } -/* - * Check that range entries are adjacent - */ -static void -check_adjacence(Oid cmp_proc, List *ranges) -{ - ListCell *lc; - RangeEntry *last = NULL; - FmgrInfo finfo; - - fmgr_info(cmp_proc, &finfo); - - foreach(lc, ranges) - { - RangeEntry *cur = (RangeEntry *) lfirst(lc); - - /* Skip first iteration */ - if (!last) - { - last = cur; - continue; - } - - /* - * Check that last and current partitions are adjacent - */ - if ((cmp_bounds(&finfo, &last->max, &cur->min) != 0) - && (cmp_bounds(&finfo, &cur->max, &last->min) != 0)) - elog(ERROR, - "Partitions '%s' and '%s' aren't adjacent", - get_rel_name(last->child_oid), get_rel_name(cur->child_oid)); - - last = cur; - } -} - -/* - * Drop old partition constraint and create a new one - * with specified boundaries - */ -static void -recreate_range_constraint(Oid partition, - const char *attname, - AttrNumber attnum, - Oid atttype, - const Bound *lower, - const Bound *upper) -{ - Constraint *constraint; - Relation partition_rel; - char *attname_nonconst = pstrdup(attname); - - /* Drop old constraint */ - drop_check_constraint(partition, attnum); - - /* Build a new one */ - constraint = build_range_check_constraint(partition, - attname_nonconst, - lower, - upper, - atttype); - - /* Open the relation and add new check constraint */ - partition_rel = heap_open(partition, AccessExclusiveLock); - AddRelationNewConstraints(partition_rel, NIL, - list_make1(constraint), - false, true, true); - heap_close(partition_rel, NoLock); - - pfree(attname_nonconst); -} - -/* - * Return palloced fully qualified relation name as a cstring - */ -static char * -get_qualified_rel_name(Oid relid) -{ - Oid namespace = get_rel_namespace(relid); - - return psprintf("%s.%s", - quote_identifier(get_namespace_name(namespace)), - quote_identifier(get_rel_name(relid))); -} - -static void -drop_table(Oid relid) -{ - DropStmt *n = makeNode(DropStmt); - const char *relname = get_qualified_rel_name(relid); - - 
n->removeType = OBJECT_TABLE; - n->missing_ok = false; - n->objects = list_make1(stringToQualifiedNameList(relname)); - n->arguments = NIL; - n->behavior = DROP_RESTRICT; // default behaviour - n->concurrent = false; - - RemoveRelations(n); -} /* * Drops partition and expands the next partition so that it cover dropped @@ -750,7 +589,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) RangeEntry *cur = &ranges[i], *next = &ranges[i + 1]; - recreate_range_constraint(next->child_oid, + modify_range_constraint(next->child_oid, get_relid_attribute_name(prel->key, prel->attnum), prel->attnum, prel->atttype, @@ -758,7 +597,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) &next->max); } - drop_table(relid); + drop_table_by_oid(relid); PG_RETURN_VOID(); } @@ -784,3 +623,188 @@ validate_interval_value(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } + + +/* + * ------------------ + * Helper functions + * ------------------ + * + */ + +/* + * Drop old partition constraint and create + * a new one with specified boundaries + */ +static void +modify_range_constraint(Oid child_relid, + const char *attname, + AttrNumber attnum, + Oid atttype, + const Bound *lower, + const Bound *upper) +{ + Constraint *constraint; + Relation partition_rel; + char *attname_nonconst = pstrdup(attname); + + /* Drop old constraint */ + drop_check_constraint(child_relid, attnum); + + /* Build a new one */ + constraint = build_range_check_constraint(child_relid, + attname_nonconst, + lower, + upper, + atttype); + + /* Open the relation and add new check constraint */ + partition_rel = heap_open(child_relid, AccessExclusiveLock); + AddRelationNewConstraints(partition_rel, NIL, + list_make1(constraint), + false, true, true); + heap_close(partition_rel, NoLock); + + pfree(attname_nonconst); +} + +/* + * Transform constraint into cstring + */ +static char * +deparse_constraint(Oid relid, Node *expr) +{ + Relation rel; + RangeTblEntry *rte; + Node *cooked_expr; + ParseState *pstate; + List *context; + 
char *result; + + context = deparse_context_for(get_rel_name(relid), relid); + + rel = heap_open(relid, NoLock); + + /* Initialize parse state */ + pstate = make_parsestate(NULL); + rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + addRTEtoQuery(pstate, rte, true, true, true); + + /* Transform constraint into executable expression (i.e. cook it) */ + cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); + + /* Transform expression into string */ + result = deparse_expression(cooked_expr, context, false, false); + + heap_close(rel, NoLock); + + return result; +} + +/* + * Build an 1d array of Bound elements + * + * The main difference from construct_array() is that + * it will substitute infinite values with NULLs + */ +static ArrayType * +construct_infinitable_array(Bound *elems, + int nelems, + Oid elemtype, + int elemlen, + bool elembyval, + char elemalign) +{ + ArrayType *arr; + Datum *datums; + bool *nulls; + int dims[1] = { nelems }; + int lbs[1] = { 1 }; + int i; + + datums = palloc(sizeof(Datum) * nelems); + nulls = palloc(sizeof(bool) * nelems); + + for (i = 0; i < nelems; i++) + { + datums[i] = IsInfinite(&elems[i]) ? 
+ (Datum) 0 : + BoundGetValue(&elems[i]); + nulls[i] = IsInfinite(&elems[i]); + } + + arr = construct_md_array(datums, nulls, 1, + dims, lbs, + elemtype, elemlen, + elembyval, elemalign); + + return arr; +} + +/* + * Check that range entries are adjacent + */ +static void +check_range_adjacence(Oid cmp_proc, List *ranges) +{ + ListCell *lc; + RangeEntry *last = NULL; + FmgrInfo finfo; + + fmgr_info(cmp_proc, &finfo); + + foreach(lc, ranges) + { + RangeEntry *cur = (RangeEntry *) lfirst(lc); + + /* Skip first iteration */ + if (!last) + { + last = cur; + continue; + } + + /* Check that last and current partitions are adjacent */ + if ((cmp_bounds(&finfo, &last->max, &cur->min) != 0) && + (cmp_bounds(&finfo, &cur->max, &last->min) != 0)) + { + elog(ERROR, "partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(last->child_oid), + get_rel_name(cur->child_oid)); + } + + last = cur; + } +} + +/* + * Return palloced fully qualified relation name as a cstring + */ +static char * +get_qualified_rel_name(Oid relid) +{ + Oid namespace = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(namespace)), + quote_identifier(get_rel_name(relid))); +} + +/* + * Drop table using it's Oid + */ +static void +drop_table_by_oid(Oid relid) +{ + DropStmt *n = makeNode(DropStmt); + const char *relname = get_qualified_rel_name(relid); + + n->removeType = OBJECT_TABLE; + n->missing_ok = false; + n->objects = list_make1(stringToQualifiedNameList(relname)); + n->arguments = NIL; + n->behavior = DROP_RESTRICT; /* default behavior */ + n->concurrent = false; + + RemoveRelations(n); +} From 59778f567d349f7ab2605b00718482aa924135da Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 8 Feb 2017 20:03:23 +0300 Subject: [PATCH 0169/1124] Add error propagation under creating new partition; add test for the case of deleting callback function under existing the referencies to it on new partitions --- expected/pathman_callbacks.out | 33 
+++++++++++++++++++++++++++++++++ sql/pathman_callbacks.sql | 21 +++++++++++++++++++++ src/partition_creation.c | 10 ++++++++-- 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 84d69d81..0df2afa0 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -129,6 +129,39 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", DROP TABLE abc CASCADE; NOTICE: drop cascades to 2 other objects +/* test the temprary deletion of callback function */ +CREATE TABLE abc(a serial, b int); +SELECT set_init_callback('abc', + 'callbacks.abc_on_part_created_callback'); + set_init_callback +------------------- + +(1 row) + +SELECT create_range_partitions('abc', 'a', 1, 100, 2); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "public", "partition_schema": "public"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "public", "partition_schema": "public"} + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO abc VALUES (201, 0); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "public", "partition_schema": "public"} +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +INSERT INTO abc VALUES (301, 0); +ERROR: create_partitions_internal(): callback function "]callbacks.abc_on_part_created_callback(jsonb)" doesn't exist +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( + args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE WARNING 'callback arg: %', args::TEXT; +END +$$ language plpgsql; +INSERT INTO abc VALUES (301, 0); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", 
"parent_schema": "public", "partition_schema": "public"} +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA callbacks CASCADE; NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 6c406529..4c997dd3 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -53,5 +53,26 @@ SELECT create_range_partitions('abc', 'a', 1, 100, 2); DROP TABLE abc CASCADE; +/* test the temprary deletion of callback function */ +CREATE TABLE abc(a serial, b int); +SELECT set_init_callback('abc', + 'callbacks.abc_on_part_created_callback'); +SELECT create_range_partitions('abc', 'a', 1, 100, 2); + +INSERT INTO abc VALUES (201, 0); +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +INSERT INTO abc VALUES (301, 0); +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( + args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE WARNING 'callback arg: %', args::TEXT; +END +$$ language plpgsql; +INSERT INTO abc VALUES (301, 0); + +DROP TABLE abc CASCADE; + + DROP SCHEMA callbacks CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/partition_creation.c b/src/partition_creation.c index 43ae311e..bfb0f076 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -405,8 +405,14 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) edata = CopyErrorData(); FlushErrorState(); - elog(LOG, "create_partitions_internal(): %s [%u]", - edata->message, MyProcPid); + if (IsBackgroundWorker) + ereport(LOG, + (errmsg("create_partitions_internal(): %s [%u]", edata->message, MyProcPid), + (edata->detail) ? errdetail("%s", edata->detail) : 0)); + else + ereport(ERROR, + (errmsg("create_partitions_internal(): %s", edata->message), + (edata->detail) ? 
errdetail("%s", edata->detail) : 0)); FreeErrorData(edata); From c4723e38a81cd4708bc89006c81ee2dfe249266a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 8 Feb 2017 20:37:57 +0300 Subject: [PATCH 0170/1124] improve functions validate_interval_value() & set_interval() --- expected/pathman_calamity.out | 28 +++++++++++++++++++++++ init.sql | 43 +++++++++++++++++++++++++++++++---- range.sql | 24 ------------------- sql/pathman_calamity.sql | 12 ++++++++++ src/pl_funcs.c | 4 +--- src/pl_range_funcs.c | 33 ++++++++++++++++++++------- src/utils.c | 2 +- 7 files changed, 105 insertions(+), 41 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 82ffb42a..4b88663d 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -132,6 +132,34 @@ NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping 4 (1 row) +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for integer: "abc" +SELECT drop_partitions('calamity.part_test', true); +NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping + drop_partitions +----------------- + 3 +(1 row) + DELETE FROM calamity.part_test; /* check function build_hash_condition() */ SELECT build_hash_condition('int4', 'val', 10, 1); diff --git a/init.sql b/init.sql index 166ddeb1..2ec64a27 
100644 --- a/init.sql +++ b/init.sql @@ -15,10 +15,12 @@ * text to Datum */ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( - parent REGCLASS, - interval_value TEXT) + partrel REGCLASS, + attname TEXT, + parttype INTEGER, + range_interval TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' -LANGUAGE C STRICT; +LANGUAGE C; /* @@ -36,8 +38,14 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( parttype INTEGER NOT NULL, range_interval TEXT, - CHECK (parttype IN (1, 2)), /* check for allowed part types */ - CHECK (@extschema@.validate_interval_value(partrel, range_interval)) + /* check for allowed part types */ + CHECK (parttype IN (1, 2)), + + /* check for correct interval */ + CHECK (@extschema@.validate_interval_value(partrel, + attname, + parttype, + range_interval)) ); @@ -190,6 +198,31 @@ END $$ LANGUAGE plpgsql STRICT; +/* + * Set (or reset) default interval for auto created partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.set_interval( + relation REGCLASS, + value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + affected INTEGER; +BEGIN + UPDATE @extschema@.pathman_config + SET range_interval = value::text + WHERE partrel = relation AND parttype = 2; + + /* Check number of affected rows */ + GET DIAGNOSTICS affected = ROW_COUNT; + + IF affected = 0 THEN + RAISE EXCEPTION 'table "%" is not partitioned by RANGE', relation; + END IF; +END +$$ +LANGUAGE plpgsql; + /* * Show all existing parents and partitions. 
diff --git a/range.sql b/range.sql index a2c0ce45..26a31688 100644 --- a/range.sql +++ b/range.sql @@ -435,30 +435,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -/* - * Set (or reset) default interval for auto created partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.set_interval(parent REGCLASS, value ANYELEMENT) -RETURNS VOID AS -$$ -DECLARE - affected INTEGER; -BEGIN - UPDATE @extschema@.pathman_config - SET range_interval = value::text - WHERE partrel = parent; - - GET DIAGNOSTICS affected = ROW_COUNT; - - IF affected = 0 THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent; - END IF; -END -$$ -LANGUAGE plpgsql; - - /* * Split RANGE partition */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index d14141b8..c8d8595c 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -45,6 +45,18 @@ SELECT drop_partitions('calamity.part_test', true); DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ + +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT set_interval('calamity.part_test', 100); /* ok */ +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +SELECT drop_partitions('calamity.part_test', true); +DELETE FROM calamity.part_test; + + /* check function build_hash_condition() */ SELECT build_hash_condition('int4', 'val', 10, 1); SELECT build_hash_condition('text', 'val', 10, 1); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b48d67e9..673047a2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -765,9 +765,7 @@ lock_partitioned_relation(PG_FUNCTION_ARGS) Datum prevent_relation_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - - (void) prevent_relation_modification_internal(relid); + prevent_relation_modification_internal(PG_GETARG_OID(0)); 
PG_RETURN_VOID(); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 05c36d27..f5e879ad 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -610,16 +610,33 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) Datum validate_interval_value(PG_FUNCTION_ARGS) { - const PartRelationInfo *prel; - Oid parent = PG_GETARG_OID(0); - Datum interval = PG_GETARG_DATUM(1); + Oid partrel = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); + PartType parttype = DatumGetPartType(PG_GETARG_DATUM(2)); + Datum range_interval = PG_GETARG_DATUM(3); - /* TODO!!! */ - prel = get_pathman_relation_info(parent); - if (!prel) - PG_RETURN_BOOL(true); + char *attname_cstr; + Oid atttype; /* type of partitioned attribute */ + + if (PG_ARGISNULL(0)) + elog(ERROR, "'partrel' should not be NULL"); + + if (PG_ARGISNULL(1)) + elog(ERROR, "'attname' should not be NULL"); + + if (PG_ARGISNULL(2)) + elog(ERROR, "'parttype' should not be NULL"); + + /* it's OK if interval is NULL and table is HASH-partitioned */ + if (PG_ARGISNULL(3)) + PG_RETURN_BOOL(parttype == PT_HASH); + + /* Convert attname to CSTRING and fetch column's type */ + attname_cstr = text_to_cstring(attname); + atttype = get_attribute_type(partrel, attname_cstr, false); - extract_binary_interval_from_text(interval, prel->atttype, NULL); + /* Try converting textual representation */ + extract_binary_interval_from_text(range_interval, atttype, NULL); PG_RETURN_BOOL(true); } diff --git a/src/utils.c b/src/utils.c index c061ae09..1669accc 100644 --- a/src/utils.c +++ b/src/utils.c @@ -450,7 +450,7 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ ReleaseSysCache(htup); } else - elog(ERROR, "Cannot find input function for type %u", part_atttype); + elog(ERROR, "cannot find input function for type %u", part_atttype); /* * Convert interval from CSTRING to 'prel->atttype'. 
From 609813b906f35551ddcad3011980db0190a3d2bd Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 14:57:27 +0300 Subject: [PATCH 0171/1124] fix function get_pathman_relation_info(), refactoring for function validate_range_constraint() --- src/init.c | 126 +++++++++++++++++++++++++++++--------------- src/relation_info.c | 7 ++- 2 files changed, 89 insertions(+), 44 deletions(-) diff --git a/src/init.c b/src/init.c index 041fa7ca..8b9e9084 100644 --- a/src/init.c +++ b/src/init.c @@ -80,6 +80,12 @@ static bool validate_range_constraint(const Expr *expr, const AttrNumber part_attno, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null); +static bool validate_range_opexpr(const Expr *expr, + const PartRelationInfo *prel, + const TypeCacheEntry *tce, + const AttrNumber part_attno, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null); static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, @@ -920,6 +926,65 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(flinfo, &v1->min, &v2->min); } +/* Validates a single expression of kind VAR >= CONST or VAR < CONST */ +static bool +validate_range_opexpr(const Expr *expr, + const PartRelationInfo *prel, + const TypeCacheEntry *tce, + const AttrNumber part_attno, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null) +{ + const OpExpr *opexpr; + Datum val; + + if (!expr) + return false; + + /* Fail fast if it's not an OpExpr node */ + if(!IsA(expr, OpExpr)) + return false; + + /* Perform cast */ + opexpr = (const OpExpr *) expr; + + /* Try reading Const value */ + if (!read_opexpr_const(opexpr, prel, part_attno, &val)) + return false; + + /* Examine the strategy (expect '>=' OR '<') */ + switch (get_op_opfamily_strategy(opexpr->opno, tce->btree_opf)) + { + case BTGreaterEqualStrategyNumber: + { + /* Bound already exists */ + if (*lower_null == false) + return false; + + *lower_null = false; + *lower = val; + + return 
true; + } + + case BTLessStrategyNumber: + { + /* Bound already exists */ + if (*upper_null == false) + return false; + + *upper_null = false; + *upper = val; + + return true; + } + + default: + return false; + } +} + + /* * Validates range constraint. It MUST have one of the following formats: * @@ -936,66 +1001,41 @@ validate_range_constraint(const Expr *expr, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null) { - const TypeCacheEntry *tce; - const OpExpr *opexpr; - int strategy; - -/* Validates a single expression of kind VAR >= CONST or VAR < CONST */ -#define validate_range_expr(expr, part_attno) \ - { \ - Datum val; \ - opexpr = (OpExpr *) (expr); \ - strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf); \ - \ - /* Get const value */ \ - if (!read_opexpr_const(opexpr, prel, part_attno, &val)) \ - return false; \ - \ - /* Set min or max depending on operator */ \ - switch (strategy) \ - { \ - case BTGreaterEqualStrategyNumber: \ - *lower_null = false; \ - *lower = val; \ - break; \ - case BTLessStrategyNumber: \ - *upper_null = false; \ - *upper = val; \ - break; \ - default: \ - return false; \ - } \ - } + const TypeCacheEntry *tce; if (!expr) return false; + + /* Set default values */ *lower_null = *upper_null = true; + + /* Find type cache entry for partitioned column's type */ tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); - /* It could be either AND operator on top or just an OpExpr */ + /* Is it an AND clause? 
*/ if (and_clause((Node *) expr)) { - const BoolExpr *boolexpr = (const BoolExpr *) expr; - ListCell *lc; + const BoolExpr *boolexpr = (const BoolExpr *) expr; + ListCell *lc; + /* Walk through boolexpr's args */ foreach (lc, boolexpr->args) { - Node *arg = lfirst(lc); + const OpExpr *opexpr = (const OpExpr *) lfirst(lc); - if(!IsA(arg, OpExpr)) + /* Exit immediately if something is wrong */ + if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, part_attno, + lower, upper, lower_null, upper_null)) return false; - - validate_range_expr(arg, part_attno); } - return true; - } - else if(IsA(expr, OpExpr)) - { - validate_range_expr(expr, part_attno); + + /* Everything seems to be fine */ return true; } - return false; + /* It might be just an OpExpr clause */ + else return validate_range_opexpr(expr, prel, tce, part_attno, + lower, upper, lower_null, upper_null); } /* diff --git a/src/relation_info.c b/src/relation_info.c index 471ca1aa..3f184d12 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -296,8 +296,13 @@ get_pathman_relation_info(Oid relid) /* TODO: possible refactoring, pass found 'prel' instead of searching */ prel = refresh_pathman_relation_info(relid, part_type, attname, false); } + /* Else clear remaining cache entry */ - else remove_pathman_relation_info(relid); + else + { + remove_pathman_relation_info(relid); + prel = NULL; /* don't forget to reset 'prel' */ + } } elog(DEBUG2, From 484551a956ce41ec9e9a7b3f00b53eaa391f91ce Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 9 Feb 2017 15:05:40 +0300 Subject: [PATCH 0172/1124] collation support (not finished) --- src/pathman.h | 1 + src/pg_pathman.c | 29 ++++++++++++++++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/pathman.h b/src/pathman.h index 32e059b3..aafe07ee 100644 --- a/src/pathman.h +++ b/src/pathman.h @@ -167,6 +167,7 @@ void select_range_partitions(const Datum value, const RangeEntry *ranges, const int nranges, const int strategy, + Oid 
collid, WrapperNode *result); /* Examine expression in order to select partitions. */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 09a4da03..23008dc5 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -453,6 +453,7 @@ select_range_partitions(const Datum value, const RangeEntry *ranges, const int nranges, const int strategy, + Oid collid, WrapperNode *result) { const RangeEntry *current_re; @@ -487,9 +488,9 @@ select_range_partitions(const Datum value, /* Corner cases */ cmp_min = IsInfinite(&ranges[startidx].min) ? - 1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[startidx].min))); + 1 : DatumGetInt32(FunctionCall2Coll(cmp_func, collid, value, BoundGetValue(&ranges[startidx].min))); cmp_max = IsInfinite(&ranges[endidx].max) ? - -1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[endidx].max))); + -1 : DatumGetInt32(FunctionCall2Coll(cmp_func, collid, value, BoundGetValue(&ranges[endidx].max))); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || @@ -745,8 +746,10 @@ walk_expr_tree(Expr *expr, WalkerContext *context) * This function determines which partitions should appear in query plan. */ static void -handle_binary_opexpr(WalkerContext *context, WrapperNode *result, - const Node *varnode, const Const *c) +handle_binary_opexpr(WalkerContext *context, + WrapperNode *result, + const Node *varnode, + const Const *c) { int strategy; TypeCacheEntry *tce; @@ -795,7 +798,20 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, case PT_RANGE: { - FmgrInfo cmp_func; + FmgrInfo cmp_func; + Oid collid; + + /* + * If operator collation is different from default attribute + * collation then we cannot guarantee that we return correct + * partitions. So in this case we just return all of them + */ + if (expr->opcollid != prel->attcollid && strategy != BTEqualStrategyNumber) + goto binary_opexpr_return; + + collid = OidIsValid(expr->opcollid) ? 
+ expr->opcollid : + prel->attcollid; fill_type_cmp_fmgr_info(&cmp_func, getBaseType(c->consttype), @@ -806,6 +822,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, + collid, result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); @@ -893,6 +910,7 @@ search_range_partition_eq(const Datum value, ranges, nranges, BTEqualStrategyNumber, + prel->attcollid, &result); /* output */ if (result.found_gap) @@ -1001,6 +1019,7 @@ handle_const(const Const *c, WalkerContext *context) PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, + prel->attcollid, result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); From c0a1bb8627bbad2f3d7606168419da77517408bf Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 15:58:20 +0300 Subject: [PATCH 0173/1124] clean code in pl_hash_funcs.c & pl_range_funcs.c --- src/pl_hash_funcs.c | 166 +++++++++++++++++++++++++------------------ src/pl_range_funcs.c | 48 +++++++------ 2 files changed, 123 insertions(+), 91 deletions(-) diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 08a5e83b..8f66cf0a 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -22,6 +22,9 @@ #include "utils/array.h" +static char **deconstruct_text_array(Datum arr, int *num_elems); + + /* Function declarations */ PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); @@ -32,112 +35,85 @@ PG_FUNCTION_INFO_V1( get_hash_part_idx ); PG_FUNCTION_INFO_V1( build_hash_condition ); -static char **deconstruct_text_array(Datum arr, int *num_elems); - - /* * Create HASH partitions implementation (written in C). 
*/ Datum create_hash_partitions_internal(PG_FUNCTION_ARGS) { - Oid parent_relid = PG_GETARG_OID(0); - Datum partitioned_col_name = PG_GETARG_DATUM(1); - Oid partitioned_col_type; - uint32 part_count = PG_GETARG_INT32(2), - i; - - /* Partitions names and tablespaces */ - char **names = NULL, - **tablespaces = NULL; - int names_size = 0, - tablespaces_size = 0; - RangeVar **rangevars = NULL; +/* Free allocated arrays */ +#define DeepFreeArray(arr, arr_len) \ + do { \ + int arr_elem; \ + if (!arr) break; \ + for (arr_elem = 0; arr_elem < arr_len; arr_elem++) \ + pfree(arr[arr_elem]); \ + pfree(arr); \ + } while (0) + + Oid parent_relid = PG_GETARG_OID(0); + const char *partitioned_col_name = TextDatumGetCString(PG_GETARG_DATUM(1)); + Oid partitioned_col_type; + uint32 part_count = PG_GETARG_INT32(2), + i; + + /* Partition names and tablespaces */ + char **relnames = NULL, + **tablespaces = NULL; + int relnames_size = 0, + tablespaces_size = 0; + RangeVar **rangevars = NULL; /* Check that there's no partitions yet */ if (get_pathman_relation_info(parent_relid)) elog(ERROR, "cannot add new HASH partitions"); partitioned_col_type = get_attribute_type(parent_relid, - TextDatumGetCString(partitioned_col_name), + partitioned_col_name, false); - /* Get partition names and tablespaces */ + /* Extract partition names */ if (!PG_ARGISNULL(3)) - names = deconstruct_text_array(PG_GETARG_DATUM(3), &names_size); + relnames = deconstruct_text_array(PG_GETARG_DATUM(3), &relnames_size); + /* Extract partition tablespaces */ if (!PG_ARGISNULL(4)) tablespaces = deconstruct_text_array(PG_GETARG_DATUM(4), &tablespaces_size); + /* If both arrays are present, check that their lengths are equal */ + if (relnames && tablespaces && relnames_size != tablespaces_size) + elog(ERROR, "sizes of arrays 'relnames' and 'tablespaces' are different"); + /* Convert partition names into RangeVars */ - if (names_size > 0) + if (relnames) { - rangevars = palloc(sizeof(RangeVar) * names_size); - for (i = 0; 
i < names_size; i++) + rangevars = palloc(sizeof(RangeVar) * relnames_size); + for (i = 0; i < relnames_size; i++) { - List *nl = stringToQualifiedNameList(names[i]); + List *nl = stringToQualifiedNameList(relnames[i]); rangevars[i] = makeRangeVarFromNameList(nl); } } + /* Finally create HASH partitions */ for (i = 0; i < part_count; i++) { - RangeVar *rel = rangevars != NULL ? rangevars[i] : NULL; - char *tablespace = tablespaces != NULL ? tablespaces[i] : NULL; + RangeVar *partition_rv = rangevars ? rangevars[i] : NULL; + char *tablespace = tablespaces ? tablespaces[i] : NULL; /* Create a partition (copy FKs, invoke callbacks etc) */ create_single_hash_partition_internal(parent_relid, i, part_count, partitioned_col_type, - rel, tablespace); + partition_rv, tablespace); } - PG_RETURN_VOID(); -} - -/* - * Convert Datum into cstring array - */ -static char ** -deconstruct_text_array(Datum arr, int *num_elems) -{ - ArrayType *arrayval; - int16 elemlen; - bool elembyval; - char elemalign; - Datum *elem_values; - bool *elem_nulls; - int16 i; - - arrayval = DatumGetArrayTypeP(arr); - - Assert(ARR_ELEMTYPE(arrayval) == TEXTOID); - - get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), - &elemlen, &elembyval, &elemalign); - deconstruct_array(arrayval, - ARR_ELEMTYPE(arrayval), - elemlen, elembyval, elemalign, - &elem_values, &elem_nulls, num_elems); - - /* If there are actual values then convert them into cstrings */ - if (num_elems > 0) - { - char **strings = palloc(sizeof(char *) * *num_elems); - - for (i = 0; i < *num_elems; i++) - { - if (elem_nulls[i]) - elog(ERROR, - "Partition name and tablespace arrays cannot contain nulls"); - - strings[i] = TextDatumGetCString(elem_values[i]); - } - - return strings; - } + /* Free arrays */ + DeepFreeArray(relnames, relnames_size); + DeepFreeArray(tablespaces, tablespaces_size); + DeepFreeArray(rangevars, relnames_size); - return NULL; + PG_RETURN_VOID(); } /* @@ -201,3 +177,53 @@ build_hash_condition(PG_FUNCTION_ARGS) 
PG_RETURN_TEXT_P(cstring_to_text(result)); } + + +/* + * ------------------ + * Helper functions + * ------------------ + */ + +/* Convert Datum into CSTRING array */ +static char ** +deconstruct_text_array(Datum arr, int *num_elems) +{ + ArrayType *arrayval; + int16 elemlen; + bool elembyval; + char elemalign; + Datum *elem_values; + bool *elem_nulls; + int16 i; + + arrayval = DatumGetArrayTypeP(arr); + + Assert(ARR_ELEMTYPE(arrayval) == TEXTOID); + + get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), + &elemlen, &elembyval, &elemalign); + deconstruct_array(arrayval, + ARR_ELEMTYPE(arrayval), + elemlen, elembyval, elemalign, + &elem_values, &elem_nulls, num_elems); + + /* If there are actual values then convert them into CSTRINGs */ + if (num_elems > 0) + { + char **strings = palloc(sizeof(char *) * *num_elems); + + for (i = 0; i < *num_elems; i++) + { + if (elem_nulls[i]) + elog(ERROR, "partition name and tablespace arrays " + "may not contain nulls"); + + strings[i] = TextDatumGetCString(elem_values[i]); + } + + return strings; + } + + return NULL; +} diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f5e879ad..2fc1a41f 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -53,7 +53,7 @@ static void drop_table_by_oid(Oid relid); /* Function declarations */ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); -PG_FUNCTION_INFO_V1( find_or_create_range_partition); +PG_FUNCTION_INFO_V1( find_or_create_range_partition ); PG_FUNCTION_INFO_V1( check_range_available_pl ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); @@ -322,6 +322,7 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); + /* Build args for construct_infinitable_array() */ elems[0] = ranges[partition_idx].min; elems[1] = ranges[partition_idx].max; @@ -369,6 +370,7 @@ build_range_condition(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } +/* Build name for sequence for auto partition naming */ Datum build_sequence_name(PG_FUNCTION_ARGS) { @@ 
-544,8 +546,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* - * Drops partition and expands the next partition so that it cover dropped - * one + * Drops partition and expands the next partition + * so that it could cover the dropped one * * This function was written in order to support Oracle-like ALTER TABLE ... * DROP PARTITION. In Oracle partitions only have upper bound and when @@ -554,49 +556,54 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { - PartParentSearch parent_search; const PartRelationInfo *prel; - RangeEntry *ranges; - Oid relid = PG_GETARG_OID(0), - parent; - int i; + PartParentSearch parent_search; + Oid relid = PG_GETARG_OID(0), + parent; + RangeEntry *ranges; + int i; - /* Get parent relid */ + /* Get parent's relid */ parent = get_parent_of_partition(relid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) elog(ERROR, "relation \"%s\" is not a partition", get_rel_name_or_relid(relid)); + /* Fetch PartRelationInfo and perform some checks */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); + /* Fetch ranges array */ ranges = PrelGetRangesArray(prel); /* Looking for partition in child relations */ - for (i = 0; i < prel->children_count; i++) + for (i = 0; i < PrelChildrenCount(prel); i++) if (ranges[i].child_oid == relid) break; /* - * It must be in ranges array because we already know that table - * is a partition + * It must be in ranges array because we already + * know that this table is a partition */ - Assert(i < prel->children_count); + Assert(i < PrelChildrenCount(prel)); - /* If there is next partition then expand it */ - if (i < prel->children_count - 1) + /* Expand next partition if it exists */ + if (i < PrelChildrenCount(prel) - 1) { RangeEntry *cur = &ranges[i], *next = &ranges[i + 1]; + /* Drop old constraint and create a new one */ 
modify_range_constraint(next->child_oid, - get_relid_attribute_name(prel->key, prel->attnum), - prel->attnum, - prel->atttype, - &cur->min, - &next->max); + get_relid_attribute_name(prel->key, + prel->attnum), + prel->attnum, + prel->atttype, + &cur->min, + &next->max); } + /* Finally drop this partition */ drop_table_by_oid(relid); PG_RETURN_VOID(); @@ -646,7 +653,6 @@ validate_interval_value(PG_FUNCTION_ARGS) * ------------------ * Helper functions * ------------------ - * */ /* From 5dc7251300bf056a42e8ee4012b562cf37d76b41 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 16:12:20 +0300 Subject: [PATCH 0174/1124] improve function create_hash_partitions(), add calamity tests --- expected/pathman_calamity.out | 7 +++++++ hash.sql | 8 -------- sql/pathman_calamity.sql | 7 +++++++ src/pl_hash_funcs.c | 14 +++++++++++--- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4b88663d..cff73962 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -100,6 +100,13 @@ SELECT count(*) FROM calamity.part_test; (1 row) DELETE FROM calamity.part_test; +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY['calamity.p1']::TEXT[]); +ERROR: size of array 'relnames' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); +ERROR: size of array 'tablespaces' must be equal to 'partitions_count' /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); diff --git a/hash.sql b/hash.sql index ef136f6a..6c360d03 100644 --- a/hash.sql +++ b/hash.sql @@ -38,14 +38,6 @@ BEGIN INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) VALUES (parent_relid, attribute, 1); - 
IF array_length(relnames, 1) != partitions_count THEN - RAISE EXCEPTION 'Partition names array size must be equal the partitions count'; - END IF; - - IF array_length(tablespaces, 1) != partitions_count THEN - RAISE EXCEPTION 'Partition tablespaces array size must be equal the partitions count'; - END IF; - /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, attribute, diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index c8d8595c..70ac0374 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -35,6 +35,13 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY['calamity.p1']::TEXT[]); +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); + + /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 8f66cf0a..810c6d76 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -54,7 +54,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) Oid parent_relid = PG_GETARG_OID(0); const char *partitioned_col_name = TextDatumGetCString(PG_GETARG_DATUM(1)); Oid partitioned_col_type; - uint32 part_count = PG_GETARG_INT32(2), + uint32 partitions_count = PG_GETARG_INT32(2), i; /* Partition names and tablespaces */ @@ -84,6 +84,14 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) if (relnames && tablespaces && relnames_size != tablespaces_size) elog(ERROR, "sizes of arrays 'relnames' and 'tablespaces' are different"); + /* Validate size of 'relnames' */ + if (relnames && relnames_size != partitions_count) + elog(ERROR, "size of array 'relnames' must be equal to 'partitions_count'"); + + /* Validate size of 
'tablespaces' */ + if (tablespaces && tablespaces_size != partitions_count) + elog(ERROR, "size of array 'tablespaces' must be equal to 'partitions_count'"); + /* Convert partition names into RangeVars */ if (relnames) { @@ -97,13 +105,13 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) } /* Finally create HASH partitions */ - for (i = 0; i < part_count; i++) + for (i = 0; i < partitions_count; i++) { RangeVar *partition_rv = rangevars ? rangevars[i] : NULL; char *tablespace = tablespaces ? tablespaces[i] : NULL; /* Create a partition (copy FKs, invoke callbacks etc) */ - create_single_hash_partition_internal(parent_relid, i, part_count, + create_single_hash_partition_internal(parent_relid, i, partitions_count, partitioned_col_type, partition_rv, tablespace); } From 3410d7dbe37ed3854ecaad6d8d6e1d7736b2683a Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 9 Feb 2017 16:20:44 +0300 Subject: [PATCH 0175/1124] range partitioning for text partitioning key --- src/init.c | 20 +++++++++++++++----- src/partition_creation.c | 17 ++++++++++------- src/pg_pathman.c | 15 +++++++-------- src/pl_range_funcs.c | 12 ++++++------ src/relation_info.h | 8 ++++++-- 5 files changed, 44 insertions(+), 28 deletions(-) diff --git a/src/init.c b/src/init.c index 8b9e9084..727bb132 100644 --- a/src/init.c +++ b/src/init.c @@ -63,6 +63,15 @@ PathmanInitState pg_pathman_init_state; /* Shall we install new relcache callback? */ static bool relcache_callback_needed = true; +/* + * Comparison function info. 
This structure is only needed to pass FmgrInfo and + * collation to qsort + */ +typedef struct cmp_func_info +{ + FmgrInfo flinfo; + Oid collid; +} cmp_func_info; /* Functions for various local caches */ static bool init_pathman_relation_oids(void); @@ -449,15 +458,16 @@ fill_prel_with_partitions(const Oid *partitions, if (prel->parttype == PT_RANGE) { MemoryContext old_mcxt; - FmgrInfo flinfo; + cmp_func_info cmp_info; /* Prepare function info */ - fmgr_info(prel->cmp_proc, &flinfo); + fmgr_info(prel->cmp_proc, &cmp_info.flinfo); + cmp_info.collid = prel->attcollid; /* Sort partitions by RangeEntry->min asc */ qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), sizeof(RangeEntry), cmp_range_entries, - (void *) &flinfo); + (void *) &cmp_info); /* Initialize 'prel->children' array */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -921,9 +931,9 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) { const RangeEntry *v1 = (const RangeEntry *) p1; const RangeEntry *v2 = (const RangeEntry *) p2; - FmgrInfo *flinfo = (FmgrInfo *) arg; + cmp_func_info *info = (cmp_func_info *) arg; - return cmp_bounds(flinfo, &v1->min, &v2->min); + return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } /* Validates a single expression of kind VAR >= CONST or VAR < CONST */ diff --git a/src/partition_creation.c b/src/partition_creation.c index d66844be..23b9b30c 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -54,7 +54,8 @@ static Oid spawn_partitions_val(Oid parent_relid, Datum interval_binary, Oid interval_type, Datum value, - Oid value_type); + Oid value_type, + Oid collid); static void create_single_partition_common(Oid partition_relid, Constraint *check_constraint, @@ -395,7 +396,8 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) partid = spawn_partitions_val(PrelParentRelid(prel), &bound_min, &bound_max, base_bound_type, interval_binary, interval_type, - value, base_value_type); + value, 
base_value_type, + prel->attcollid); } } else @@ -463,7 +465,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ Datum interval_binary, /* interval in binary form */ Oid interval_type, /* INTERVALOID or prel->atttype */ Datum value, /* value to be INSERTed */ - Oid value_type) /* type of value */ + Oid value_type, /* type of value */ + Oid collid) /* collation id */ { bool should_append; /* append or prepend? */ @@ -489,7 +492,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ errdetail("both bounds are infinite"))); /* value >= MAX_BOUNDARY */ - else if (cmp_bounds(&cmp_value_bound_finfo, + else if (cmp_bounds(&cmp_value_bound_finfo, collid, &value_bound, range_bound_max) >= 0) { should_append = true; @@ -497,7 +500,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ } /* value < MIN_BOUNDARY */ - else if (cmp_bounds(&cmp_value_bound_finfo, + else if (cmp_bounds(&cmp_value_bound_finfo, collid, &value_bound, range_bound_min) < 0) { should_append = false; @@ -1214,8 +1217,8 @@ check_range_available(Oid parent_relid, { int c1, c2; - c1 = cmp_bounds(&cmp_func, start, &ranges[i].max); - c2 = cmp_bounds(&cmp_func, end, &ranges[i].min); + c1 = cmp_bounds(&cmp_func, prel->attcollid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->attcollid, end, &ranges[i].min); /* There's something! */ if (c1 < 0 && c2 > 0) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 808d18e4..999b8fac 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -456,6 +456,9 @@ select_range_partitions(const Datum value, Oid collid, WrapperNode *result) { +#define cmp_call(value1, value2) \ + DatumGetInt32(FunctionCall2Coll(cmp_func, collid, value1, value2)) + const RangeEntry *current_re; bool lossy = false, is_less, @@ -488,9 +491,9 @@ select_range_partitions(const Datum value, /* Corner cases */ cmp_min = IsInfinite(&ranges[startidx].min) ? 
- 1 : DatumGetInt32(FunctionCall2Coll(cmp_func, collid, value, BoundGetValue(&ranges[startidx].min))); + 1 : cmp_call(value, BoundGetValue(&ranges[startidx].min)); cmp_max = IsInfinite(&ranges[endidx].max) ? - -1 : DatumGetInt32(FunctionCall2Coll(cmp_func, collid, value, BoundGetValue(&ranges[endidx].max))); + -1 : cmp_call(value, BoundGetValue(&ranges[endidx].max)); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || @@ -538,13 +541,9 @@ select_range_partitions(const Datum value, current_re = &ranges[i]; cmp_min = IsInfinite(¤t_re->min) ? - 1 : - FunctionCall2(cmp_func, value, - BoundGetValue(¤t_re->min)); + 1 : cmp_call(value, BoundGetValue(¤t_re->min)); cmp_max = IsInfinite(¤t_re->max) ? - -1 : - FunctionCall2(cmp_func, value, - BoundGetValue(¤t_re->max)); + -1 : cmp_call(value, BoundGetValue(¤t_re->max)); is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f5e879ad..1b13dd0c 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -36,7 +36,7 @@ static ArrayType *construct_infinitable_array(Bound *elems, int elmlen, bool elmbyval, char elmalign); -static void check_range_adjacence(Oid cmp_proc, List *ranges); +static void check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges); static void merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts); @@ -490,7 +490,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } /* Check that partitions are adjacent */ - check_range_adjacence(prel->cmp_proc, rentry_list); + check_range_adjacence(prel->cmp_proc, prel->attcollid, rentry_list); /* First determine the bounds of a new constraint */ first = (RangeEntry *) linitial(rentry_list); @@ -498,7 +498,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Swap ranges 
if 'last' < 'first' */ fmgr_info(prel->cmp_proc, &cmp_proc); - if (cmp_bounds(&cmp_proc, &last->min, &first->min) < 0) + if (cmp_bounds(&cmp_proc, prel->attcollid, &last->min, &first->min) < 0) { RangeEntry *tmp = last; @@ -762,7 +762,7 @@ construct_infinitable_array(Bound *elems, * Check that range entries are adjacent */ static void -check_range_adjacence(Oid cmp_proc, List *ranges) +check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) { ListCell *lc; RangeEntry *last = NULL; @@ -782,8 +782,8 @@ check_range_adjacence(Oid cmp_proc, List *ranges) } /* Check that last and current partitions are adjacent */ - if ((cmp_bounds(&finfo, &last->max, &cur->min) != 0) && - (cmp_bounds(&finfo, &cur->max, &last->min) != 0)) + if ((cmp_bounds(&finfo, collid, &last->max, &cur->min) != 0) && + (cmp_bounds(&finfo, collid, &cur->max, &last->min) != 0)) { elog(ERROR, "partitions \"%s\" and \"%s\" are not adjacent", get_rel_name(last->child_oid), diff --git a/src/relation_info.h b/src/relation_info.h index b0cdd5f2..631866ec 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -81,7 +81,7 @@ BoundGetValue(const Bound *bound) } inline static int -cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) +cmp_bounds(FmgrInfo *cmp_func, Oid collid, const Bound *b1, const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) return -1; @@ -90,7 +90,11 @@ cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) Assert(cmp_func); - return FunctionCall2(cmp_func, BoundGetValue(b1), BoundGetValue(b2)); + return DatumGetInt32( + FunctionCall2Coll(cmp_func, + collid, + BoundGetValue(b1), + BoundGetValue(b2))); } From 43fefff00ac04eac38902250ae5e959127748a1c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 16:27:15 +0300 Subject: [PATCH 0176/1124] clean code (function drop_check_constraint()) --- src/partition_creation.c | 51 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git 
a/src/partition_creation.c b/src/partition_creation.c index d66844be..0a747738 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1095,6 +1095,32 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) * ----------------------------- */ +/* Drop pg_pathman's check constraint by 'relid' and 'attnum' */ +void +drop_check_constraint(Oid relid, AttrNumber attnum) +{ + char *constr_name; + AlterTableStmt *stmt; + AlterTableCmd *cmd; + + /* Build a correct name for this constraint */ + constr_name = build_check_constraint_name_relid_internal(relid, attnum); + + stmt = makeNode(AlterTableStmt); + stmt->relation = makeRangeVarFromRelid(relid); + stmt->relkind = OBJECT_TABLE; + + cmd = makeNode(AlterTableCmd); + cmd->subtype = AT_DropConstraint; + cmd->name = constr_name; + cmd->behavior = DROP_RESTRICT; + cmd->missing_ok = true; + + stmt->cmds = list_make1(cmd); + + AlterTable(relid, ShareUpdateExclusiveLock, stmt); +} + /* Build RANGE check constraint expression tree */ Node * build_raw_range_check_tree(char *attname, @@ -1383,31 +1409,6 @@ make_int_value_struct(int int_val) return val; } -void -drop_check_constraint(Oid relid, AttrNumber attnum) -{ - char *constr_name; - AlterTableStmt *stmt; - AlterTableCmd *cmd; - - /* Build a correct name for this constraint */ - constr_name = build_check_constraint_name_relid_internal(relid, attnum); - - stmt = makeNode(AlterTableStmt); - stmt->relation = makeRangeVarFromRelid(relid); - stmt->relkind = OBJECT_TABLE; - - cmd = makeNode(AlterTableCmd); - cmd->subtype = AT_DropConstraint; - cmd->name = constr_name; - cmd->behavior = DROP_RESTRICT; - cmd->missing_ok = true; - - stmt->cmds = list_make1(cmd); - - AlterTable(relid, ShareUpdateExclusiveLock, stmt); -} - static RangeVar * makeRangeVarFromRelid(Oid relid) { From b78e4a7f29724b87f350e4ba61d2f544af74e799 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 16:41:03 +0300 Subject: [PATCH 0177/1124] clean code, comments --- init.sql | 20 
++++++++++++-------- src/partition_creation.c | 11 +++++++---- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/init.sql b/init.sql index 2ec64a27..33961d2f 100644 --- a/init.sql +++ b/init.sql @@ -646,17 +646,20 @@ $$ LANGUAGE plpgsql STRICT; /* * Set new relname, schema and tablespace */ -CREATE OR REPLACE FUNCTION @extschema@.alter_partition(relation REGCLASS, - new_name TEXT, - new_schema REGNAMESPACE, - new_tablespace TEXT) +CREATE OR REPLACE FUNCTION @extschema@.alter_partition( + relation REGCLASS, + new_name TEXT, + new_schema REGNAMESPACE, + new_tablespace TEXT) RETURNS VOID AS $$ DECLARE - orig_name TEXT; - orig_schema OID; + orig_name TEXT; + orig_schema OID; + BEGIN - SELECT relname, relnamespace FROM pg_class WHERE oid = relation + SELECT relname, relnamespace FROM pg_class + WHERE oid = relation INTO orig_name, orig_schema; /* Alter table name */ @@ -680,7 +683,8 @@ $$ LANGUAGE plpgsql; /* * Partitioning key */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key(relid REGCLASS) +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + relid REGCLASS) RETURNS TEXT AS $$ SELECT attname FROM pathman_config WHERE partrel = relid; diff --git a/src/partition_creation.c b/src/partition_creation.c index 0a747738..49330a91 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1136,12 +1136,12 @@ build_raw_range_check_tree(char *attname, ColumnRef *col_ref = makeNode(ColumnRef); /* Partitioned column */ - col_ref->fields = list_make1(makeString(attname)); + col_ref->fields = list_make1(makeString(attname)); col_ref->location = -1; - and_oper->boolop = AND_EXPR; - and_oper->args = NIL; - and_oper->location = -1; + and_oper->boolop = AND_EXPR; + and_oper->args = NIL; + and_oper->location = -1; /* Left comparison (VAR >= start_value) */ if (!IsInfinite(start_value)) @@ -1156,6 +1156,7 @@ build_raw_range_check_tree(char *attname, left_arg->lexpr = (Node *) col_ref; left_arg->rexpr = (Node *) left_const; left_arg->location 
= -1; + and_oper->args = lappend(and_oper->args, left_arg); } @@ -1172,9 +1173,11 @@ build_raw_range_check_tree(char *attname, right_arg->lexpr = (Node *) col_ref; right_arg->rexpr = (Node *) right_const; right_arg->location = -1; + and_oper->args = lappend(and_oper->args, right_arg); } + /* (-inf, +inf) */ if (and_oper->args == NIL) elog(ERROR, "cannot create infinite range constraint"); From 99eb0c13ec9db8e9117495bdc3a9a8d76ded1b50 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 18:35:37 +0300 Subject: [PATCH 0178/1124] change infinity macros (Bound) --- src/relation_info.h | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/src/relation_info.h b/src/relation_info.h index b0cdd5f2..9c8956bb 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -23,24 +23,18 @@ /* Range bound */ typedef struct { - Datum value; /* Actual value if not infinite */ - uint8 is_infinite; /* bitmask where the least significant bit - is indicates if the bound is infinite and - the second one indicates if bound - is negative */ + Datum value; /* actual value if not infinite */ + int8 is_infinite; /* -inf | +inf | finite */ } Bound; -#define BOUND_INFINITY_MASK 0x01 -#define BOUND_NEGATIVE_MASK 0x02 +#define FINITE ( 0 ) +#define PLUS_INFINITY ( +1 ) +#define MINUS_INFINITY ( -1 ) -#define FINITE 0 -#define PLUS_INFINITY (BOUND_INFINITY_MASK) -#define MINUS_INFINITY (BOUND_INFINITY_MASK | BOUND_NEGATIVE_MASK) - -#define IsInfinite(i) ((i)->is_infinite & BOUND_INFINITY_MASK) -#define IsPlusInfinity(i) (IsInfinite(i) && !((i)->is_infinite & BOUND_NEGATIVE_MASK)) -#define IsMinusInfinity(i) (IsInfinite(i) && ((i)->is_infinite & BOUND_NEGATIVE_MASK)) +#define IsInfinite(i) ( (i)->is_infinite != FINITE ) +#define IsPlusInfinity(i) ( (i)->is_infinite == PLUS_INFINITY ) +#define IsMinusInfinity(i) ( (i)->is_infinite == MINUS_INFINITY ) inline static Bound @@ -65,7 +59,7 @@ MakeBound(Datum value) } inline static Bound 
-MakeBoundInf(uint8 infinity_type) +MakeBoundInf(int8 infinity_type) { Bound bound = { (Datum) 0, infinity_type }; From 167330bd76e36d66ce05a38476cebb49f19a49b9 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 9 Feb 2017 19:00:32 +0300 Subject: [PATCH 0179/1124] Fix convertiong regprocedure to text, small refactoring --- hash.sql | 2 +- init.sql | 21 +++++++++++++++------ range.sql | 2 +- src/partition_creation.c | 8 ++++---- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/hash.sql b/hash.sql index c831bb4a..dd8b0db8 100644 --- a/hash.sql +++ b/hash.sql @@ -141,7 +141,7 @@ BEGIN /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, NULL) + SELECT init_callback FROM stub_callback LEFT JOIN @extschema@.pathman_config_params AS params ON params.partrel = parent_relid diff --git a/init.sql b/init.sql index fb15d7c6..4a64ca97 100644 --- a/init.sql +++ b/init.sql @@ -52,7 +52,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback TEXT, + init_callback TEXT DEFAULT NULL, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ @@ -157,12 +157,21 @@ CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( callback REGPROC DEFAULT 0) RETURNS VOID AS $$ +DECLARE + regproc_text TEXT; BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'init_callback', - CASE WHEN callback <> 0 - THEN regprocedureout(callback)::text - ELSE NULL END); - + IF callback != 0 THEN + EXECUTE 'SELECT quote_ident(nspname) || ''.'' || quote_ident(proname)' + ' || ''('' || (SELECT string_agg(x.argtype::regtype::text, '','')' + ' FROM unnest(proargtypes) AS x(argtype))' + ' || '')''' + 'FROM pg_proc p JOIN pg_namespace n ON n.oid=p.pronamespace WHERE p.oid=$1' + INTO regproc_text + USING 
callback; + ELSE + regproc_text := NULL; + END IF; + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); END $$ LANGUAGE plpgsql STRICT; diff --git a/range.sql b/range.sql index 5ebf25e9..eed72004 100644 --- a/range.sql +++ b/range.sql @@ -1051,7 +1051,7 @@ BEGIN /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, NULL) + SELECT init_callback FROM stub_callback LEFT JOIN @extschema@.pathman_config_params AS params ON params.partrel = parent_relid diff --git a/src/partition_creation.c b/src/partition_creation.c index bfb0f076..2ec7b9aa 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1394,13 +1394,13 @@ make_int_value_struct(int int_val) /* * Utility function that converts signature of procedure into regprocedure. * - * Precondition: proname_args != NULL. + * Precondition: proc_signature != NULL. * * Returns InvalidOid if proname_args is not found. * Raise error if it's incorrect. 
*/ static Oid -text2regprocedure(text *proname_args) +text2regprocedure(text *proc_signature) { FunctionCallInfoData fcinfo; Datum result; @@ -1408,9 +1408,9 @@ text2regprocedure(text *proname_args) InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); #if PG_VERSION_NUM >= 90600 - fcinfo.arg[0] = PointerGetDatum(proname_args); + fcinfo.arg[0] = PointerGetDatum(proc_signature); #else - fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proname_args)); + fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); #endif fcinfo.argnull[0] = false; From 3b9590bfa5bdda00bf4acb69b8d68b4532777e40 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 9 Feb 2017 20:02:26 +0300 Subject: [PATCH 0180/1124] check that interval isnt trivial --- src/pl_range_funcs.c | 106 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 103 insertions(+), 3 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 2fc1a41f..0cd7eebc 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -26,7 +26,9 @@ #include "utils/array.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/numeric.h" #include "utils/ruleutils.h" +#include "utils/syscache.h" static char *deparse_constraint(Oid relid, Node *expr); @@ -48,7 +50,9 @@ static void modify_range_constraint(Oid child_relid, const Bound *upper); static char *get_qualified_rel_name(Oid relid); static void drop_table_by_oid(Oid relid); - +static bool interval_is_trivial(Oid atttype, + Datum interval, + Oid interval_type); /* Function declarations */ @@ -620,7 +624,9 @@ validate_interval_value(PG_FUNCTION_ARGS) Oid partrel = PG_GETARG_OID(0); text *attname = PG_GETARG_TEXT_P(1); PartType parttype = DatumGetPartType(PG_GETARG_DATUM(2)); - Datum range_interval = PG_GETARG_DATUM(3); + Datum interval_text = PG_GETARG_DATUM(3); + Datum interval_value; + Oid interval_type; char *attname_cstr; Oid atttype; /* type of partitioned attribute */ @@ -643,12 +649,106 @@ 
validate_interval_value(PG_FUNCTION_ARGS) atttype = get_attribute_type(partrel, attname_cstr, false); /* Try converting textual representation */ - extract_binary_interval_from_text(range_interval, atttype, NULL); + interval_value = extract_binary_interval_from_text(interval_text, + atttype, + &interval_type); + + /* Check that interval isn't trivial */ + if (interval_is_trivial(atttype, interval_value, interval_type)) + elog(ERROR, "Interval must not be trivial"); PG_RETURN_BOOL(true); } +/* + * Check that interval is somehow significant to avoid of infinite loops while + * adding new partitions + * + * The main idea behind this function is to add specified interval to some + * default value (zero for numeric types and '1970-01-01' for datetime types) + * and look if it is changed. If it is then return true. + */ +static bool +interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) +{ + Datum default_value; + Datum op_result; + Oid op_result_type; + Operator op; + Oid op_func; + FmgrInfo cmp_func; + + /* Generate default value */ + switch(atttype) + { + case INT2OID: + case INT4OID: + case INT8OID: + default_value = Int16GetDatum(0); + break; + case FLOAT4OID: + default_value = Float4GetDatum(0); + break; + case FLOAT8OID: + default_value = Float8GetDatum(0); + break; + case NUMERICOID: + default_value = NumericGetDatum(0); + break; + case TIMESTAMPOID: + case TIMESTAMPTZOID: + default_value = TimestampGetDatum(GetCurrentTimestamp()); + break; + case DATEOID: + { + Datum ts = TimestampGetDatum(GetCurrentTimestamp()); + + default_value = perform_type_cast(ts, TIMESTAMPTZOID, DATEOID, NULL); + } + break; + default: + return false; + } + + /* Find suitable addition operator for default value and interval */ + op = get_binary_operator("+", atttype, interval_type); + if (!op) + elog(ERROR, "missing \"+\" operator for types %s and %s", + format_type_be(atttype), + format_type_be(interval_type)); + + op_func = oprfuncid(op); + op_result_type = 
get_operator_ret_type(op); + ReleaseSysCache(op); + + /* Invoke addition operator and get a result*/ + op_result = OidFunctionCall2(op_func, default_value, interval); + + /* + * If operator result type isn't the same as original value then + * convert it + */ + if (op_result_type != atttype) + { + op_result = perform_type_cast(op_result, op_result_type, atttype, NULL); + op_result_type = atttype; + } + + /* + * Compare it to the default_value. If they are the same then obviously + * interval is trivial + */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(atttype), + getBaseType(op_result_type)); + if (DatumGetInt32(FunctionCall2(&cmp_func, default_value, op_result)) == 0) + return true; + + return false; +} + + /* * ------------------ * Helper functions From 338a2cd6d5ed3fb7beebebe6ce2d3638065cd3aa Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 20:16:43 +0300 Subject: [PATCH 0181/1124] improve function set_init_callback(), other fixes --- expected/pathman_callbacks.out | 2 +- init.sql | 24 ++++++---- src/partition_creation.c | 83 +++++++++++++++++----------------- 3 files changed, 57 insertions(+), 52 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 0df2afa0..dfd7da2a 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -150,7 +150,7 @@ INSERT INTO abc VALUES (201, 0); WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "public", "partition_schema": "public"} DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO abc VALUES (301, 0); -ERROR: create_partitions_internal(): callback function "]callbacks.abc_on_part_created_callback(jsonb)" doesn't exist +ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( args JSONB) RETURNS VOID AS $$ diff --git a/init.sql 
b/init.sql index 4a64ca97..030ab54d 100644 --- a/init.sql +++ b/init.sql @@ -158,19 +158,23 @@ CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( RETURNS VOID AS $$ DECLARE - regproc_text TEXT; + regproc_text TEXT := NULL; + BEGIN + + /* Fetch schema-qualified name of callback */ IF callback != 0 THEN - EXECUTE 'SELECT quote_ident(nspname) || ''.'' || quote_ident(proname)' - ' || ''('' || (SELECT string_agg(x.argtype::regtype::text, '','')' - ' FROM unnest(proargtypes) AS x(argtype))' - ' || '')''' - 'FROM pg_proc p JOIN pg_namespace n ON n.oid=p.pronamespace WHERE p.oid=$1' - INTO regproc_text - USING callback; - ELSE - regproc_text := NULL; + SELECT quote_ident(nspname) || '.' || + quote_ident(proname) || '(' || + (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM unnest(proargtypes) AS x(argtype)) || + ')' + FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n + ON n.oid = p.pronamespace + WHERE p.oid = callback + INTO regproc_text; /* <= result */ END IF; + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); END $$ diff --git a/src/partition_creation.c b/src/partition_creation.c index 2ec7b9aa..8a0ed000 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -78,7 +78,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); -static Oid text2regprocedure(text *proname_args); +static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); @@ -400,19 +400,20 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { ErrorData *edata; + /* Simply rethrow ERROR if we're in backend */ + if (!IsBackgroundWorker) + PG_RE_THROW(); + /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); edata = CopyErrorData(); FlushErrorState(); - 
if (IsBackgroundWorker) - ereport(LOG, - (errmsg("create_partitions_internal(): %s [%u]", edata->message, MyProcPid), - (edata->detail) ? errdetail("%s", edata->detail) : 0)); - else - ereport(ERROR, - (errmsg("create_partitions_internal(): %s", edata->message), - (edata->detail) ? errdetail("%s", edata->detail) : 0)); + /* Produce log message if we're in BGW */ + ereport(LOG, + (errmsg(CppAsString(create_partitions_for_value_internal) ": %s [%u]", + edata->message, MyProcPid), + (edata->detail) ? errdetail("%s", edata->detail) : 0)); FreeErrorData(edata); @@ -1391,34 +1392,6 @@ make_int_value_struct(int int_val) return val; } -/* - * Utility function that converts signature of procedure into regprocedure. - * - * Precondition: proc_signature != NULL. - * - * Returns InvalidOid if proname_args is not found. - * Raise error if it's incorrect. - */ -static Oid -text2regprocedure(text *proc_signature) -{ - FunctionCallInfoData fcinfo; - Datum result; - - InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); - -#if PG_VERSION_NUM >= 90600 - fcinfo.arg[0] = PointerGetDatum(proc_signature); -#else - fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); -#endif - fcinfo.argnull[0] = false; - - result = to_regprocedure(&fcinfo); - - return DatumGetObjectId(result); -} - /* * --------------------- @@ -1467,14 +1440,14 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Cache init_callback's Oid */ if (init_cb_datum) { - cb_params->callback = text2regprocedure( - DatumGetTextP(init_cb_datum)); + /* Try fetching callback's Oid */ + cb_params->callback = text_to_regprocedure(DatumGetTextP(init_cb_datum)); if (!RegProcedureIsValid(cb_params->callback)) ereport(ERROR, (errcode(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION), - errmsg("callback function \"%s\" doesn't exist", - DatumGetCString(init_cb_datum)))); + errmsg("callback function \"%s\" does not exist", + TextDatumGetCString(init_cb_datum)))); } else cb_params->callback = InvalidOid; @@ 
-1609,3 +1582,31 @@ validate_part_callback(Oid procid, bool emit_error) return is_ok; } + +/* + * Utility function that converts signature of procedure into regprocedure. + * + * Precondition: proc_signature != NULL. + * + * Returns InvalidOid if proname_args is not found. + * Raise error if it's incorrect. + */ +static Oid +text_to_regprocedure(text *proc_signature) +{ + FunctionCallInfoData fcinfo; + Datum result; + + InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); + +#if PG_VERSION_NUM >= 90600 + fcinfo.arg[0] = PointerGetDatum(proc_signature); +#else + fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); +#endif + fcinfo.argnull[0] = false; + + result = to_regprocedure(&fcinfo); + + return DatumGetObjectId(result); +} From 0116da341bf3ed42e75664786ac5265cf30e04a6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Feb 2017 23:03:23 +0300 Subject: [PATCH 0182/1124] improved init callback API, tests --- expected/pathman_callbacks.out | 75 ++++++++++++++++++++++++++++++---- expected/pathman_inserts.out | 2 +- init.sql | 12 ++++-- sql/pathman_callbacks.sql | 48 ++++++++++++++++++---- sql/pathman_inserts.sql | 2 +- src/partition_creation.c | 4 +- src/pl_funcs.c | 13 +----- 7 files changed, 120 insertions(+), 36 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index dfd7da2a..91ec6e2d 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -2,14 +2,18 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; /* Check callbacks */ -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( - args JSONB) +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -/* set callback to be called on RANGE partitions */ +/* callback is in public namespace, must be schema-qualified */ +CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) +RETURNS 
VOID AS $$ +BEGIN +END +$$ language plpgsql; CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); NOTICE: sequence "abc_seq" does not exist, skipping @@ -18,8 +22,62 @@ NOTICE: sequence "abc_seq" does not exist, skipping 2 (1 row) +SELECT set_init_callback('callbacks.abc', 'public.dummy_cb(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +------------------------ + public.dummy_cb(jsonb) +(1 row) + +/* reset callback */ +SELECT set_init_callback('callbacks.abc'); + set_init_callback +------------------- + +(1 row) + +/* should return NULL */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +--------------- + +(1 row) + +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +----------------------------------------------- + callbacks.abc_on_part_created_callback(jsonb) +(1 row) + +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 2 other objects +/* set callback to be called on RANGE partitions */ +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + SELECT set_init_callback('callbacks.abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- @@ -90,7 +148,7 @@ NOTICE: 0 rows copied from callbacks.abc_7 /* set callback to be called on HASH partitions */ SELECT set_init_callback('callbacks.abc', - 
'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- @@ -112,7 +170,7 @@ NOTICE: drop cascades to 5 other objects /* create table in public schema */ CREATE TABLE abc(a serial, b int); SELECT set_init_callback('abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- @@ -132,7 +190,7 @@ NOTICE: drop cascades to 2 other objects /* test the temprary deletion of callback function */ CREATE TABLE abc(a serial, b int); SELECT set_init_callback('abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- @@ -151,8 +209,7 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO abc VALUES (301, 0); ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( - args JSONB) +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 334fcfd2..7562b99b 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -40,7 +40,7 @@ BEGIN EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); END; $$ LANGUAGE plpgsql; -SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers'); +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); set_init_callback ------------------- diff --git a/init.sql b/init.sql index c7685c48..3b77b14a 100644 --- a/init.sql +++ b/init.sql @@ -56,10 
+56,10 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( * NOTE: this function is used in CHECK CONSTRAINT. */ CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( - callback TEXT, + callback REGPROCEDURE, raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' -LANGUAGE C; +LANGUAGE C STRICT; /* @@ -77,7 +77,11 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( init_callback TEXT DEFAULT NULL, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE - CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ + /* check callback's signature */ + CHECK (@extschema@.validate_part_callback(CASE WHEN init_callback IS NULL + THEN 0::REGPROCEDURE + ELSE init_callback::REGPROCEDURE + END)) ); GRANT SELECT, INSERT, UPDATE, DELETE @@ -176,7 +180,7 @@ LANGUAGE plpgsql STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( relation REGCLASS, - callback REGPROC DEFAULT 0) + callback REGPROCEDURE DEFAULT 0) RETURNS VOID AS $$ DECLARE diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 4c997dd3..53b08871 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -5,8 +5,7 @@ CREATE SCHEMA callbacks; /* Check callbacks */ -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( - args JSONB) +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; @@ -14,12 +13,46 @@ END $$ language plpgsql; + +/* callback is in public namespace, must be schema-qualified */ +CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) +RETURNS VOID AS $$ +BEGIN +END +$$ language plpgsql; + +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + +SELECT set_init_callback('callbacks.abc', 'public.dummy_cb(jsonb)'); + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE 
partrel = 'callbacks.abc'::REGCLASS; + +/* reset callback */ +SELECT set_init_callback('callbacks.abc'); + +/* should return NULL */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + +DROP TABLE callbacks.abc CASCADE; + + /* set callback to be called on RANGE partitions */ CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); SELECT set_init_callback('callbacks.abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); INSERT INTO callbacks.abc VALUES (123, 1); INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ @@ -40,7 +73,7 @@ SELECT drop_partitions('callbacks.abc'); /* set callback to be called on HASH partitions */ SELECT set_init_callback('callbacks.abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); SELECT create_hash_partitions('callbacks.abc', 'a', 5); DROP TABLE callbacks.abc CASCADE; @@ -48,7 +81,7 @@ DROP TABLE callbacks.abc CASCADE; /* create table in public schema */ CREATE TABLE abc(a serial, b int); SELECT set_init_callback('abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); SELECT create_range_partitions('abc', 'a', 1, 100, 2); DROP TABLE abc CASCADE; @@ -56,14 +89,13 @@ DROP TABLE abc CASCADE; /* test the temprary deletion of callback function */ CREATE TABLE abc(a serial, b int); SELECT set_init_callback('abc', - 'callbacks.abc_on_part_created_callback'); + 'callbacks.abc_on_part_created_callback(jsonb)'); SELECT create_range_partitions('abc', 'a', 1, 100, 2); INSERT INTO abc VALUES (201, 0); DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT 
INTO abc VALUES (301, 0); -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback( - args JSONB) +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 5eac38f4..19491b6d 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -38,7 +38,7 @@ BEGIN EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); END; $$ LANGUAGE plpgsql; -SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers'); +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); /* we don't support ON CONLICT */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') diff --git a/src/partition_creation.c b/src/partition_creation.c index e43009cb..73f53b8f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1622,7 +1622,7 @@ validate_part_callback(Oid procid, bool emit_error) tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(procid)); if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for function %u", procid); + elog(ERROR, "callback function %u does not exist", procid); functup = (Form_pg_proc) GETSTRUCT(tp); @@ -1635,7 +1635,7 @@ validate_part_callback(Oid procid, bool emit_error) if (emit_error && !is_ok) elog(ERROR, - "Callback function must have the following signature: " + "callback function must have the following signature: " "callback(arg JSONB) RETURNS VOID"); return is_ok; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1f4e5521..a17e0d79 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -792,17 +792,8 @@ prevent_relation_modification(PG_FUNCTION_ARGS) Datum validate_part_callback_pl(PG_FUNCTION_ARGS) { - const char *cb_cstring; - Oid cb_oid; - - if (PG_ARGISNULL(0)) - 
PG_RETURN_BOOL(true); - - cb_cstring = text_to_cstring(PG_GETARG_TEXT_P(0)); - cb_oid = DatumGetObjectId(DirectFunctionCall1(regprocedurein, - CStringGetDatum(cb_cstring))); - - PG_RETURN_BOOL(validate_part_callback(cb_oid, PG_GETARG_BOOL(1))); + PG_RETURN_BOOL(validate_part_callback(PG_GETARG_OID(0), + PG_GETARG_BOOL(1))); } /* From beaf8dda39663f39385387692b1e1c09dcd964f3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 07:02:39 +0300 Subject: [PATCH 0183/1124] fix python tests --- tests/python/partitioning_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index eb26ba63..6ca18970 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -773,8 +773,8 @@ def test_pg_dump(self): end $$ language plpgsql; """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback\')') + con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') + con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') # turn off enable_parent option con.execute('select set_enable_parent(\'range_partitioned\', false)') From b4dd837fd658c9b1ad30e20319d6bc98740cd025 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 14:11:22 +0300 Subject: [PATCH 0184/1124] reformat Pl/PgSQL code --- hash.sql | 1 + init.sql | 3 +-- range.sql | 51 +++++++++++++++++++++++---------------------------- 3 files changed, 25 insertions(+), 30 deletions(-) diff --git a/hash.sql b/hash.sql index 9fa956f0..1b99784b 100644 --- a/hash.sql +++ b/hash.sql @@ -273,6 +273,7 @@ BEGIN END $$ LANGUAGE plpgsql; + /* * Just create HASH partitions, called by create_hash_partitions(). 
*/ diff --git a/init.sql b/init.sql index 3b77b14a..c75418bd 100644 --- a/init.sql +++ b/init.sql @@ -95,8 +95,7 @@ CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; /* - * Row security policy to restrict partitioning operations to owner and - * superusers only + * Row security policy to restrict partitioning operations to owner and superusers only */ CREATE POLICY deny_modification ON @extschema@.pathman_config FOR ALL USING (check_security_policy(partrel)); diff --git a/range.sql b/range.sql index 817c6c1d..ed3e85db 100644 --- a/range.sql +++ b/range.sql @@ -529,15 +529,6 @@ END $$ LANGUAGE plpgsql; -/* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' -LANGUAGE C STRICT; - /* * The special case of merging two partitions */ @@ -656,7 +647,6 @@ END $$ LANGUAGE plpgsql; - /* * Prepend new partition. */ @@ -762,7 +752,6 @@ END $$ LANGUAGE plpgsql; - /* * Add new partition */ @@ -807,7 +796,6 @@ END $$ LANGUAGE plpgsql; - /* * Drop range partition */ @@ -874,20 +862,6 @@ $$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - -/* - * Drops partition and expands the next partition so that it cover dropped - * one - * - * This function was written in order to support Oracle-like ALTER TABLE ... - * DROP PARTITION. 
In Oracle partitions only have upper bound and when - * partition is dropped the next one automatically covers freed range - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next(relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' -LANGUAGE C STRICT; - - /* * Attach range partition */ @@ -966,7 +940,6 @@ END $$ LANGUAGE plpgsql; - /* * Detach range partition */ @@ -1010,7 +983,6 @@ END $$ LANGUAGE plpgsql; - /* * Creates an update trigger */ @@ -1107,6 +1079,29 @@ BEGIN END $$ LANGUAGE plpgsql; + +/* + * Merge multiple partitions. All data will be copied to the first one. + * The rest of partitions will be dropped. + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Drops partition and expands the next partition so that it cover dropped + * one + * + * This function was written in order to support Oracle-like ALTER TABLE ... + * DROP PARTITION. In Oracle partitions only have upper bound and when + * partition is dropped the next one automatically covers freed range + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( + partition REGCLASS) +RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' +LANGUAGE C STRICT; + /* * Creates new RANGE partition. Returns partition name. * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
From b074c0f9520b59fee51c6400220a41d7a124df29 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 10 Feb 2017 14:35:14 +0300 Subject: [PATCH 0185/1124] tests for set_interval() --- Makefile | 3 +- expected/pathman_interval.out | 88 +++++++++++++++++++++++++++++++++++ sql/pathman_interval.sql | 38 +++++++++++++++ src/pl_range_funcs.c | 54 ++++++++++++--------- 4 files changed, 161 insertions(+), 22 deletions(-) create mode 100644 expected/pathman_interval.out create mode 100644 sql/pathman_interval.sql diff --git a/Makefile b/Makefile index d06b2298..1c2f52c7 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,8 @@ REGRESS = pathman_basic \ pathman_permissions \ pathman_rowmarks \ pathman_utility_stmt_hooking \ - pathman_calamity + pathman_calamity \ + pathman_interval EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out new file mode 100644 index 00000000..592f2b26 --- /dev/null +++ b/expected/pathman_interval.out @@ -0,0 +1,88 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +/* Range partitions for INTEGER type */ +CREATE TABLE abc (id SERIAL); +SELECT create_range_partitions('abc', 'id', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('abc', NULL::INTEGER); + set_interval +-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO abc VALUES (250); +ERROR: could not create new partitions for relation "abc" +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +ERROR: interval must not be trivial +/* We also shouldn't be able to set a trivial interval directly in pathman_config table */ +UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; +ERROR: interval must not be trivial +/* Set a normal interval */ +SELECT set_interval('abc', 1000); + set_interval +-------------- + +(1 row) 
+ +INSERT INTO abc VALUES (250); +SELECT * FROM pathman_config; + partrel | attname | parttype | range_interval +---------+---------+----------+---------------- + abc | id | 2 | 1000 +(1 row) + +DROP TABLE abc cascade; +NOTICE: drop cascades to 3 other objects +/* Range partitions for DATE type */ +CREATE TABLE abc (dt DATE NOT NULL); +SELECT create_range_partitions('abc', 'dt', '2016-01-01'::DATE, '1 day'::INTERVAL, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('abc', NULL::INTERVAL); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('abc', '1 second'::INTERVAL); +ERROR: interval must not be trivial +/* Set a normal interval */ +SELECT set_interval('abc', '1 month'::INTERVAL); + set_interval +-------------- + +(1 row) + +SELECT * FROM pathman_config; + partrel | attname | parttype | range_interval +---------+---------+----------+---------------- + abc | dt | 2 | @ 1 mon +(1 row) + +DROP TABLE abc cascade; +NOTICE: drop cascades to 2 other objects +/* Hash partitioned table shouldn't accept any interval value */ +CREATE TABLE abc (id SERIAL); +SELECT create_hash_partitions('abc', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_interval('abc', 100); +ERROR: table "abc" is not partitioned by RANGE +SELECT set_interval('abc', NULL::INTEGER); +ERROR: table "abc" is not partitioned by RANGE +DROP TABLE abc cascade; +NOTICE: drop cascades to 3 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql new file mode 100644 index 00000000..d9d49d83 --- /dev/null +++ b/sql/pathman_interval.sql @@ -0,0 +1,38 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; + +/* Range partitions for INTEGER type */ +CREATE TABLE abc (id SERIAL); +SELECT create_range_partitions('abc', 'id', 0, 100, 2); +SELECT set_interval('abc', NULL::INTEGER); +/* pg_pathman shouldn't be able to create a new partition */ 
+INSERT INTO abc VALUES (250); +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +/* We also shouldn't be able to set a trivial interval directly in pathman_config table */ +UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; +/* Set a normal interval */ +SELECT set_interval('abc', 1000); +INSERT INTO abc VALUES (250); +SELECT * FROM pathman_config; +DROP TABLE abc cascade; + +/* Range partitions for DATE type */ +CREATE TABLE abc (dt DATE NOT NULL); +SELECT create_range_partitions('abc', 'dt', '2016-01-01'::DATE, '1 day'::INTERVAL, 2); +SELECT set_interval('abc', NULL::INTERVAL); +/* Set a trivial interval */ +SELECT set_interval('abc', '1 second'::INTERVAL); +/* Set a normal interval */ +SELECT set_interval('abc', '1 month'::INTERVAL); +SELECT * FROM pathman_config; +DROP TABLE abc cascade; + +/* Hash partitioned table shouldn't accept any interval value */ +CREATE TABLE abc (id SERIAL); +SELECT create_hash_partitions('abc', 'id', 3); +SELECT set_interval('abc', 100); +SELECT set_interval('abc', NULL::INTEGER); +DROP TABLE abc cascade; + +DROP EXTENSION pg_pathman; \ No newline at end of file diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0cd7eebc..ead56371 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -628,9 +628,6 @@ validate_interval_value(PG_FUNCTION_ARGS) Datum interval_value; Oid interval_type; - char *attname_cstr; - Oid atttype; /* type of partitioned attribute */ - if (PG_ARGISNULL(0)) elog(ERROR, "'partrel' should not be NULL"); @@ -640,34 +637,43 @@ validate_interval_value(PG_FUNCTION_ARGS) if (PG_ARGISNULL(2)) elog(ERROR, "'parttype' should not be NULL"); - /* it's OK if interval is NULL and table is HASH-partitioned */ - if (PG_ARGISNULL(3)) - PG_RETURN_BOOL(parttype == PT_HASH); + /* + * NULL interval is fine for both HASH and RANGE. 
But for RANGE we need + * to make some additional checks + */ + if (!PG_ARGISNULL(3)) + { + char *attname_cstr; + Oid atttype; /* type of partitioned attribute */ + + if (parttype == PT_HASH) + elog(ERROR, "interval must be NULL for HASH partitioned table"); - /* Convert attname to CSTRING and fetch column's type */ - attname_cstr = text_to_cstring(attname); - atttype = get_attribute_type(partrel, attname_cstr, false); + /* Convert attname to CSTRING and fetch column's type */ + attname_cstr = text_to_cstring(attname); + atttype = get_attribute_type(partrel, attname_cstr, false); - /* Try converting textual representation */ - interval_value = extract_binary_interval_from_text(interval_text, - atttype, - &interval_type); + /* Try converting textual representation */ + interval_value = extract_binary_interval_from_text(interval_text, + atttype, + &interval_type); - /* Check that interval isn't trivial */ - if (interval_is_trivial(atttype, interval_value, interval_type)) - elog(ERROR, "Interval must not be trivial"); + /* Check that interval isn't trivial */ + if (interval_is_trivial(atttype, interval_value, interval_type)) + elog(ERROR, "interval must not be trivial"); + } PG_RETURN_BOOL(true); } /* - * Check that interval is somehow significant to avoid of infinite loops while - * adding new partitions + * Check if interval is insignificant to avoid infinite loops while adding + * new partitions * * The main idea behind this function is to add specified interval to some - * default value (zero for numeric types and '1970-01-01' for datetime types) - * and look if it is changed. If it is then return true. + * default value (zero for numeric types and current date/timestamp for datetime + * types) and look if it is changed. If it is then return true. 
*/ static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) @@ -727,7 +733,13 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) /* * If operator result type isn't the same as original value then - * convert it + * convert it. We need this to make sure that specified interval would + * change the _origianal_ value somehow. For example, if we add one second + * to a date then we'll get a timestamp which is one second later than + * original date (obviously). But when we convert it back to a date we will + * get the same original value meaning that one second interval wouldn't + * change original value anyhow. We should consider such interval + * as trivial */ if (op_result_type != atttype) { From 06ccfc27654e4148b2a57bfba5f465d9e0f8a150 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 10 Feb 2017 14:39:23 +0300 Subject: [PATCH 0186/1124] intervals test fixed --- expected/pathman_interval.out | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 592f2b26..77afa709 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -16,7 +16,7 @@ SELECT set_interval('abc', NULL::INTEGER); /* pg_pathman shouldn't be able to create a new partition */ INSERT INTO abc VALUES (250); -ERROR: could not create new partitions for relation "abc" +ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ SELECT set_interval('abc', 0); ERROR: interval must not be trivial From 52375caa7e7e072b032a19a19fd0c748fd483d68 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 10 Feb 2017 15:30:59 +0300 Subject: [PATCH 0187/1124] documentation updated --- README.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c54b8b99..756ada3a 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ create_range_partitions(relation REGCLASS, p_count INTEGER DEFAULT NULL, 
partition_data BOOLEAN DEFAULT TRUE) ``` -Performs RANGE partitioning for `relation` by partitioning key `attribute`. `start_value` argument specifies initial value, `interval` sets the range of values in a single partition, `count` is the number of premade partitions (if not set then pathman tries to determine it based on attribute values). Partition creation callback is invoked for each partition if set beforehand. +Performs RANGE partitioning for `relation` by partitioning key `attribute`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on attribute values). Partition creation callback is invoked for each partition if set beforehand. ```plpgsql create_partitions_from_range(relation REGCLASS, @@ -167,6 +167,11 @@ merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) ``` Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed. +```plpgsql +merge_range_partitions(partitions REGCLASS[]) +``` +Merge several adjacent RANGE partitions (partitions must be specified in ascending or descending order). All the data will be accumulated in the first partition. + ```plpgsql append_range_partition(parent REGCLASS, partition_name TEXT DEFAULT NULL, @@ -188,7 +193,7 @@ add_range_partition(relation REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` -Create new RANGE partition for `relation` with specified range bounds. +Create new RANGE partition for `relation` with specified range bounds. If `start_value` or `end_value` are NULL then corresponding range bound will be infinite. 
```plpgsql drop_range_partition(partition TEXT, delete_data BOOLEAN DEFAULT TRUE) @@ -222,6 +227,12 @@ Drop partitions of the `parent` table (both foreign and local relations). If `de ### Additional parameters + +```plpgsql +set_interval(relation REGCLASS, value ANYELEMENT) +``` +Update RANGE partitioned table interval. + ```plpgsql set_enable_parent(relation REGCLASS, value BOOLEAN) ``` From 66f41913d4855d0ec1ae3f6d830983adc383c965 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 16:23:40 +0300 Subject: [PATCH 0188/1124] calamity tests for function drop_range_partition_expand_next() --- expected/pathman_calamity.out | 9 +++++++++ sql/pathman_calamity.sql | 4 ++++ src/pl_range_funcs.c | 12 ++++++------ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index cff73962..2b3c96c9 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -355,6 +355,15 @@ SELECT build_update_trigger_func_name(NULL) IS NULL; /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::regclass); ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + /* check invoke_on_partition_created_callback() for RANGE */ SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); ERROR: both bounds must be provided for RANGE partition diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 70ac0374..9c7b6f28 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -124,6 +124,10 @@ SELECT build_update_trigger_func_name(NULL) IS NULL; /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::regclass); +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); +SELECT drop_range_partition_expand_next(NULL) IS NULL; + /* check invoke_on_partition_created_callback() for RANGE */ SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index ead56371..278c882c 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -934,12 +934,12 @@ drop_table_by_oid(Oid relid) DropStmt *n = makeNode(DropStmt); const char *relname = get_qualified_rel_name(relid); - n->removeType = OBJECT_TABLE; - n->missing_ok = false; - n->objects = list_make1(stringToQualifiedNameList(relname)); - n->arguments = NIL; - n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; + n->removeType = OBJECT_TABLE; + n->missing_ok = false; + n->objects = list_make1(stringToQualifiedNameList(relname)); + n->arguments = NIL; + n->behavior = DROP_RESTRICT; /* default behavior */ + n->concurrent = false; RemoveRelations(n); } From ec40a7cff5e3af8220fdb1857afa07c92d02e184 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 10 Feb 2017 17:16:25 +0300 Subject: [PATCH 0189/1124] fix interval validation for negative values --- expected/pathman_interval.out 
| 3 +++ sql/pathman_interval.sql | 2 ++ src/pl_range_funcs.c | 8 +++++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 77afa709..36cdfe11 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -20,6 +20,9 @@ ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ SELECT set_interval('abc', 0); ERROR: interval must not be trivial +/* Set a negative interval */ +SELECT set_interval('abc', -100); +ERROR: interval must not be negative /* We also shouldn't be able to set a trivial interval directly in pathman_config table */ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; ERROR: interval must not be trivial diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index d9d49d83..92a93140 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -9,6 +9,8 @@ SELECT set_interval('abc', NULL::INTEGER); INSERT INTO abc VALUES (250); /* Set a trivial interval */ SELECT set_interval('abc', 0); +/* Set a negative interval */ +SELECT set_interval('abc', -100); /* We also shouldn't be able to set a trivial interval directly in pathman_config table */ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; /* Set a normal interval */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index ead56371..ea639e5d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -684,6 +684,7 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) Operator op; Oid op_func; FmgrInfo cmp_func; + int32 cmp_result; /* Generate default value */ switch(atttype) @@ -754,8 +755,13 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) fill_type_cmp_fmgr_info(&cmp_func, getBaseType(atttype), getBaseType(op_result_type)); - if (DatumGetInt32(FunctionCall2(&cmp_func, default_value, op_result)) == 0) + cmp_result = 
DatumGetInt32(FunctionCall2(&cmp_func, + default_value, + op_result)); + if (cmp_result == 0) return true; + else if (cmp_result > 0) /* Negative interval? */ + elog(ERROR, "interval must not be negative"); return false; } From 20ed60b19fa179eee2ade68150623024a54a3d24 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 17:38:06 +0300 Subject: [PATCH 0190/1124] add support for Codecov.io --- .gitignore | 1 + .travis.yml | 3 +++ travis/pg-travis-test.sh | 11 +++++------ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 54963e0b..6aa27542 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ regression.out *.pyc *.gcda *.gcno +*.gcov pg_pathman--1.2.sql diff --git a/.travis.yml b/.travis.yml index 36b5bc04..f0bcd93f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,3 +20,6 @@ env: - PGVERSION=9.5 CHECK_CODE=false script: bash ./travis/pg-travis-test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index ca22b0e6..e0e7bac0 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -66,7 +66,7 @@ if [ $CHECK_CODE = "true" ]; then fi # build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -fprofile-arcs -ftest-coverage" +make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" sudo make install USE_PGXS=1 PG_CONFIG=$config_path # set permission to write postgres locks @@ -103,10 +103,9 @@ cd ../.. 
set -u -# finally report code coverage -sudo apt-get install -qq -y lcov -gem install coveralls-lcov -lcov --no-extern --capture --directory src --output-file coverage.info -coveralls-lcov coverage.info + +#generate *.gcov files +gcov src/*.c src/*.h + exit $status From e0335063bd2ac8ed6fa06a17c4af3f5ff180f79e Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 10 Feb 2017 18:59:06 +0300 Subject: [PATCH 0191/1124] numeric and float interval validation fixes --- README.md | 2 +- expected/pathman_interval.out | 93 +++++++++++++++++++++++++++++++++++ sql/pathman_interval.sql | 40 +++++++++++++++ src/pl_range_funcs.c | 45 ++++++++++++++--- 4 files changed, 171 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 756ada3a..50caee65 100644 --- a/README.md +++ b/README.md @@ -231,7 +231,7 @@ Drop partitions of the `parent` table (both foreign and local relations). If `de ```plpgsql set_interval(relation REGCLASS, value ANYELEMENT) ``` -Update RANGE partitioned table interval. +Update RANGE partitioned table interval. Note that interval must not be negative and it must not be trivial, i.e. its value should be greater than zero for numeric types, at least 1 microsecond for `TIMESTAMP` and at least 1 day for `DATE`. 
```plpgsql set_enable_parent(relation REGCLASS, value BOOLEAN) diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 36cdfe11..03910e15 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -72,6 +72,99 @@ SELECT * FROM pathman_config; abc | dt | 2 | @ 1 mon (1 row) +DROP TABLE abc cascade; +NOTICE: drop cascades to 2 other objects +/* Range partitions for FLOAT4 type */ +CREATE TABLE abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('abc', NULL::FLOAT4); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +ERROR: interval must not be trivial +/* Set NaN float as interval */ +SELECT set_interval('abc', 'NaN'::FLOAT4); +ERROR: invalid floating point interval +/* Set INF float as interval */ +SELECT set_interval('abc', 'Infinity'::FLOAT4); +ERROR: invalid floating point interval +/* Set a normal interval */ +SELECT set_interval('abc', 100); + set_interval +-------------- + +(1 row) + +DROP TABLE abc cascade; +NOTICE: drop cascades to 2 other objects +/* Range partitions for FLOAT8 type */ +CREATE TABLE abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('abc', NULL::FLOAT8); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +ERROR: interval must not be trivial +/* Set NaN float as interval */ +SELECT set_interval('abc', 'NaN'::FLOAT8); +ERROR: invalid floating point interval +/* Set INF float as interval */ +SELECT set_interval('abc', 'Infinity'::FLOAT8); +ERROR: invalid floating point interval +/* Set a normal interval */ +SELECT set_interval('abc', 100); + set_interval +-------------- + +(1 row) + +DROP TABLE abc cascade; +NOTICE: drop cascades to 2 other objects +/* 
Range partitions for NUMERIC type */ +CREATE TABLE abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('abc', NULL::NUMERIC); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +ERROR: interval must not be trivial +/* Set NaN numeric as interval */ +SELECT set_interval('abc', 'NaN'::NUMERIC); +ERROR: invalid numeric interval +/* Set a normal interval */ +SELECT set_interval('abc', 100); + set_interval +-------------- + +(1 row) + DROP TABLE abc cascade; NOTICE: drop cascades to 2 other objects /* Hash partitioned table shouldn't accept any interval value */ diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index 92a93140..f5e1efa2 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -30,6 +30,46 @@ SELECT set_interval('abc', '1 month'::INTERVAL); SELECT * FROM pathman_config; DROP TABLE abc cascade; +/* Range partitions for FLOAT4 type */ +CREATE TABLE abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); +SELECT set_interval('abc', NULL::FLOAT4); +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +/* Set NaN float as interval */ +SELECT set_interval('abc', 'NaN'::FLOAT4); +/* Set INF float as interval */ +SELECT set_interval('abc', 'Infinity'::FLOAT4); +/* Set a normal interval */ +SELECT set_interval('abc', 100); +DROP TABLE abc cascade; + +/* Range partitions for FLOAT8 type */ +CREATE TABLE abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); +SELECT set_interval('abc', NULL::FLOAT8); +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +/* Set NaN float as interval */ +SELECT set_interval('abc', 'NaN'::FLOAT8); +/* Set INF float as interval */ +SELECT set_interval('abc', 'Infinity'::FLOAT8); +/* Set a normal interval */ +SELECT set_interval('abc', 100); +DROP TABLE abc cascade; + 
+/* Range partitions for NUMERIC type */ +CREATE TABLE abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('abc', 'x', 0, 100, 2); +SELECT set_interval('abc', NULL::NUMERIC); +/* Set a trivial interval */ +SELECT set_interval('abc', 0); +/* Set NaN numeric as interval */ +SELECT set_interval('abc', 'NaN'::NUMERIC); +/* Set a normal interval */ +SELECT set_interval('abc', 100); +DROP TABLE abc cascade; + /* Hash partitioned table shouldn't accept any interval value */ CREATE TABLE abc (id SERIAL); SELECT create_hash_partitions('abc', 'id', 3); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 2ea9fa41..b2d6635a 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -686,7 +686,10 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) FmgrInfo cmp_func; int32 cmp_result; - /* Generate default value */ + /* + * Generate default value. For float4 and float8 values we also check + * that they aren't NaN or INF + */ switch(atttype) { case INT2OID: @@ -695,14 +698,40 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) default_value = Int16GetDatum(0); break; case FLOAT4OID: - default_value = Float4GetDatum(0); - break; + { + float4 f = DatumGetFloat4(interval); + + if (isnan(f) || is_infinite(f)) + elog(ERROR, "invalid floating point interval"); + default_value = Float4GetDatum(0); + break; + } case FLOAT8OID: - default_value = Float8GetDatum(0); - break; + { + float8 f = DatumGetFloat8(interval); + + if (isnan(f) || is_infinite(f)) + elog(ERROR, "invalid floating point interval"); + default_value = Float8GetDatum(0); + break; + } case NUMERICOID: - default_value = NumericGetDatum(0); - break; + { + Numeric ni = DatumGetNumeric(interval); + Numeric numeric; + + /* Test for NaN */ + if (numeric_is_nan(ni)) + elog(ERROR, "invalid numeric interval"); + + /* Building default value */ + numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, + CStringGetDatum("0"), + ObjectIdGetDatum(InvalidOid), + 
Int32GetDatum(-1))); + default_value = NumericGetDatum(numeric); + break; + } case TIMESTAMPOID: case TIMESTAMPTZOID: default_value = TimestampGetDatum(GetCurrentTimestamp()); @@ -712,8 +741,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) Datum ts = TimestampGetDatum(GetCurrentTimestamp()); default_value = perform_type_cast(ts, TIMESTAMPTZOID, DATEOID, NULL); + break; } - break; default: return false; } From d2ff1380609b26cfcc513cf2757a43b61988471e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 19:25:05 +0300 Subject: [PATCH 0192/1124] test arg relnames := ARRAY[...]::TEXT[] of create_hash_partitions() --- expected/pathman_basic.out | 25 +++++++++++++++++++++++++ sql/pathman_basic.sql | 15 +++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index d3319dba..e8a13b1a 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2430,6 +2430,31 @@ SELECT * FROM test; (4 rows) +/* Test create_range_partitions() + relnames */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + relnames := ARRAY[]::TEXT[]); /* not ok */ +ERROR: size of array 'relnames' must be equal to 'partitions_count' +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; NOTICE: drop cascades to 54 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql 
index dbe7ea7e..d7cb6664 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -700,6 +700,21 @@ WITH RECURSIVE test AS ( SELECT * FROM test; +/* Test create_range_partitions() + relnames */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + relnames := ARRAY[]::TEXT[]); /* not ok */ +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + +DROP TABLE test.provided_part_names CASCADE; + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From a5e55809b11cca93bf9470902b289c9e3a0da5d7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 19:52:31 +0300 Subject: [PATCH 0193/1124] beautify function interval_is_trivial() and utils.c --- src/partition_creation.c | 27 ---- src/pl_range_funcs.c | 101 ++++++------ src/utils.c | 324 +++++++++++++++++++++------------------ src/utils.h | 7 +- 4 files changed, 234 insertions(+), 225 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 73f53b8f..9ee5f7cc 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -43,10 +43,6 @@ #include "utils/typcache.h" -static void extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, - Oid *move_bound_op_func, - Oid *move_bound_op_ret_type); - static Oid spawn_partitions_val(Oid parent_relid, const Bound *range_bound_min, const Bound *range_bound_max, @@ -433,29 +429,6 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) return partid; } -/* - * Fetch binary operator by name and return it's function and ret type. 
- */ -static void -extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, - Oid *move_bound_op_func, /* returned value #1 */ - Oid *move_bound_op_ret_type) /* returned value #2 */ -{ - Operator op; - - /* Get "move bound operator" descriptor */ - op = get_binary_operator(opname, type1, type2); - if (!op) - elog(ERROR, "missing %s operator for types %s and %s", - opname, format_type_be(type1), format_type_be(type2)); - - *move_bound_op_func = oprfuncid(op); - *move_bound_op_ret_type = get_operator_ret_type(op); - - /* Don't forget to release system cache */ - ReleaseSysCache(op); -} - /* * Append\prepend partitions if there's no partition to store 'value'. * diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b2d6635a..7d68b416 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -667,6 +667,12 @@ validate_interval_value(PG_FUNCTION_ARGS) } +/* + * ------------------ + * Helper functions + * ------------------ + */ + /* * Check if interval is insignificant to avoid infinite loops while adding * new partitions @@ -678,17 +684,19 @@ validate_interval_value(PG_FUNCTION_ARGS) static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) { + Oid plus_op_func; + Datum plus_op_result; + Oid plus_op_result_type; + Datum default_value; - Datum op_result; - Oid op_result_type; - Operator op; - Oid op_func; + FmgrInfo cmp_func; int32 cmp_result; /* - * Generate default value. For float4 and float8 values we also check - * that they aren't NaN or INF + * Generate default value. + * + * For float4 and float8 values we also check that they aren't NaN or INF. 
*/ switch(atttype) { @@ -697,69 +705,70 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) case INT8OID: default_value = Int16GetDatum(0); break; + case FLOAT4OID: { - float4 f = DatumGetFloat4(interval); + float4 f = DatumGetFloat4(interval); if (isnan(f) || is_infinite(f)) elog(ERROR, "invalid floating point interval"); default_value = Float4GetDatum(0); - break; } + break; + case FLOAT8OID: { - float8 f = DatumGetFloat8(interval); + float8 f = DatumGetFloat8(interval); if (isnan(f) || is_infinite(f)) elog(ERROR, "invalid floating point interval"); default_value = Float8GetDatum(0); - break; } + break; + case NUMERICOID: { - Numeric ni = DatumGetNumeric(interval); - Numeric numeric; + Numeric ni = DatumGetNumeric(interval), + numeric; /* Test for NaN */ if (numeric_is_nan(ni)) elog(ERROR, "invalid numeric interval"); /* Building default value */ - numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, - CStringGetDatum("0"), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); + numeric = DatumGetNumeric( + DirectFunctionCall3(numeric_in, + CStringGetDatum("0"), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); default_value = NumericGetDatum(numeric); - break; } + break; + case TIMESTAMPOID: case TIMESTAMPTZOID: default_value = TimestampGetDatum(GetCurrentTimestamp()); break; + case DATEOID: { - Datum ts = TimestampGetDatum(GetCurrentTimestamp()); + Datum ts = TimestampGetDatum(GetCurrentTimestamp()); default_value = perform_type_cast(ts, TIMESTAMPTZOID, DATEOID, NULL); - break; } + break; + default: return false; } /* Find suitable addition operator for default value and interval */ - op = get_binary_operator("+", atttype, interval_type); - if (!op) - elog(ERROR, "missing \"+\" operator for types %s and %s", - format_type_be(atttype), - format_type_be(interval_type)); + extract_op_func_and_ret_type("+", atttype, interval_type, + &plus_op_func, + &plus_op_result_type); - op_func = oprfuncid(op); - op_result_type = 
get_operator_ret_type(op); - ReleaseSysCache(op); - - /* Invoke addition operator and get a result*/ - op_result = OidFunctionCall2(op_func, default_value, interval); + /* Invoke addition operator and get a result */ + plus_op_result = OidFunctionCall2(plus_op_func, default_value, interval); /* * If operator result type isn't the same as original value then @@ -768,40 +777,38 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) * to a date then we'll get a timestamp which is one second later than * original date (obviously). But when we convert it back to a date we will * get the same original value meaning that one second interval wouldn't - * change original value anyhow. We should consider such interval - * as trivial + * change original value anyhow. We should consider such interval as trivial */ - if (op_result_type != atttype) + if (plus_op_result_type != atttype) { - op_result = perform_type_cast(op_result, op_result_type, atttype, NULL); - op_result_type = atttype; + plus_op_result = perform_type_cast(plus_op_result, + plus_op_result_type, + atttype, NULL); + plus_op_result_type = atttype; } /* - * Compare it to the default_value. If they are the same then obviously - * interval is trivial + * Compare it to the default_value. + * + * If they are the same then obviously interval is trivial. */ fill_type_cmp_fmgr_info(&cmp_func, getBaseType(atttype), - getBaseType(op_result_type)); + getBaseType(plus_op_result_type)); + cmp_result = DatumGetInt32(FunctionCall2(&cmp_func, default_value, - op_result)); + plus_op_result)); if (cmp_result == 0) return true; - else if (cmp_result > 0) /* Negative interval? */ + + else if (cmp_result > 0) /* Negative interval? 
*/ elog(ERROR, "interval must not be negative"); + /* Everything is OK */ return false; } - -/* - * ------------------ - * Helper functions - * ------------------ - */ - /* * Drop old partition constraint and create * a new one with specified boundaries diff --git a/src/utils.c b/src/utils.c index 1669accc..38960b3a 100644 --- a/src/utils.c +++ b/src/utils.c @@ -58,64 +58,55 @@ clause_contains_params_walker(Node *node, void *context) } /* - * Get BTORDER_PROC for two types described by Oids + * Check if this is a "date"-related type. */ -void -fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) +bool +is_date_type_internal(Oid typid) { - Oid cmp_proc_oid; - TypeCacheEntry *tce_1, - *tce_2; - - /* Check type compatibility */ - if (IsBinaryCoercible(type1, type2)) - type1 = type2; - - else if (IsBinaryCoercible(type2, type1)) - type2 = type1; - - tce_1 = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); - tce_2 = lookup_type_cache(type2, TYPECACHE_BTREE_OPFAMILY); + return typid == TIMESTAMPOID || + typid == TIMESTAMPTZOID || + typid == DATEOID; +} - /* Both types should belong to the same opfamily */ - if (tce_1->btree_opf != tce_2->btree_opf) - goto fill_type_cmp_fmgr_info_error; +/* + * Check if user can alter/drop specified relation. This function is used to + * make sure that current user can change pg_pathman's config. Returns true + * if user can manage relation, false otherwise. + * + * XXX currently we just check if user is a table owner. Probably it's + * better to check user permissions in order to let other users participate. 
+ */ +bool +check_security_policy_internal(Oid relid, Oid role) +{ + Oid owner; - cmp_proc_oid = get_opfamily_proc(tce_1->btree_opf, - tce_1->btree_opintype, - tce_2->btree_opintype, - BTORDER_PROC); + /* Superuser is allowed to do anything */ + if (superuser()) + return true; - /* No such function, emit ERROR */ - if (!OidIsValid(cmp_proc_oid)) - goto fill_type_cmp_fmgr_info_error; + /* Fetch the owner */ + owner = get_rel_owner(relid); - /* Fill FmgrInfo struct */ - fmgr_info(cmp_proc_oid, finfo); + /* + * Sometimes the relation doesn't exist anymore but there is still + * a record in config. For instance, it happens in DDL event trigger. + * Still we should be able to remove this record. + */ + if (owner == InvalidOid) + return true; - return; /* everything is OK */ + /* Check if current user is the owner of the relation */ + if (owner != role) + return false; -/* Handle errors (no such function) */ -fill_type_cmp_fmgr_info_error: - elog(ERROR, "missing comparison function for types %s & %s", - format_type_be(type1), format_type_be(type2)); + return true; } -List * -list_reverse(List *l) -{ - List *result = NIL; - ListCell *lc; - foreach (lc, l) - { - result = lcons(lfirst(lc), result); - } - return result; -} /* - * Returns pg_pathman schema's Oid or InvalidOid if that's not possible. + * Return pg_pathman schema's Oid or InvalidOid if that's not possible. */ Oid get_pathman_schema(void) @@ -159,65 +150,42 @@ get_pathman_schema(void) return result; } -/* - * Check if this is a "date"-related type. - */ -bool -is_date_type_internal(Oid typid) -{ - return typid == TIMESTAMPOID || - typid == TIMESTAMPTZOID || - typid == DATEOID; -} - - -/* - * Try to find binary operator. - * - * Returns operator function's Oid or throws an ERROR on InvalidOid. 
- */ -Operator -get_binary_operator(char *oprname, Oid arg1, Oid arg2) +List * +list_reverse(List *l) { - Operator op; - - op = compatible_oper(NULL, list_make1(makeString(oprname)), - arg1, arg2, true, -1); - - if (!op) - elog(ERROR, "Cannot find operator \"%s\"(%u, %u)", oprname, arg1, arg2); + List *result = NIL; + ListCell *lc; - return op; + foreach (lc, l) + { + result = lcons(lfirst(lc), result); + } + return result; } -/* Get operator's result type */ -Oid -get_operator_ret_type(Operator op) -{ - Form_pg_operator pgopform = (Form_pg_operator) GETSTRUCT(op); - return pgopform->oprresult; -} /* - * Get CSTRING representation of Datum using the type Oid. + * Get relation owner. */ -char * -datum_to_cstring(Datum datum, Oid typid) +Oid +get_rel_owner(Oid relid) { - char *result; - HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); + HeapTuple tp; + Oid owner; - if (HeapTupleIsValid(tup)) + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(tp)) { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tup); - result = OidOutputFunctionCall(typtup->typoutput, datum); - ReleaseSysCache(tup); + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); + + owner = reltup->relowner; + ReleaseSysCache(tp); + + return owner; } - else - result = pstrdup("[error]"); - return result; + return InvalidOid; } /* @@ -234,6 +202,34 @@ get_rel_name_or_relid(Oid relid) return relname; } +/* + * Get type of column by its name. 
+ */ +Oid +get_attribute_type(Oid relid, const char *attname, bool missing_ok) +{ + Oid result; + HeapTuple tp; + + /* NOTE: for now it's the most efficient way */ + tp = SearchSysCacheAttName(relid, attname); + if (HeapTupleIsValid(tp)) + { + Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); + result = att_tup->atttypid; + ReleaseSysCache(tp); + + return result; + } + + if (!missing_ok) + elog(ERROR, "cannot find type name for attribute \"%s\" " + "of relation \"%s\"", + attname, get_rel_name_or_relid(relid)); + + return InvalidOid; +} + #if PG_VERSION_NUM < 90600 /* * Returns the relpersistence associated with a given relation. @@ -259,92 +255,120 @@ get_rel_persistence(Oid relid) } #endif + + /* - * Get relation owner. + * Try to find binary operator. + * + * Returns operator function's Oid or throws an ERROR on InvalidOid. */ -Oid -get_rel_owner(Oid relid) +Operator +get_binary_operator(char *oprname, Oid arg1, Oid arg2) { - HeapTuple tp; - Oid owner; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (HeapTupleIsValid(tp)) - { - Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); + Operator op; - owner = reltup->relowner; - ReleaseSysCache(tp); + op = compatible_oper(NULL, list_make1(makeString(oprname)), + arg1, arg2, true, -1); - return owner; - } + if (!op) + elog(ERROR, "Cannot find operator \"%s\"(%u, %u)", oprname, arg1, arg2); - return InvalidOid; + return op; } /* - * Get type of column by its name. + * Get BTORDER_PROC for two types described by Oids. 
*/ -Oid -get_attribute_type(Oid relid, const char *attname, bool missing_ok) +void +fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) { - Oid result; - HeapTuple tp; + Oid cmp_proc_oid; + TypeCacheEntry *tce_1, + *tce_2; - /* NOTE: for now it's the most efficient way */ - tp = SearchSysCacheAttName(relid, attname); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = att_tup->atttypid; - ReleaseSysCache(tp); + /* Check type compatibility */ + if (IsBinaryCoercible(type1, type2)) + type1 = type2; - return result; - } + else if (IsBinaryCoercible(type2, type1)) + type2 = type1; - if (!missing_ok) - elog(ERROR, "cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - attname, get_rel_name_or_relid(relid)); + tce_1 = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); + tce_2 = lookup_type_cache(type2, TYPECACHE_BTREE_OPFAMILY); - return InvalidOid; + /* Both types should belong to the same opfamily */ + if (tce_1->btree_opf != tce_2->btree_opf) + goto fill_type_cmp_fmgr_info_error; + + cmp_proc_oid = get_opfamily_proc(tce_1->btree_opf, + tce_1->btree_opintype, + tce_2->btree_opintype, + BTORDER_PROC); + + /* No such function, emit ERROR */ + if (!OidIsValid(cmp_proc_oid)) + goto fill_type_cmp_fmgr_info_error; + + /* Fill FmgrInfo struct */ + fmgr_info(cmp_proc_oid, finfo); + + return; /* everything is OK */ + +/* Handle errors (no such function) */ +fill_type_cmp_fmgr_info_error: + elog(ERROR, "missing comparison function for types %s & %s", + format_type_be(type1), format_type_be(type2)); } /* - * Check if user can alter/drop specified relation. This function is used to - * make sure that current user can change pg_pathman's config. Returns true - * if user can manage relation, false otherwise. - * - * XXX currently we just check if user is a table owner. Probably it's - * better to check user permissions in order to let other users participate. 
+ * Fetch binary operator by name and return it's function and ret type. */ -bool -check_security_policy_internal(Oid relid, Oid role) +void +extract_op_func_and_ret_type(char *opname, + Oid type1, Oid type2, + Oid *op_func, /* returned value #1 */ + Oid *op_ret_type) /* returned value #2 */ { - Oid owner; + Operator op; - /* Superuser is allowed to do anything */ - if (superuser()) - return true; + /* Get "move bound operator" descriptor */ + op = get_binary_operator(opname, type1, type2); + if (!op) + elog(ERROR, "missing %s operator for types %s and %s", + opname, format_type_be(type1), format_type_be(type2)); - /* Fetch the owner */ - owner = get_rel_owner(relid); + *op_func = oprfuncid(op); + *op_ret_type = ((Form_pg_operator) GETSTRUCT(op))->oprresult; - /* - * Sometimes the relation doesn't exist anymore but there is still - * a record in config. For instance, it happens in DDL event trigger. - * Still we should be able to remove this record. - */ - if (owner == InvalidOid) - return true; + /* Don't forget to release system cache */ + ReleaseSysCache(op); +} - /* Check if current user is the owner of the relation */ - if (owner != role) - return false; - return true; + +/* + * Get CSTRING representation of Datum using the type Oid. + */ +char * +datum_to_cstring(Datum datum, Oid typid) +{ + char *result; + HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); + + if (HeapTupleIsValid(tup)) + { + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tup); + result = OidOutputFunctionCall(typtup->typoutput, datum); + ReleaseSysCache(tup); + } + else + result = pstrdup("[error]"); + + return result; } + + /* * Try casting value of type 'in_type' to 'out_type'. * diff --git a/src/utils.h b/src/utils.h index e00cd582..24bad286 100644 --- a/src/utils.h +++ b/src/utils.h @@ -48,8 +48,12 @@ char get_rel_persistence(Oid relid); * Operator-related stuff. 
*/ Operator get_binary_operator(char *opname, Oid arg1, Oid arg2); -Oid get_operator_ret_type(Operator op); void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); +void extract_op_func_and_ret_type(char *opname, + Oid type1, Oid type2, + Oid *op_func, + Oid *op_ret_type); + /* * Print values and cast types. @@ -61,4 +65,5 @@ Datum extract_binary_interval_from_text(Datum interval_text, Oid *interval_type); + #endif /* PATHMAN_UTILS_H */ From 0ef73496fa25cbf628b9bb32968e9ebebc04de47 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 20:02:28 +0300 Subject: [PATCH 0194/1124] update pg_pathman's version (1.3) --- Makefile | 2 +- pg_pathman.control | 2 +- src/init.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 87381430..f5ae64e9 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/partition_creation.o $(WIN32RES) EXTENSION = pg_pathman -EXTVERSION = 1.2 +EXTVERSION = 1.3 DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql diff --git a/pg_pathman.control b/pg_pathman.control index 4d07adf5..280f2aa4 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment 'Partitioning tool' -default_version = '1.2' +default_version = '1.3' module_pathname='$libdir/pg_pathman' diff --git a/src/init.h b/src/init.h index 09f574a8..1ac9e52e 100644 --- a/src/init.h +++ b/src/init.h @@ -96,10 +96,10 @@ extern PathmanInitState pg_pathman_init_state; /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010200 +#define LOWEST_COMPATIBLE_FRONT 0x010300 /* Current version on native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010201 +#define CURRENT_LIB_VERSION 0x010300 /* From 6f2eb101a8edd21b50a57d5f9bb1076f27e302c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: 
Fri, 10 Feb 2017 20:09:44 +0300 Subject: [PATCH 0195/1124] fix pathman_interval test (reported by Codecov.io) --- .gitignore | 2 +- expected/pathman_interval.out | 14 +++++++------- sql/pathman_interval.sql | 16 ++++++++-------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index 6aa27542..f0d2c2c4 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,4 @@ regression.out *.gcda *.gcno *.gcov -pg_pathman--1.2.sql +pg_pathman--*.sql diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 03910e15..2483cdbb 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -40,7 +40,7 @@ SELECT * FROM pathman_config; abc | id | 2 | 1000 (1 row) -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 3 other objects /* Range partitions for DATE type */ CREATE TABLE abc (dt DATE NOT NULL); @@ -72,7 +72,7 @@ SELECT * FROM pathman_config; abc | dt | 2 | @ 1 mon (1 row) -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for FLOAT4 type */ CREATE TABLE abc (x FLOAT4 NOT NULL); @@ -104,10 +104,10 @@ SELECT set_interval('abc', 100); (1 row) -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for FLOAT8 type */ -CREATE TABLE abc (x FLOAT4 NOT NULL); +CREATE TABLE abc (x FLOAT8 NOT NULL); SELECT create_range_partitions('abc', 'x', 0, 100, 2); create_range_partitions ------------------------- @@ -136,7 +136,7 @@ SELECT set_interval('abc', 100); (1 row) -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for NUMERIC type */ CREATE TABLE abc (x NUMERIC NOT NULL); @@ -165,7 +165,7 @@ SELECT set_interval('abc', 100); (1 row) -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 2 other objects /* Hash partitioned table shouldn't accept any interval value */ CREATE TABLE abc (id SERIAL); @@ 
-179,6 +179,6 @@ SELECT set_interval('abc', 100); ERROR: table "abc" is not partitioned by RANGE SELECT set_interval('abc', NULL::INTEGER); ERROR: table "abc" is not partitioned by RANGE -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index f5e1efa2..f5e1694f 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -17,7 +17,7 @@ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; SELECT set_interval('abc', 1000); INSERT INTO abc VALUES (250); SELECT * FROM pathman_config; -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; /* Range partitions for DATE type */ CREATE TABLE abc (dt DATE NOT NULL); @@ -28,7 +28,7 @@ SELECT set_interval('abc', '1 second'::INTERVAL); /* Set a normal interval */ SELECT set_interval('abc', '1 month'::INTERVAL); SELECT * FROM pathman_config; -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; /* Range partitions for FLOAT4 type */ CREATE TABLE abc (x FLOAT4 NOT NULL); @@ -42,10 +42,10 @@ SELECT set_interval('abc', 'NaN'::FLOAT4); SELECT set_interval('abc', 'Infinity'::FLOAT4); /* Set a normal interval */ SELECT set_interval('abc', 100); -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; /* Range partitions for FLOAT8 type */ -CREATE TABLE abc (x FLOAT4 NOT NULL); +CREATE TABLE abc (x FLOAT8 NOT NULL); SELECT create_range_partitions('abc', 'x', 0, 100, 2); SELECT set_interval('abc', NULL::FLOAT8); /* Set a trivial interval */ @@ -56,7 +56,7 @@ SELECT set_interval('abc', 'NaN'::FLOAT8); SELECT set_interval('abc', 'Infinity'::FLOAT8); /* Set a normal interval */ SELECT set_interval('abc', 100); -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; /* Range partitions for NUMERIC type */ CREATE TABLE abc (x NUMERIC NOT NULL); @@ -68,13 +68,13 @@ SELECT set_interval('abc', 0); SELECT set_interval('abc', 'NaN'::NUMERIC); /* Set a normal interval */ SELECT set_interval('abc', 
100); -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; /* Hash partitioned table shouldn't accept any interval value */ CREATE TABLE abc (id SERIAL); SELECT create_hash_partitions('abc', 'id', 3); SELECT set_interval('abc', 100); SELECT set_interval('abc', NULL::INTEGER); -DROP TABLE abc cascade; +DROP TABLE abc CASCADE; -DROP EXTENSION pg_pathman; \ No newline at end of file +DROP EXTENSION pg_pathman; From 819c07ecda4de2d24a1c397d2124c3883b90e158 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Feb 2017 20:45:12 +0300 Subject: [PATCH 0196/1124] remove declaration of function get_attribute_type_pl(), more calamity tests --- expected/pathman_calamity.out | 10 +++++++--- sql/pathman_calamity.sql | 8 +++++--- src/pl_funcs.c | 1 - 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 2b3c96c9..ae3d1434 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -389,9 +389,13 @@ ERROR: 'parent_relid' should not be NULL SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); ERROR: 'partition' should not be NULL /* check function add_to_pathman_config() -- PHASE #1 */ -SELECT add_to_pathman_config('calamity.part_test', NULL); +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ ERROR: 'attname' should not be NULL -SELECT add_to_pathman_config('calamity.part_test', 'val'); +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ +ERROR: relation "part_test" has no column "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ add_to_pathman_config ----------------------- t @@ -404,7 +408,7 @@ NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping (1 row) -SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +SELECT 
add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ add_to_pathman_config ----------------------- t diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 9c7b6f28..597a103d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -140,10 +140,12 @@ SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); /* check function add_to_pathman_config() -- PHASE #1 */ -SELECT add_to_pathman_config('calamity.part_test', NULL); -SELECT add_to_pathman_config('calamity.part_test', 'val'); +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ SELECT disable_pathman_for('calamity.part_test'); -SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ SELECT disable_pathman_for('calamity.part_test'); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index a17e0d79..2e24dc77 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -41,7 +41,6 @@ PG_FUNCTION_INFO_V1( on_partitions_removed ); PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); -PG_FUNCTION_INFO_V1( get_attribute_type_pl ); PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); From 4468ae8e2889fc2b4000ec30036898da9bb43312 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 12 Feb 2017 01:41:11 +0300 Subject: [PATCH 0197/1124] small regression tests refactoring (+pathman_cte) --- Makefile | 18 +- expected/pathman_basic.out | 257 +--------------------------- expected/pathman_callbacks.out | 35 ++-- expected/pathman_cte.out | 275 
++++++++++++++++++++++++++++++ expected/pathman_foreign_keys.out | 33 +++- expected/pathman_inserts.out | 14 +- expected/pathman_interval.out | 112 ++++++------ sql/pathman_basic.sql | 116 ------------- sql/pathman_callbacks.sql | 22 +-- sql/pathman_cte.sql | 159 +++++++++++++++++ sql/pathman_foreign_keys.sql | 26 +++ sql/pathman_inserts.sql | 19 ++- sql/pathman_interval.sql | 129 +++++++++----- 13 files changed, 698 insertions(+), 517 deletions(-) create mode 100644 expected/pathman_cte.out create mode 100644 sql/pathman_cte.sql diff --git a/Makefile b/Makefile index f5ae64e9..cf83a381 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ # contrib/pg_pathman/Makefile MODULE_big = pg_pathman + OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ @@ -9,24 +10,31 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/partition_creation.o $(WIN32RES) EXTENSION = pg_pathman + EXTVERSION = 1.3 + DATA_built = pg_pathman--$(EXTVERSION).sql + DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql + PGFILEDESC = "pg_pathman - partitioning tool" REGRESS = pathman_basic \ - pathman_inserts \ - pathman_runtime_nodes \ - pathman_callbacks \ + pathman_cte \ pathman_domains \ + pathman_interval \ + pathman_callbacks \ pathman_foreign_keys \ pathman_permissions \ pathman_rowmarks \ + pathman_inserts \ + pathman_runtime_nodes \ pathman_utility_stmt_hooking \ - pathman_calamity \ - pathman_interval + pathman_calamity + EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add + EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output ifdef USE_PGXS diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index e8a13b1a..0352cf4d 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1138,197 +1138,6 @@ SELECT * FROM 
test.hash_varchar WHERE val = '12'::TEXT; DROP TABLE test.hash_varchar CASCADE; NOTICE: drop cascades to 4 other objects -/* - * Test CTE query - */ -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') -SELECT * FROM ttt; - QUERY PLAN --------------------------------------------------------------------------------------------- - CTE Scan on ttt - CTE ttt - -> Append - -> Seq Scan on range_rel_2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 - Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) -(6 rows) - -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) -SELECT * FROM ttt; - QUERY PLAN --------------------------------------- - CTE Scan on ttt - CTE ttt - -> Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(5 rows) - -/* - * Test CTE query - by @parihaaraka (add varno to WalkerContext) - */ -CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); -INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; -create table test.cte_del_xacts_specdata -( - tid BIGINT PRIMARY KEY, - test_mode SMALLINT, - state_code SMALLINT NOT NULL DEFAULT 8, - regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL -); -INSERT INTO test.cte_del_xacts_specdata VALUES(1, 1, 1, current_timestamp); /* for subquery test */ -/* create 2 partitions */ -SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); -NOTICE: sequence "cte_del_xacts_seq" does not exist, skipping - create_range_partitions -------------------------- - 2 -(1 row) - -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - QUERY PLAN 
--------------------------------------------------------------------------------- - Delete on cte_del_xacts t - Delete on cte_del_xacts t - Delete on cte_del_xacts_1 t_1 - Delete on cte_del_xacts_2 t_2 - CTE tmp - -> Seq Scan on cte_del_xacts_specdata - -> Hash Join - Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_pkey on cte_del_xacts t - -> Hash Join - Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t_1 - -> Hash Join - Hash Cond: ((tmp.tid = t_2.id) AND (tmp.pdate = t_2.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_2_pkey on cte_del_xacts_2 t_2 -(24 rows) - -SELECT pathman.drop_partitions('test.cte_del_xacts'); /* now drop partitions */ -NOTICE: function test.cte_del_xacts_upd_trig_func() does not exist, skipping -NOTICE: 50 rows copied from test.cte_del_xacts_1 -NOTICE: 50 rows copied from test.cte_del_xacts_2 - drop_partitions ------------------ - 2 -(1 row) - -/* create 1 partition */ -SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '1 year'::interval); - create_range_partitions -------------------------- - 1 -(1 row) - -/* parent enabled! 
*/ -SELECT pathman.set_enable_parent('test.cte_del_xacts', true); - set_enable_parent -------------------- - -(1 row) - -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - QUERY PLAN --------------------------------------------------------------------------------- - Delete on cte_del_xacts t - Delete on cte_del_xacts t - Delete on cte_del_xacts_1 t_1 - CTE tmp - -> Seq Scan on cte_del_xacts_specdata - -> Hash Join - Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_pkey on cte_del_xacts t - -> Hash Join - Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t_1 -(17 rows) - -/* parent disabled! 
*/ -SELECT pathman.set_enable_parent('test.cte_del_xacts', false); - set_enable_parent -------------------- - -(1 row) - -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - QUERY PLAN ------------------------------------------------------------------------------- - Delete on cte_del_xacts_1 t - CTE tmp - -> Seq Scan on cte_del_xacts_specdata - -> Hash Join - Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) - -> CTE Scan on tmp - Filter: (test_mode > 0) - -> Hash - -> Index Scan using cte_del_xacts_1_pkey on cte_del_xacts_1 t -(9 rows) - -/* create stub pl/PgSQL function */ -CREATE OR REPLACE FUNCTION test.cte_del_xacts_stab(name TEXT) -RETURNS smallint AS -$$ -begin - return 2::smallint; -end -$$ -LANGUAGE plpgsql STABLE; -/* test subquery planning */ -WITH tmp AS ( - SELECT tid FROM test.cte_del_xacts_specdata - WHERE state_code != test.cte_del_xacts_stab('test')) -SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; - id | pdate | tid -----+------------+----- - 1 | 01-01-2016 | 1 -(1 row) - -/* test subquery planning (one more time) */ -WITH tmp AS ( - SELECT tid FROM test.cte_del_xacts_specdata - WHERE state_code != test.cte_del_xacts_stab('test')) -SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; - id | pdate | tid -----+------------+----- - 1 | 01-01-2016 | 1 -(1 row) - -DROP FUNCTION test.cte_del_xacts_stab(TEXT); -DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; -NOTICE: drop cascades to table test.cte_del_xacts_1 /* * Test split and merge */ @@ -2244,32 +2053,6 @@ SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ DROP TABLE bool_test CASCADE; NOTICE: drop cascades to 3 other objects -/* Test foreign keys */ -CREATE TABLE test.messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE test.replies(id SERIAL 
PRIMARY KEY, message_id INTEGER REFERENCES test.messages(id), msg TEXT); -INSERT INTO test.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO test.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); -WARNING: foreign key "replies_message_id_fkey" references relation "test.messages" -ERROR: relation "test.messages" is referenced from other relations -ALTER TABLE test.replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); -NOTICE: sequence "messages_seq" does not exist, skipping - create_range_partitions -------------------------- - 2 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM test.messages; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on messages_1 - -> Seq Scan on messages_2 -(3 rows) - -DROP TABLE test.messages, test.replies CASCADE; -NOTICE: drop cascades to 2 other objects /* Special test case (quals generation) -- fixing commit f603e6c5 */ CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; @@ -2392,44 +2175,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 Filter: (c1 < 2500) (12 rows) -/* Test recursive CTE */ -CREATE TABLE test.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); -SELECT * FROM create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); - create_hash_partitions ------------------------- - 2 -(1 row) - -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||id FROM generate_series(1,100) f(id); -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); -SELECT * FROM test.recursive_cte_test_tbl WHERE id = 5; - id | name 
-----+------- - 5 | name5 - 5 | name6 - 5 | name7 -(3 rows) - -WITH RECURSIVE test AS ( - SELECT min(name) AS name - FROM test.recursive_cte_test_tbl - WHERE id = 5 - UNION ALL - SELECT (SELECT min(name) - FROM test.recursive_cte_test_tbl - WHERE id = 5 AND name > test.name) - FROM test - WHERE name IS NOT NULL) -SELECT * FROM test; - name -------- - name5 - name6 - name7 - -(4 rows) - /* Test create_range_partitions() + relnames */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); @@ -2456,6 +2201,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 54 other objects +NOTICE: drop cascades to 49 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 91ec6e2d..4a5b1a47 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -168,46 +168,45 @@ WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4", DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 5 other objects /* create table in public schema */ -CREATE TABLE abc(a serial, b int); -SELECT set_init_callback('abc', +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- (1 row) -SELECT create_range_partitions('abc', 'a', 1, 100, 2); -NOTICE: sequence "abc_seq" does not exist, skipping -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "public", "partition_schema": "public"} -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "public", "partition_schema": "public"} +SELECT 
create_range_partitions('callbacks.abc', 'a', 1, 100, 2); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "callbacks", "partition_schema": "callbacks"} create_range_partitions ------------------------- 2 (1 row) -DROP TABLE abc CASCADE; +DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 2 other objects /* test the temprary deletion of callback function */ -CREATE TABLE abc(a serial, b int); -SELECT set_init_callback('abc', +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback(jsonb)'); set_init_callback ------------------- (1 row) -SELECT create_range_partitions('abc', 'a', 1, 100, 2); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "public", "partition_schema": "public"} -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "public", "partition_schema": "public"} +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "callbacks", "partition_schema": "callbacks"} create_range_partitions ------------------------- 2 (1 row) -INSERT INTO abc VALUES (201, 0); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "public", "partition_schema": 
"public"} +INSERT INTO callbacks.abc VALUES (201, 0); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); -INSERT INTO abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ @@ -215,9 +214,9 @@ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -INSERT INTO abc VALUES (301, 0); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "public", "partition_schema": "public"} -DROP TABLE abc CASCADE; +INSERT INTO callbacks.abc VALUES (301, 0); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} +DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 4 other objects DROP SCHEMA callbacks CASCADE; NOTICE: drop cascades to 2 other objects diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out new file mode 100644 index 00000000..facda1bb --- /dev/null +++ b/expected/pathman_cte.out @@ -0,0 +1,275 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +/* + * Test simple CTE queries + */ +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); +NOTICE: sequence "range_rel_seq" does not exist, skipping + create_range_partitions 
+------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +---------------------------------------------------------------------------------------- + CTE Scan on ttt + CTE ttt + -> Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 4 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------- + CTE Scan on ttt + CTE ttt + -> Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(5 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + 
'2016-01-01'::date, '50 days'::interval); +NOTICE: sequence "cte_del_xacts_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((tmp.tid = t_2.id) AND (tmp.pdate = t_2.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_2 t_2 +(24 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: function test_cte.cte_del_xacts_upd_trig_func() does not exist, skipping +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t_1 +(17 rows) + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------- + Delete on cte_del_xacts_1 t + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t +(9 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != 
test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to table test_cte.cte_del_xacts_1 +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP SCHEMA test_cte CASCADE; +NOTICE: drop cascades to 5 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 20a4da60..8b3db83e 100644 --- 
a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -62,6 +62,37 @@ NOTICE: 94 rows copied from fkeys.test_fkey_9 10 (1 row) +/* Try to partition table that's being referenced */ +CREATE TABLE fkeys.messages( + id SERIAL PRIMARY KEY, + msg TEXT); +CREATE TABLE fkeys.replies( + id SERIAL PRIMARY KEY, + message_id INTEGER REFERENCES fkeys.messages(id), + msg TEXT); +INSERT INTO fkeys.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; +INSERT INTO fkeys.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* not ok */ +WARNING: foreign key "replies_message_id_fkey" references relation "fkeys.messages" +ERROR: relation "fkeys.messages" is referenced from other relations +ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ +NOTICE: sequence "messages_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on messages_1 + -> Seq Scan on messages_2 +(3 rows) + +DROP TABLE fkeys.messages, fkeys.replies CASCADE; +NOTICE: drop cascades to 2 other objects DROP SCHEMA fkeys CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 7562b99b..f2ee7245 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -13,10 +13,7 @@ NOTICE: sequence "storage_seq" does not exist, skipping 10 (1 row) -/* - * attach before and after insertion triggers to partitioned table - */ -/* prepare trigger functions */ +/* attach before and after insertion triggers to partitioned table */ CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS 
TRIGGER AS $$ BEGIN RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; @@ -34,10 +31,15 @@ CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback */ CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ BEGIN - EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s for each row execute procedure test_inserts.print_cols_before_change();', args->>'partition_schema', args->>'partition'); - EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); END; $$ LANGUAGE plpgsql; SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 2483cdbb..39120a3d 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -1,184 +1,190 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_interval; /* Range partitions for INTEGER type */ -CREATE TABLE abc (id SERIAL); -SELECT create_range_partitions('abc', 'id', 0, 100, 2); +CREATE TABLE test_interval.abc (id SERIAL); +SELECT 
create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +NOTICE: sequence "abc_seq" does not exist, skipping create_range_partitions ------------------------- 2 (1 row) -SELECT set_interval('abc', NULL::INTEGER); +SELECT set_interval('test_interval.abc', NULL::INTEGER); set_interval -------------- (1 row) /* pg_pathman shouldn't be able to create a new partition */ -INSERT INTO abc VALUES (250); +INSERT INTO test_interval.abc VALUES (250); ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); ERROR: interval must not be trivial /* Set a negative interval */ -SELECT set_interval('abc', -100); +SELECT set_interval('test_interval.abc', -100); ERROR: interval must not be negative -/* We also shouldn't be able to set a trivial interval directly in pathman_config table */ -UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; ERROR: interval must not be trivial /* Set a normal interval */ -SELECT set_interval('abc', 1000); +SELECT set_interval('test_interval.abc', 1000); set_interval -------------- (1 row) -INSERT INTO abc VALUES (250); +INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval ----------+---------+----------+---------------- - abc | id | 2 | 1000 + partrel | attname | parttype | range_interval +-------------------+---------+----------+---------------- + test_interval.abc | id | 2 | 1000 (1 row) -DROP TABLE abc CASCADE; +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects /* Range partitions for DATE type */ -CREATE TABLE abc (dt DATE NOT NULL); -SELECT create_range_partitions('abc', 'dt', '2016-01-01'::DATE, '1 day'::INTERVAL, 2); +CREATE TABLE test_interval.abc 
(dt DATE NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'dt', + '2016-01-01'::DATE, '1 day'::INTERVAL, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT set_interval('abc', NULL::INTERVAL); +SELECT set_interval('test_interval.abc', NULL::INTERVAL); set_interval -------------- (1 row) /* Set a trivial interval */ -SELECT set_interval('abc', '1 second'::INTERVAL); +SELECT set_interval('test_interval.abc', '1 second'::INTERVAL); ERROR: interval must not be trivial /* Set a normal interval */ -SELECT set_interval('abc', '1 month'::INTERVAL); +SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); set_interval -------------- (1 row) SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval ----------+---------+----------+---------------- - abc | dt | 2 | @ 1 mon + partrel | attname | parttype | range_interval +-------------------+---------+----------+---------------- + test_interval.abc | dt | 2 | @ 1 mon (1 row) -DROP TABLE abc CASCADE; +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for FLOAT4 type */ -CREATE TABLE abc (x FLOAT4 NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); +CREATE TABLE test_interval.abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT set_interval('abc', NULL::FLOAT4); +SELECT set_interval('test_interval.abc', NULL::FLOAT4); set_interval -------------- (1 row) /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); ERROR: interval must not be trivial /* Set NaN float as interval */ -SELECT set_interval('abc', 'NaN'::FLOAT4); +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT4); ERROR: invalid floating point interval /* Set INF float as interval */ -SELECT set_interval('abc', 'Infinity'::FLOAT4); +SELECT set_interval('test_interval.abc', 
'Infinity'::FLOAT4); ERROR: invalid floating point interval /* Set a normal interval */ -SELECT set_interval('abc', 100); +SELECT set_interval('test_interval.abc', 100); set_interval -------------- (1 row) -DROP TABLE abc CASCADE; +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for FLOAT8 type */ -CREATE TABLE abc (x FLOAT8 NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); +CREATE TABLE test_interval.abc (x FLOAT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT set_interval('abc', NULL::FLOAT8); +SELECT set_interval('test_interval.abc', NULL::FLOAT8); set_interval -------------- (1 row) /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); ERROR: interval must not be trivial /* Set NaN float as interval */ -SELECT set_interval('abc', 'NaN'::FLOAT8); +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT8); ERROR: invalid floating point interval /* Set INF float as interval */ -SELECT set_interval('abc', 'Infinity'::FLOAT8); +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT8); ERROR: invalid floating point interval /* Set a normal interval */ -SELECT set_interval('abc', 100); +SELECT set_interval('test_interval.abc', 100); set_interval -------------- (1 row) -DROP TABLE abc CASCADE; +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 2 other objects /* Range partitions for NUMERIC type */ -CREATE TABLE abc (x NUMERIC NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); +CREATE TABLE test_interval.abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT set_interval('abc', NULL::NUMERIC); +SELECT set_interval('test_interval.abc', NULL::NUMERIC); set_interval -------------- (1 row) /* Set a trivial interval 
*/ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); ERROR: interval must not be trivial /* Set NaN numeric as interval */ -SELECT set_interval('abc', 'NaN'::NUMERIC); +SELECT set_interval('test_interval.abc', 'NaN'::NUMERIC); ERROR: invalid numeric interval /* Set a normal interval */ -SELECT set_interval('abc', 100); +SELECT set_interval('test_interval.abc', 100); set_interval -------------- (1 row) -DROP TABLE abc CASCADE; +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 2 other objects /* Hash partitioned table shouldn't accept any interval value */ -CREATE TABLE abc (id SERIAL); -SELECT create_hash_partitions('abc', 'id', 3); +CREATE TABLE test_interval.abc (id SERIAL); +SELECT create_hash_partitions('test_interval.abc', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -SELECT set_interval('abc', 100); -ERROR: table "abc" is not partitioned by RANGE -SELECT set_interval('abc', NULL::INTEGER); -ERROR: table "abc" is not partitioned by RANGE -DROP TABLE abc CASCADE; +SELECT set_interval('test_interval.abc', 100); +ERROR: table "test_interval.abc" is not partitioned by RANGE +SELECT set_interval('test_interval.abc', NULL::INTEGER); +ERROR: table "test_interval.abc" is not partitioned by RANGE +DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects +DROP SCHEMA test_interval CASCADE; +NOTICE: drop cascades to sequence test_interval.abc_seq DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index d7cb6664..179402f6 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -292,90 +292,6 @@ SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; DROP TABLE test.hash_varchar CASCADE; -/* - * Test CTE query - */ -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') -SELECT * FROM ttt; - -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) -SELECT * FROM 
ttt; - -/* - * Test CTE query - by @parihaaraka (add varno to WalkerContext) - */ -CREATE TABLE test.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); -INSERT INTO test.cte_del_xacts (pdate) SELECT gen_date FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; - -create table test.cte_del_xacts_specdata -( - tid BIGINT PRIMARY KEY, - test_mode SMALLINT, - state_code SMALLINT NOT NULL DEFAULT 8, - regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL -); -INSERT INTO test.cte_del_xacts_specdata VALUES(1, 1, 1, current_timestamp); /* for subquery test */ - -/* create 2 partitions */ -SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); - -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - -SELECT pathman.drop_partitions('test.cte_del_xacts'); /* now drop partitions */ - -/* create 1 partition */ -SELECT pathman.create_range_partitions('test.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '1 year'::interval); - -/* parent enabled! */ -SELECT pathman.set_enable_parent('test.cte_del_xacts', true); -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - -/* parent disabled! 
*/ -SELECT pathman.set_enable_parent('test.cte_del_xacts', false); -EXPLAIN (COSTS OFF) -WITH tmp AS ( - SELECT tid, test_mode, regtime::DATE AS pdate, state_code - FROM test.cte_del_xacts_specdata) -DELETE FROM test.cte_del_xacts t USING tmp -WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; - -/* create stub pl/PgSQL function */ -CREATE OR REPLACE FUNCTION test.cte_del_xacts_stab(name TEXT) -RETURNS smallint AS -$$ -begin - return 2::smallint; -end -$$ -LANGUAGE plpgsql STABLE; - -/* test subquery planning */ -WITH tmp AS ( - SELECT tid FROM test.cte_del_xacts_specdata - WHERE state_code != test.cte_del_xacts_stab('test')) -SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; - -/* test subquery planning (one more time) */ -WITH tmp AS ( - SELECT tid FROM test.cte_del_xacts_specdata - WHERE state_code != test.cte_del_xacts_stab('test')) -SELECT * FROM test.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; - -DROP FUNCTION test.cte_del_xacts_stab(TEXT); -DROP TABLE test.cte_del_xacts, test.cte_del_xacts_specdata CASCADE; - /* * Test split and merge @@ -641,17 +557,6 @@ SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ DROP TABLE bool_test CASCADE; -/* Test foreign keys */ -CREATE TABLE test.messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE test.replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES test.messages(id), msg TEXT); -INSERT INTO test.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO test.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); -ALTER TABLE test.replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('test.messages', 'id', 1, 100, 2); -EXPLAIN (COSTS OFF) SELECT * FROM test.messages; -DROP TABLE test.messages, test.replies CASCADE; - /* Special test case (quals generation) -- fixing commit 
f603e6c5 */ CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; @@ -679,27 +584,6 @@ SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; -/* Test recursive CTE */ -CREATE TABLE test.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); -SELECT * FROM create_hash_partitions('test.recursive_cte_test_tbl', 'id', 2); -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||id FROM generate_series(1,100) f(id); -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); -INSERT INTO test.recursive_cte_test_tbl (id, name) SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); -SELECT * FROM test.recursive_cte_test_tbl WHERE id = 5; - -WITH RECURSIVE test AS ( - SELECT min(name) AS name - FROM test.recursive_cte_test_tbl - WHERE id = 5 - UNION ALL - SELECT (SELECT min(name) - FROM test.recursive_cte_test_tbl - WHERE id = 5 AND name > test.name) - FROM test - WHERE name IS NOT NULL) -SELECT * FROM test; - - /* Test create_range_partitions() + relnames */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 53b08871..27510b92 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -79,31 +79,31 @@ SELECT create_hash_partitions('callbacks.abc', 'a', 5); DROP TABLE callbacks.abc CASCADE; /* create table in public schema */ -CREATE TABLE abc(a serial, b int); -SELECT set_init_callback('abc', +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback(jsonb)'); -SELECT create_range_partitions('abc', 'a', 1, 100, 2); +SELECT 
create_range_partitions('callbacks.abc', 'a', 1, 100, 2); -DROP TABLE abc CASCADE; +DROP TABLE callbacks.abc CASCADE; /* test the temprary deletion of callback function */ -CREATE TABLE abc(a serial, b int); -SELECT set_init_callback('abc', +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback(jsonb)'); -SELECT create_range_partitions('abc', 'a', 1, 100, 2); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); -INSERT INTO abc VALUES (201, 0); +INSERT INTO callbacks.abc VALUES (201, 0); DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); -INSERT INTO abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -INSERT INTO abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); -DROP TABLE abc CASCADE; +DROP TABLE callbacks.abc CASCADE; DROP SCHEMA callbacks CASCADE; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql new file mode 100644 index 00000000..04af82f0 --- /dev/null +++ b/sql/pathman_cte.sql @@ -0,0 +1,159 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; + + + +/* + * Test simple CTE queries + */ + +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); + +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; + +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + +DROP TABLE test_cte.range_rel CASCADE; + + +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO 
test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + +DROP TABLE test_cte.hash_rel CASCADE; + + + +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); + +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; + +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ + +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; + +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; + + +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); + +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; 
+ + + +DROP SCHEMA test_cte CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index a2032815..392b3a7a 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -3,6 +3,8 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; + + /* Check primary keys generation */ CREATE TABLE fkeys.test_ref(comment TEXT UNIQUE); INSERT INTO fkeys.test_ref VALUES('test'); @@ -25,5 +27,29 @@ INSERT INTO fkeys.test_fkey VALUES(1, 'test'); SELECT drop_partitions('fkeys.test_fkey'); +/* Try to partition table that's being referenced */ +CREATE TABLE fkeys.messages( + id SERIAL PRIMARY KEY, + msg TEXT); + +CREATE TABLE fkeys.replies( + id SERIAL PRIMARY KEY, + message_id INTEGER REFERENCES fkeys.messages(id), + msg TEXT); + +INSERT INTO fkeys.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; +INSERT INTO fkeys.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; + +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* not ok */ + +ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; + +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ +EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; + +DROP TABLE fkeys.messages, fkeys.replies CASCADE; + + + DROP SCHEMA fkeys CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 19491b6d..a5798646 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -11,35 +11,42 @@ INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_serie CREATE UNIQUE INDEX ON test_inserts.storage(a); SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); -/* - * attach before and after insertion triggers to partitioned table - */ -/* prepare trigger functions */ +/* attach before and after insertion triggers to partitioned table */ CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS 
TRIGGER AS $$ BEGIN RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; RETURN new; END; $$ LANGUAGE plpgsql; + CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; RETURN new; END; $$ LANGUAGE plpgsql; + /* set triggers on existing first partition and new generated partitions */ CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); + CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); + +/* set partition init callback */ CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ BEGIN - EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s for each row execute procedure test_inserts.print_cols_before_change();', args->>'partition_schema', args->>'partition'); - EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s for each row execute procedure test_inserts.print_cols_after_change();', args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); END; $$ LANGUAGE plpgsql; SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + /* we don't support ON CONLICT */ INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') ON CONFLICT (a) DO UPDATE SET a = 3; diff --git 
a/sql/pathman_interval.sql b/sql/pathman_interval.sql index f5e1694f..451984ad 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -1,80 +1,119 @@ \set VERBOSITY terse + CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_interval; + + /* Range partitions for INTEGER type */ -CREATE TABLE abc (id SERIAL); -SELECT create_range_partitions('abc', 'id', 0, 100, 2); -SELECT set_interval('abc', NULL::INTEGER); +CREATE TABLE test_interval.abc (id SERIAL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::INTEGER); + /* pg_pathman shouldn't be able to create a new partition */ -INSERT INTO abc VALUES (250); +INSERT INTO test_interval.abc VALUES (250); + /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); + /* Set a negative interval */ -SELECT set_interval('abc', -100); -/* We also shouldn't be able to set a trivial interval directly in pathman_config table */ -UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'abc'::REGCLASS; +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + /* Set a normal interval */ -SELECT set_interval('abc', 1000); -INSERT INTO abc VALUES (250); +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; -DROP TABLE abc CASCADE; + +DROP TABLE test_interval.abc CASCADE; + /* Range partitions for DATE type */ -CREATE TABLE abc (dt DATE NOT NULL); -SELECT create_range_partitions('abc', 'dt', '2016-01-01'::DATE, '1 day'::INTERVAL, 2); -SELECT set_interval('abc', NULL::INTERVAL); +CREATE TABLE test_interval.abc (dt DATE NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'dt', + '2016-01-01'::DATE, '1 day'::INTERVAL, 2); +SELECT set_interval('test_interval.abc', 
NULL::INTERVAL); + /* Set a trivial interval */ -SELECT set_interval('abc', '1 second'::INTERVAL); +SELECT set_interval('test_interval.abc', '1 second'::INTERVAL); + /* Set a normal interval */ -SELECT set_interval('abc', '1 month'::INTERVAL); +SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); + SELECT * FROM pathman_config; -DROP TABLE abc CASCADE; + +DROP TABLE test_interval.abc CASCADE; + /* Range partitions for FLOAT4 type */ -CREATE TABLE abc (x FLOAT4 NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); -SELECT set_interval('abc', NULL::FLOAT4); +CREATE TABLE test_interval.abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::FLOAT4); + /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); + /* Set NaN float as interval */ -SELECT set_interval('abc', 'NaN'::FLOAT4); +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT4); + /* Set INF float as interval */ -SELECT set_interval('abc', 'Infinity'::FLOAT4); +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT4); + /* Set a normal interval */ -SELECT set_interval('abc', 100); -DROP TABLE abc CASCADE; +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + /* Range partitions for FLOAT8 type */ -CREATE TABLE abc (x FLOAT8 NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); -SELECT set_interval('abc', NULL::FLOAT8); +CREATE TABLE test_interval.abc (x FLOAT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::FLOAT8); + /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); + /* Set NaN float as interval */ -SELECT set_interval('abc', 'NaN'::FLOAT8); +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT8); + /* Set INF float as interval */ -SELECT set_interval('abc', 
'Infinity'::FLOAT8); +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT8); + /* Set a normal interval */ -SELECT set_interval('abc', 100); -DROP TABLE abc CASCADE; +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + /* Range partitions for NUMERIC type */ -CREATE TABLE abc (x NUMERIC NOT NULL); -SELECT create_range_partitions('abc', 'x', 0, 100, 2); -SELECT set_interval('abc', NULL::NUMERIC); +CREATE TABLE test_interval.abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::NUMERIC); + /* Set a trivial interval */ -SELECT set_interval('abc', 0); +SELECT set_interval('test_interval.abc', 0); + /* Set NaN numeric as interval */ -SELECT set_interval('abc', 'NaN'::NUMERIC); +SELECT set_interval('test_interval.abc', 'NaN'::NUMERIC); + /* Set a normal interval */ -SELECT set_interval('abc', 100); -DROP TABLE abc CASCADE; +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + /* Hash partitioned table shouldn't accept any interval value */ -CREATE TABLE abc (id SERIAL); -SELECT create_hash_partitions('abc', 'id', 3); -SELECT set_interval('abc', 100); -SELECT set_interval('abc', NULL::INTEGER); -DROP TABLE abc CASCADE; +CREATE TABLE test_interval.abc (id SERIAL); +SELECT create_hash_partitions('test_interval.abc', 'id', 3); +SELECT set_interval('test_interval.abc', 100); +SELECT set_interval('test_interval.abc', NULL::INTEGER); + +DROP TABLE test_interval.abc CASCADE; + + +DROP SCHEMA test_interval CASCADE; DROP EXTENSION pg_pathman; From 301f6d21a0b5a892b7982c94107b866c4f3e0f17 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 12 Feb 2017 02:25:17 +0300 Subject: [PATCH 0198/1124] fix bugs in create_hash_partitions_internal() and deconstruct_text_array(), tests --- expected/pathman_basic.out | 3 --- expected/pathman_calamity.out | 17 +++++++++--- sql/pathman_basic.sql | 2 -- 
sql/pathman_calamity.sql | 14 ++++++++-- src/pl_hash_funcs.c | 51 ++++++++++++++++++++--------------- 5 files changed, 54 insertions(+), 33 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0352cf4d..3845ad48 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2178,9 +2178,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 /* Test create_range_partitions() + relnames */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); -SELECT create_hash_partitions('test.provided_part_names', 'id', 2, - relnames := ARRAY[]::TEXT[]); /* not ok */ -ERROR: size of array 'relnames' must be equal to 'partitions_count' SELECT create_hash_partitions('test.provided_part_names', 'id', 2, relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ create_hash_partitions diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ae3d1434..80cefff3 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -102,11 +102,20 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY['calamity.p1']::TEXT[]); -ERROR: size of array 'relnames' must be equal to 'partitions_count' + relnames := ARRAY[]::TEXT[]); /* not ok */ +ERROR: 'relnames' and 'tablespaces' may not be empty SELECT create_hash_partitions('calamity.part_test', 'val', 2, - tablespaces := ARRAY['abcd']::TEXT[]); -ERROR: size of array 'tablespaces' must be equal to 'partitions_count' + relnames := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: 'relnames' and 'tablespaces' may not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: 'relnames' and 'tablespaces' may contain only 1 dimension +SELECT 
create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'relnames' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 179402f6..eecbd167 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -587,8 +587,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 /* Test create_range_partitions() + relnames */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); -SELECT create_hash_partitions('test.provided_part_names', 'id', 2, - relnames := ARRAY[]::TEXT[]); /* not ok */ SELECT create_hash_partitions('test.provided_part_names', 'id', 2, relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ /* list partitions */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 597a103d..67b4bb9b 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -37,9 +37,19 @@ DELETE FROM calamity.part_test; /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY['calamity.p1']::TEXT[]); + relnames := ARRAY[]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + relnames := ARRAY['calamity.p1']::TEXT[]); /* not ok 
*/ + SELECT create_hash_partitions('calamity.part_test', 'val', 2, - tablespaces := ARRAY['abcd']::TEXT[]); + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 810c6d76..9ac06cbd 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -22,7 +22,7 @@ #include "utils/array.h" -static char **deconstruct_text_array(Datum arr, int *num_elems); +static char **deconstruct_text_array(Datum array, int *array_size); /* Function declarations */ @@ -80,17 +80,13 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(4)) tablespaces = deconstruct_text_array(PG_GETARG_DATUM(4), &tablespaces_size); - /* If both arrays are present, check that their lengths are equal */ - if (relnames && tablespaces && relnames_size != tablespaces_size) - elog(ERROR, "sizes of arrays 'relnames' and 'tablespaces' are different"); - /* Validate size of 'relnames' */ if (relnames && relnames_size != partitions_count) - elog(ERROR, "size of array 'relnames' must be equal to 'partitions_count'"); + elog(ERROR, "size of 'relnames' must be equal to 'partitions_count'"); /* Validate size of 'tablespaces' */ if (tablespaces && tablespaces_size != partitions_count) - elog(ERROR, "size of array 'tablespaces' must be equal to 'partitions_count'"); + elog(ERROR, "size of 'tablespaces' must be equal to 'partitions_count'"); /* Convert partition names into RangeVars */ if (relnames) @@ -195,43 +191,54 @@ build_hash_condition(PG_FUNCTION_ARGS) /* Convert Datum into CSTRING array */ static char ** -deconstruct_text_array(Datum arr, int *num_elems) +deconstruct_text_array(Datum array, int *array_size) { - ArrayType *arrayval; + ArrayType *array_ptr = DatumGetArrayTypeP(array); int16 elemlen; bool elembyval; char elemalign; + Datum *elem_values; bool *elem_nulls; - int16 i; - arrayval = DatumGetArrayTypeP(arr); + int arr_size = 0; - Assert(ARR_ELEMTYPE(arrayval) == 
TEXTOID); + /* Check type invariant */ + Assert(ARR_ELEMTYPE(array_ptr) == TEXTOID); - get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), + /* Check number of dimensions */ + if (ARR_NDIM(array_ptr) > 1) + elog(ERROR, "'relnames' and 'tablespaces' may contain only 1 dimension"); + + get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), &elemlen, &elembyval, &elemalign); - deconstruct_array(arrayval, - ARR_ELEMTYPE(arrayval), + + deconstruct_array(array_ptr, + ARR_ELEMTYPE(array_ptr), elemlen, elembyval, elemalign, - &elem_values, &elem_nulls, num_elems); + &elem_values, &elem_nulls, &arr_size); - /* If there are actual values then convert them into CSTRINGs */ - if (num_elems > 0) + /* If there are actual values, convert them into CSTRINGs */ + if (arr_size > 0) { - char **strings = palloc(sizeof(char *) * *num_elems); + char **strings = palloc(arr_size * sizeof(char *)); + int i; - for (i = 0; i < *num_elems; i++) + for (i = 0; i < arr_size; i++) { if (elem_nulls[i]) - elog(ERROR, "partition name and tablespace arrays " - "may not contain nulls"); + elog(ERROR, "'relnames' and 'tablespaces' may not contain NULLs"); strings[i] = TextDatumGetCString(elem_values[i]); } + /* Return an array and it's size */ + *array_size = arr_size; return strings; } + /* Else emit ERROR */ + else elog(ERROR, "'relnames' and 'tablespaces' may not be empty"); + /* Keep compiler happy */ return NULL; } From a69f782a6f579d665b438844895b4489589441bd Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 12 Feb 2017 02:29:47 +0300 Subject: [PATCH 0199/1124] =?UTF-8?q?fix=20warning:=20implicit=20declarati?= =?UTF-8?q?on=20of=20function=20=E2=80=98get=5Frel=5Fname=5For=5Frelid?= =?UTF-8?q?=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/xact_handling.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/xact_handling.c b/src/xact_handling.c index 260110dc..7eae0f25 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -9,6 +9,7 @@ */ 
#include "xact_handling.h" +#include "utils.h" #include "postgres.h" #include "access/transam.h" From 5de8506e16803f6c71a09823443f05078ae71733 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 12 Feb 2017 04:17:02 +0300 Subject: [PATCH 0200/1124] improve README.md, create_hash_partitions(): rename 'relnames' to 'partition_names', more tests --- README.md | 12 ++++++----- expected/pathman_basic.out | 4 ++-- expected/pathman_calamity.out | 39 +++++++++++++++++++++++++++------- expected/pathman_callbacks.out | 19 ----------------- hash.sql | 6 +++--- sql/pathman_basic.sql | 4 ++-- sql/pathman_calamity.sql | 23 ++++++++++++++++---- sql/pathman_callbacks.sql | 8 ------- src/partition_creation.c | 11 +++++++--- src/pl_hash_funcs.c | 36 +++++++++++++++---------------- 10 files changed, 90 insertions(+), 72 deletions(-) diff --git a/README.md b/README.md index 50caee65..0febc6e1 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,9 @@ Done! Now it's time to setup your partitioning schemes. create_hash_partitions(relation REGCLASS, attribute TEXT, partitions_count INTEGER, - partition_data BOOLEAN DEFAULT TRUE) + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) ``` Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). @@ -148,9 +150,9 @@ Same as above, but for a RANGE-partitioned table. 
### Post-creation partition management ```plpgsql -replace_hash_partition(old_partition REGCLASS, - new_partition REGCLASS, - lock_parent BOOL DEFAULT TRUE) +replace_hash_partition(old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) ``` Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. @@ -168,7 +170,7 @@ merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed. ```plpgsql -merge_range_partitions(partitions REGCLASS[]) +merge_range_partitions(partitions REGCLASS[]) ``` Merge several adjacent RANGE partitions (partitions must be specified in ascending or descending order). All the data will be accumulated in the first partition. diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 3845ad48..a5902c58 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2175,11 +2175,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2 Filter: (c1 < 2500) (12 rows) -/* Test create_range_partitions() + relnames */ +/* Test create_range_partitions() + partition_names */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); SELECT create_hash_partitions('test.provided_part_names', 'id', 2, - relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ create_hash_partitions ------------------------ 2 diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 80cefff3..785509b7 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -102,20 +102,43 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; /* test function create_hash_partitions() */ 
SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[]::TEXT[]); /* not ok */ -ERROR: 'relnames' and 'tablespaces' may not be empty + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: 'partition_names' and 'tablespaces' may not be empty SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ -ERROR: 'relnames' and 'tablespaces' may not contain NULLs + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: 'partition_names' and 'tablespaces' may not contain NULLs SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ -ERROR: 'relnames' and 'tablespaces' may contain only 1 dimension + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: 'partition_names' and 'tablespaces' may contain only 1 dimension SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY['calamity.p1']::TEXT[]); /* not ok */ -ERROR: size of 'relnames' must be equal to 'partitions_count' + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' SELECT create_hash_partitions('calamity.part_test', 'val', 2, tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + 
add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 4a5b1a47..4a4c7e49 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -167,25 +167,6 @@ WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4", DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 5 other objects -/* create table in public schema */ -CREATE TABLE callbacks.abc(a serial, b int); -SELECT set_init_callback('callbacks.abc', - 'callbacks.abc_on_part_created_callback(jsonb)'); - set_init_callback -------------------- - -(1 row) - -SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "callbacks", "partition_schema": "callbacks"} -WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "callbacks", "partition_schema": "callbacks"} - create_range_partitions -------------------------- - 2 -(1 row) - -DROP TABLE callbacks.abc CASCADE; -NOTICE: drop cascades to 2 other objects /* test the temprary deletion of callback function */ CREATE TABLE callbacks.abc(a serial, b int); SELECT set_init_callback('callbacks.abc', diff --git a/hash.sql b/hash.sql index 1b99784b..59a2ae64 100644 --- a/hash.sql +++ b/hash.sql @@ -16,7 +16,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( attribute TEXT, partitions_count INTEGER, 
partition_data BOOLEAN DEFAULT TRUE, - relnames TEXT[] DEFAULT NULL, + partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ @@ -42,7 +42,7 @@ BEGIN PERFORM @extschema@.create_hash_partitions_internal(parent_relid, attribute, partitions_count, - relnames, + partition_names, tablespaces); /* Notify backend about changes */ @@ -281,7 +281,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, partitions_count INTEGER, - relnames TEXT[] DEFAULT NULL, + partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' LANGUAGE C; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index eecbd167..6dc25deb 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -584,11 +584,11 @@ SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; -/* Test create_range_partitions() + relnames */ +/* Test create_range_partitions() + partition_names */ CREATE TABLE test.provided_part_names(id INT NOT NULL); INSERT INTO test.provided_part_names SELECT generate_series(1, 10); SELECT create_hash_partitions('test.provided_part_names', 'id', 2, - relnames := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ /* list partitions */ SELECT partition FROM pathman_partition_list WHERE parent = 'test.provided_part_names'::REGCLASS diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 67b4bb9b..a87cae21 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -37,21 +37,36 @@ DELETE FROM calamity.part_test; /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[]::TEXT[]); /* not ok */ + partition_names := ARRAY[]::TEXT[]); /* not ok */ 
SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, - relnames := ARRAY['calamity.p1']::TEXT[]); /* not ok */ + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); +select add_range_partition(' calamity.no_naming_seq', 10, 20); +DROP TABLE calamity.no_naming_seq CASCADE; + + +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +DROP TABLE calamity.double_inf CASCADE; + + /* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 27510b92..1fce2eaf 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -78,14 +78,6 @@ SELECT create_hash_partitions('callbacks.abc', 'a', 5); DROP TABLE callbacks.abc CASCADE; -/* create table in public schema */ -CREATE TABLE callbacks.abc(a serial, b int); -SELECT set_init_callback('callbacks.abc', - 'callbacks.abc_on_part_created_callback(jsonb)'); -SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 
2); - -DROP TABLE callbacks.abc CASCADE; - /* test the temprary deletion of callback function */ CREATE TABLE callbacks.abc(a serial, b int); SELECT set_init_callback('callbacks.abc', diff --git a/src/partition_creation.c b/src/partition_creation.c index 9ee5f7cc..704ac570 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -570,14 +570,19 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { Datum part_num; Oid part_seq_relid; + char *part_seq_relname; Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ char *relname; int attempts_cnt = 1000; - part_seq_relid = get_relname_relid(build_sequence_name_internal(parent_relid), - parent_nsp); + part_seq_relname = build_sequence_name_internal(parent_relid); + part_seq_relid = get_relname_relid(part_seq_relname, parent_nsp); + + /* Could not find part number generating sequence */ + if (!OidIsValid(part_seq_relid)) + elog(ERROR, "auto naming sequence \"%s\" does not exist", part_seq_relname); /* Do we have to escalate privileges? 
*/ if (need_priv_escalation) @@ -1161,7 +1166,7 @@ build_raw_range_check_tree(char *attname, /* (-inf, +inf) */ if (and_oper->args == NIL) - elog(ERROR, "cannot create infinite range constraint"); + elog(ERROR, "cannot create partition with range (-inf, +inf)"); return (Node *) and_oper; } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 9ac06cbd..55540196 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -58,11 +58,11 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) i; /* Partition names and tablespaces */ - char **relnames = NULL, - **tablespaces = NULL; - int relnames_size = 0, - tablespaces_size = 0; - RangeVar **rangevars = NULL; + char **partition_names = NULL, + **tablespaces = NULL; + int partition_names_size = 0, + tablespaces_size = 0; + RangeVar **rangevars = NULL; /* Check that there's no partitions yet */ if (get_pathman_relation_info(parent_relid)) @@ -74,27 +74,27 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) /* Extract partition names */ if (!PG_ARGISNULL(3)) - relnames = deconstruct_text_array(PG_GETARG_DATUM(3), &relnames_size); + partition_names = deconstruct_text_array(PG_GETARG_DATUM(3), &partition_names_size); /* Extract partition tablespaces */ if (!PG_ARGISNULL(4)) tablespaces = deconstruct_text_array(PG_GETARG_DATUM(4), &tablespaces_size); - /* Validate size of 'relnames' */ - if (relnames && relnames_size != partitions_count) - elog(ERROR, "size of 'relnames' must be equal to 'partitions_count'"); + /* Validate size of 'partition_names' */ + if (partition_names && partition_names_size != partitions_count) + elog(ERROR, "size of 'partition_names' must be equal to 'partitions_count'"); /* Validate size of 'tablespaces' */ if (tablespaces && tablespaces_size != partitions_count) elog(ERROR, "size of 'tablespaces' must be equal to 'partitions_count'"); /* Convert partition names into RangeVars */ - if (relnames) + if (partition_names) { - rangevars = palloc(sizeof(RangeVar) * relnames_size); - for (i = 0; i < 
relnames_size; i++) + rangevars = palloc(sizeof(RangeVar) * partition_names_size); + for (i = 0; i < partition_names_size; i++) { - List *nl = stringToQualifiedNameList(relnames[i]); + List *nl = stringToQualifiedNameList(partition_names[i]); rangevars[i] = makeRangeVarFromNameList(nl); } @@ -113,9 +113,9 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) } /* Free arrays */ - DeepFreeArray(relnames, relnames_size); + DeepFreeArray(partition_names, partition_names_size); DeepFreeArray(tablespaces, tablespaces_size); - DeepFreeArray(rangevars, relnames_size); + DeepFreeArray(rangevars, partition_names_size); PG_RETURN_VOID(); } @@ -208,7 +208,7 @@ deconstruct_text_array(Datum array, int *array_size) /* Check number of dimensions */ if (ARR_NDIM(array_ptr) > 1) - elog(ERROR, "'relnames' and 'tablespaces' may contain only 1 dimension"); + elog(ERROR, "'partition_names' and 'tablespaces' may contain only 1 dimension"); get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), &elemlen, &elembyval, &elemalign); @@ -227,7 +227,7 @@ deconstruct_text_array(Datum array, int *array_size) for (i = 0; i < arr_size; i++) { if (elem_nulls[i]) - elog(ERROR, "'relnames' and 'tablespaces' may not contain NULLs"); + elog(ERROR, "'partition_names' and 'tablespaces' may not contain NULLs"); strings[i] = TextDatumGetCString(elem_values[i]); } @@ -237,7 +237,7 @@ deconstruct_text_array(Datum array, int *array_size) return strings; } /* Else emit ERROR */ - else elog(ERROR, "'relnames' and 'tablespaces' may not be empty"); + else elog(ERROR, "'partition_names' and 'tablespaces' may not be empty"); /* Keep compiler happy */ return NULL; From e1336990b6f066f16c1a548829fa15e6cefd6250 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 01:56:35 +0300 Subject: [PATCH 0201/1124] add 'pathman_bgw' regression test --- Makefile | 3 +- expected/pathman_bgw.out | 114 +++++++++++++++++++++++++++++++++ expected/pathman_callbacks.out | 6 +- expected/pathman_inserts.out | 2 +- sql/pathman_bgw.sql 
| 58 +++++++++++++++++ sql/pathman_callbacks.sql | 6 +- sql/pathman_inserts.sql | 2 +- 7 files changed, 182 insertions(+), 9 deletions(-) create mode 100644 expected/pathman_bgw.out create mode 100644 sql/pathman_bgw.sql diff --git a/Makefile b/Makefile index cf83a381..144538d0 100644 --- a/Makefile +++ b/Makefile @@ -22,13 +22,14 @@ PGFILEDESC = "pg_pathman - partitioning tool" REGRESS = pathman_basic \ pathman_cte \ + pathman_bgw \ + pathman_inserts \ pathman_domains \ pathman_interval \ pathman_callbacks \ pathman_foreign_keys \ pathman_permissions \ pathman_rowmarks \ - pathman_inserts \ pathman_runtime_nodes \ pathman_utility_stmt_hooking \ pathman_calamity diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out new file mode 100644 index 00000000..1b6f6878 --- /dev/null +++ b/expected/pathman_bgw.out @@ -0,0 +1,114 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_bgw; +/* + * Tests for SpawnPartitionsWorker + */ +/* int4, size of Datum == 4 */ +CREATE TABLE test_bgw.test_1(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_1', 'val', 1, 5, 2); +NOTICE: sequence "test_1_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_1', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_1 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | partattr | range_min | range_max +-----------------+-------------------+----------+----------+-----------+----------- + test_bgw.test_1 | test_bgw.test_1_1 | 2 | val | 1 | 6 + test_bgw.test_1 | test_bgw.test_1_2 | 2 | val | 6 | 11 + test_bgw.test_1 | test_bgw.test_1_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +/* int8, size of Datum == 8 */ +CREATE TABLE test_bgw.test_2(val 
INT8 NOT NULL); +SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); +NOTICE: sequence "test_2_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_2', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_2 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | partattr | range_min | range_max +-----------------+-------------------+----------+----------+-----------+----------- + test_bgw.test_2 | test_bgw.test_2_1 | 2 | val | 1 | 6 + test_bgw.test_2 | test_bgw.test_2_2 | 2 | val | 6 | 11 + test_bgw.test_2 | test_bgw.test_2_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_2 CASCADE; +NOTICE: drop cascades to 3 other objects +/* numeric, size of Datum == var */ +CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); +SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); +NOTICE: sequence "test_3_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_3', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_3 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | partattr | range_min | range_max +-----------------+-------------------+----------+----------+-----------+----------- + test_bgw.test_3 | test_bgw.test_3_1 | 2 | val | 1 | 6 + test_bgw.test_3 | test_bgw.test_3_2 | 2 | val | 6 | 11 + test_bgw.test_3 | test_bgw.test_3_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_3 CASCADE; +NOTICE: drop cascades to 3 other objects +/* date, size of Datum == var */ +CREATE TABLE test_bgw.test_4(val DATE NOT NULL); +SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); 
+NOTICE: sequence "test_4_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_4', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_4 VALUES ('20170215'); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | partattr | range_min | range_max +-----------------+-------------------+----------+----------+------------+------------ + test_bgw.test_4 | test_bgw.test_4_1 | 2 | val | 02-13-2017 | 02-14-2017 + test_bgw.test_4 | test_bgw.test_4_2 | 2 | val | 02-14-2017 | 02-15-2017 + test_bgw.test_4 | test_bgw.test_4_3 | 2 | val | 02-15-2017 | 02-16-2017 +(3 rows) + +DROP TABLE test_bgw.test_4 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA test_bgw CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 4a4c7e49..4903b7b4 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -184,10 +184,10 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", 2 (1 row) -INSERT INTO callbacks.abc VALUES (201, 0); +INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); -INSERT INTO callbacks.abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ @@ -195,7 +195,7 @@ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; 
-INSERT INTO callbacks.abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 4 other objects diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index f2ee7245..3b713b0b 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -31,7 +31,7 @@ CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); -/* set partition init callback */ +/* set partition init callback that will add triggers to partitions */ CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ BEGIN EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql new file mode 100644 index 00000000..90165f4c --- /dev/null +++ b/sql/pathman_bgw.sql @@ -0,0 +1,58 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_bgw; + + +/* + * Tests for SpawnPartitionsWorker + */ + +/* int4, size of Datum == 4 */ +CREATE TABLE test_bgw.test_1(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_1', 'val', 1, 5, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_1', true); +INSERT INTO test_bgw.test_1 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_1 CASCADE; + + +/* int8, size of Datum == 8 */ +CREATE TABLE test_bgw.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); + 
+SELECT set_spawn_using_bgw('test_bgw.test_2', true); +INSERT INTO test_bgw.test_2 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_2 CASCADE; + + +/* numeric, size of Datum == var */ +CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); +SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_3', true); +INSERT INTO test_bgw.test_3 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_3 CASCADE; + + +/* date, size of Datum == var */ +CREATE TABLE test_bgw.test_4(val DATE NOT NULL); +SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_4', true); +INSERT INTO test_bgw.test_4 VALUES ('20170215'); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_4 CASCADE; + + + +DROP SCHEMA test_bgw CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 1fce2eaf..a97d3f57 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -84,16 +84,16 @@ SELECT set_init_callback('callbacks.abc', 'callbacks.abc_on_part_created_callback(jsonb)'); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); -INSERT INTO callbacks.abc VALUES (201, 0); +INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); -INSERT INTO callbacks.abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -INSERT INTO callbacks.abc VALUES (301, 0); +INSERT INTO callbacks.abc VALUES (301, 0); 
/* +1 new partition */ DROP TABLE callbacks.abc CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index a5798646..53d47abf 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -33,7 +33,7 @@ CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); -/* set partition init callback */ +/* set partition init callback that will add triggers to partitions */ CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ BEGIN EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s From 69abd812bef7f831d2d1ddc548f483d9ef9d7692 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 13:59:09 +0300 Subject: [PATCH 0202/1124] set CreateStmt->partition_info if it's PgPro EE --- src/partition_creation.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 704ac570..c502de64 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -716,8 +716,9 @@ create_single_partition_internal(Oid parent_relid, create_stmt.oncommit = ONCOMMIT_NOOP; create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; -#ifdef PGPRO_VERSION - create_stmt.partition_info = NULL; + +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 + create_stmt.partition_info = NULL; #endif /* Do we have to escalate privileges? 
*/ From 176572e82201d4ef739e21c8e73ac8d182690c57 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 15:00:55 +0300 Subject: [PATCH 0203/1124] transform tuples for COPY TO/FROM as well --- expected/pathman_utility_stmt_hooking.out | 74 +++++++++++++++++++++-- sql/pathman_utility_stmt_hooking.sql | 49 ++++++++++++++- src/partition_filter.c | 39 ++++++++---- src/utility_stmt_hooking.c | 22 +++++-- 4 files changed, 160 insertions(+), 24 deletions(-) diff --git a/expected/pathman_utility_stmt_hooking.out b/expected/pathman_utility_stmt_hooking.out index 94dbee45..aadd6def 100644 --- a/expected/pathman_utility_stmt_hooking.out +++ b/expected/pathman_utility_stmt_hooking.out @@ -160,14 +160,76 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; /* COPY TO (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; ERROR: partitioned column's value should not be NULL -/* delete all data */ -SELECT drop_partitions('copy_stmt_hooking.test', true); -NOTICE: function copy_stmt_hooking.test_upd_trig_func() does not exist, skipping - drop_partitions ------------------ - 5 +/* Drop column (make use of 'tuple_map') */ +ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; +/* create new partition */ +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + get_number_of_partitions +-------------------------- + 5 (1 row) +INSERT INTO copy_stmt_hooking.test (val, c3, c4) VALUES (26, 1, 2); +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + get_number_of_partitions +-------------------------- + 6 +(1 row) + +/* check number of columns in 'test' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; + count +------- + 4 +(1 row) + +/* check number of columns in 'test_6' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; + count +------- + 3 +(1 row) + +/* COPY FROM (test transformed tuples) */ +COPY 
copy_stmt_hooking.test (val, c3, c4) TO stdout; +1 0 0 +6 0 0 +7 0 0 +11 0 0 +16 0 0 +21 0 0 +26 1 2 +/* COPY TO (insert into table with dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +/* COPY TO (insert into table without dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +/* check tuples from last partition (without dropped column) */ +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + val | c3 | c4 | tableoid +-----+----+----+-------------------------- + 1 | 0 | 0 | copy_stmt_hooking.test_1 + 2 | 1 | 2 | copy_stmt_hooking.test_1 + 6 | 0 | 0 | copy_stmt_hooking.test_2 + 7 | 0 | 0 | copy_stmt_hooking.test_2 + 11 | 0 | 0 | copy_stmt_hooking.test_3 + 16 | 0 | 0 | copy_stmt_hooking.test_4 + 21 | 0 | 0 | copy_stmt_hooking.test_5 + 26 | 1 | 2 | copy_stmt_hooking.test_6 + 27 | 1 | 2 | copy_stmt_hooking.test_6 +(9 rows) + +/* drop modified table */ +DROP TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 6 other objects +/* create table again */ +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +CREATE INDEX ON copy_stmt_hooking.test(val); /* test for HASH partitioning */ SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); create_hash_partitions diff --git a/sql/pathman_utility_stmt_hooking.sql b/sql/pathman_utility_stmt_hooking.sql index 89e9225c..d9126e3f 100644 --- a/sql/pathman_utility_stmt_hooking.sql +++ b/sql/pathman_utility_stmt_hooking.sql @@ -6,6 +6,7 @@ CREATE EXTENSION pg_pathman; * Test COPY */ CREATE SCHEMA copy_stmt_hooking; + CREATE TABLE copy_stmt_hooking.test( val int not null, comment text, @@ -75,8 +76,52 @@ test_no_part \. 
-/* delete all data */ -SELECT drop_partitions('copy_stmt_hooking.test', true); +/* Drop column (make use of 'tuple_map') */ +ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; + + +/* create new partition */ +SELECT get_number_of_partitions('copy_stmt_hooking.test'); +INSERT INTO copy_stmt_hooking.test (val, c3, c4) VALUES (26, 1, 2); +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + +/* check number of columns in 'test' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; + +/* check number of columns in 'test_6' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; + + +/* COPY FROM (test transformed tuples) */ +COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; + +/* COPY TO (insert into table with dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +2 1 2 +\. + +/* COPY TO (insert into table without dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +27 1 2 +\. 
+ +/* check tuples from last partition (without dropped column) */ +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + + +/* drop modified table */ +DROP TABLE copy_stmt_hooking.test CASCADE; + + +/* create table again */ +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +CREATE INDEX ON copy_stmt_hooking.test(val); /* test for HASH partitioning */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 45efb9c7..daed5d43 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -177,12 +177,12 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) { - /* Close partitions and their indices if asked to */ + HASH_SEQ_STATUS stat; + ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ + + /* Close partitions and free free conversion-related stuff */ if (close_rels) { - HASH_SEQ_STATUS stat; - ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - hash_seq_init(&stat, parts_storage->result_rels_table); while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) { @@ -191,14 +191,31 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) heap_close(rri_holder->result_rel_info->ri_RelationDesc, parts_storage->heap_close_lock_mode); - /* Drop TupleConversionMap as well as TupleDescs */ - if (rri_holder->tuple_map) - { - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); + /* Skip if there's no map */ + if (!rri_holder->tuple_map) + continue; - free_conversion_map(rri_holder->tuple_map); - } + FreeTupleDesc(rri_holder->tuple_map->indesc); + FreeTupleDesc(rri_holder->tuple_map->outdesc); + + free_conversion_map(rri_holder->tuple_map); + } + } + + /* Else just free conversion-related stuff */ + else + { + hash_seq_init(&stat, parts_storage->result_rels_table); + while ((rri_holder = 
(ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + { + /* Skip if there's no map */ + if (!rri_holder->tuple_map) + continue; + + FreeTupleDesc(rri_holder->tuple_map->indesc); + FreeTupleDesc(rri_holder->tuple_map->outdesc); + + free_conversion_map(rri_holder->tuple_map); } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 7d29972f..92d2081f 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -533,7 +533,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, Oid tuple_oid = InvalidOid; const PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder_child; + ResultRelInfoHolder *rri_holder; ResultRelInfo *child_result_rel; CHECK_FOR_INTERRUPTS(); @@ -553,14 +553,26 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, elog(ERROR, ERR_PART_ATTR_NULL); /* Search for a matching partition */ - rri_holder_child = select_partition_for_insert(prel, &parts_storage, - values[prel->attnum - 1], - prel->atttype, estate); - child_result_rel = rri_holder_child->result_rel_info; + rri_holder = select_partition_for_insert(prel, &parts_storage, + values[prel->attnum - 1], + prel->atttype, estate); + child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; /* And now we can form the input tuple. 
*/ tuple = heap_form_tuple(tupDesc, values, nulls); + + /* If there's a transform map, rebuild the tuple */ + if (rri_holder->tuple_map) + { + HeapTuple tuple_old; + + /* TODO: use 'tuple_map' directly instead of do_convert_tuple() */ + tuple_old = tuple; + tuple = do_convert_tuple(tuple, rri_holder->tuple_map); + heap_freetuple(tuple_old); + } + if (tuple_oid != InvalidOid) HeapTupleSetOid(tuple, tuple_oid); From 15e574e73d70cf959447f7b12a6b600d1b3bc937 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 15:24:06 +0300 Subject: [PATCH 0204/1124] test FREEZE in pathman_utility_stmt_hooking --- expected/pathman_utility_stmt_hooking.out | 5 ++++- sql/pathman_utility_stmt_hooking.sql | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/expected/pathman_utility_stmt_hooking.out b/expected/pathman_utility_stmt_hooking.out index aadd6def..f6642d1b 100644 --- a/expected/pathman_utility_stmt_hooking.out +++ b/expected/pathman_utility_stmt_hooking.out @@ -157,9 +157,12 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; 21 | test_no_part | 0 | 0 (1 row) -/* COPY TO (partitioned column is not specified) */ +/* COPY FROM (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; ERROR: partitioned column's value should not be NULL +/* COPY FROM (we don't support FREEZE) */ +COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); +ERROR: freeze is not supported for partitioned tables /* Drop column (make use of 'tuple_map') */ ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; /* create new partition */ diff --git a/sql/pathman_utility_stmt_hooking.sql b/sql/pathman_utility_stmt_hooking.sql index d9126e3f..15367b86 100644 --- a/sql/pathman_utility_stmt_hooking.sql +++ b/sql/pathman_utility_stmt_hooking.sql @@ -70,11 +70,14 @@ COPY copy_stmt_hooking.test FROM stdin; \. 
SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -/* COPY TO (partitioned column is not specified) */ +/* COPY FROM (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; test_no_part \. +/* COPY FROM (we don't support FREEZE) */ +COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); + /* Drop column (make use of 'tuple_map') */ ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; From b361af6adf0abfc66beb68d20578259ab3a0188f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 17:16:51 +0300 Subject: [PATCH 0205/1124] improve init callback subsystem, tests --- expected/pathman_calamity.out | 71 +++++++++++++++++++++++------ sql/pathman_calamity.sql | 31 ++++++++++--- src/partition_creation.c | 85 ++++++++++++++++++++--------------- src/pl_funcs.c | 3 -- 4 files changed, 130 insertions(+), 60 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 785509b7..bc125cc5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -9,6 +9,12 @@ SELECT debug_capture(); (1 row) +SELECT get_pathman_lib_version(); + get_pathman_lib_version +------------------------- + 10300 +(1 row) + set client_min_messages = NOTICE; /* create table to be partitioned */ CREATE TABLE calamity.part_test(val serial); @@ -396,30 +402,69 @@ SELECT drop_range_partition_expand_next(NULL) IS NULL; t (1 row) -/* check invoke_on_partition_created_callback() for RANGE */ -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); -ERROR: both bounds must be provided for RANGE partition -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); -ERROR: both bounds must be provided for RANGE partition -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, 1); -ERROR: both bounds must be provided for RANGE partition -/* check 
invoke_on_partition_created_callback() for HASH */ +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); invoke_on_partition_created_callback -------------------------------------- (1 row) -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} invoke_on_partition_created_callback -------------------------------------- (1 row) -SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); -ERROR: 'parent_relid' should not be NULL -SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); -ERROR: 'partition' should not be NULL +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": 
"pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); /* check function add_to_pathman_config() -- PHASE #1 */ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index a87cae21..5662daa0 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -7,6 +7,7 @@ CREATE SCHEMA calamity; /* call for coverage test */ set client_min_messages = ERROR; SELECT debug_capture(); +SELECT get_pathman_lib_version(); set client_min_messages = NOTICE; @@ -153,16 +154,32 @@ SELECT stop_concurrent_part_task(1::regclass); SELECT drop_range_partition_expand_next('pg_class'); SELECT drop_range_partition_expand_next(NULL) IS NULL; -/* check 
invoke_on_partition_created_callback() for RANGE */ -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, NULL::int); -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, 1, NULL); -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1, NULL, 1); -/* check invoke_on_partition_created_callback() for HASH */ -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); -SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; + +/* Invalid args */ SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); + +DROP FUNCTION calamity.dummy_cb(arg jsonb); + /* check 
function add_to_pathman_config() -- PHASE #1 */ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ diff --git a/src/partition_creation.c b/src/partition_creation.c index c502de64..7afc3548 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1422,15 +1422,18 @@ invoke_init_callback_internal(init_callback_params *cb_params) { #define JSB_INIT_VAL(value, val_type, val_cstring) \ do { \ - (value)->type = jbvString; \ - (value)->val.string.len = strlen(val_cstring); \ - (value)->val.string.val = val_cstring; \ - pushJsonbValue(&jsonb_state, val_type, (value)); \ - } while (0) - -#define JSB_INIT_NULL_VAL(value, val_type) \ - do { \ - (value)->type = jbvNull; \ + if ((val_cstring) != NULL) \ + { \ + (value)->type = jbvString; \ + (value)->val.string.len = strlen(val_cstring); \ + (value)->val.string.val = val_cstring; \ + } \ + else \ + { \ + (value)->type = jbvNull; \ + Assert((val_type) != WJB_KEY); \ + } \ + \ pushJsonbValue(&jsonb_state, val_type, (value)); \ } while (0) @@ -1445,6 +1448,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) key, val; + char *parent_name, + *parent_namespace, + *partition_name, + *partition_namespace; + + /* Fetch & cache callback's Oid if needed */ if (!cb_params->callback_is_cached) { @@ -1472,8 +1481,10 @@ invoke_init_callback_internal(init_callback_params *cb_params) errmsg("callback function \"%s\" does not exist", TextDatumGetCString(init_cb_datum)))); } - else - cb_params->callback = InvalidOid; + /* There's no callback */ + else cb_params->callback = InvalidOid; + + /* We've made a lookup */ cb_params->callback_is_cached = true; } } @@ -1485,6 +1496,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Validate the callback's signature */ validate_part_callback(cb_params->callback, true); + parent_name = get_rel_name(parent_oid); + parent_namespace = get_namespace_name(get_rel_namespace(parent_oid)); + + partition_name = get_rel_name(partition_oid); + 
partition_namespace = get_namespace_name(get_rel_namespace(partition_oid)); + /* Generate JSONB we're going to pass to callback */ switch (cb_params->parttype) { @@ -1493,13 +1510,13 @@ invoke_init_callback_internal(init_callback_params *cb_params) pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); JSB_INIT_VAL(&key, WJB_KEY, "parent"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&val, WJB_VALUE, parent_name); JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); - JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(parent_oid))); + JSB_INIT_VAL(&val, WJB_VALUE, parent_namespace); JSB_INIT_VAL(&key, WJB_KEY, "partition"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&val, WJB_VALUE, partition_name); JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); - JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(partition_oid))); + JSB_INIT_VAL(&val, WJB_VALUE, partition_namespace); JSB_INIT_VAL(&key, WJB_KEY, "parttype"); JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_HASH)); @@ -1509,46 +1526,40 @@ invoke_init_callback_internal(init_callback_params *cb_params) case PT_RANGE: { - char *start_value, - *end_value; + char *start_value = NULL, + *end_value = NULL; Bound sv_datum = cb_params->params.range_params.start_value, ev_datum = cb_params->params.range_params.end_value; Oid type = cb_params->params.range_params.value_type; + /* Convert min to CSTRING */ + if (!IsInfinite(&sv_datum)) + start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); + + /* Convert max to CSTRING */ + if (!IsInfinite(&ev_datum)) + end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); + pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); JSB_INIT_VAL(&key, WJB_KEY, "parent"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(parent_oid)); + JSB_INIT_VAL(&val, WJB_VALUE, parent_name); JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); - JSB_INIT_VAL(&val, WJB_VALUE, 
get_namespace_name(get_rel_namespace(parent_oid))); + JSB_INIT_VAL(&val, WJB_VALUE, parent_namespace); JSB_INIT_VAL(&key, WJB_KEY, "partition"); - JSB_INIT_VAL(&val, WJB_VALUE, get_rel_name_or_relid(partition_oid)); + JSB_INIT_VAL(&val, WJB_VALUE, partition_name); JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); - JSB_INIT_VAL(&val, WJB_VALUE, get_namespace_name(get_rel_namespace(partition_oid))); + JSB_INIT_VAL(&val, WJB_VALUE, partition_namespace); JSB_INIT_VAL(&key, WJB_KEY, "parttype"); JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); /* Lower bound */ JSB_INIT_VAL(&key, WJB_KEY, "range_min"); - if (!IsInfinite(&sv_datum)) - { - /* Convert min to CSTRING */ - start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); - JSB_INIT_VAL(&val, WJB_VALUE, start_value); - } - else - JSB_INIT_NULL_VAL(&val, WJB_VALUE); + JSB_INIT_VAL(&val, WJB_VALUE, start_value); /* Upper bound */ JSB_INIT_VAL(&key, WJB_KEY, "range_max"); - if (!IsInfinite(&ev_datum)) - { - /* Convert max to CSTRING */ - end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); - JSB_INIT_VAL(&val, WJB_VALUE, end_value); - } - else - JSB_INIT_NULL_VAL(&val, WJB_VALUE); + JSB_INIT_VAL(&val, WJB_VALUE, end_value); result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 2e24dc77..f64433d3 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -841,9 +841,6 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) end; Oid value_type; - if (PG_ARGISNULL(ARG_RANGE_START) || PG_ARGISNULL(ARG_RANGE_END)) - elog(ERROR, "both bounds must be provided for RANGE partition"); - /* Fetch start & end values for RANGE + their type */ start = PG_ARGISNULL(ARG_RANGE_START) ? 
MakeBoundInf(MINUS_INFINITY) : From aad50f89c07bb69b00ea45b5e2ff8bc1fc46eca6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 17:25:15 +0300 Subject: [PATCH 0206/1124] pathman_planner_hook(): don't call decr_refcount_parenthood_statuses() if pathman is disabled --- src/hooks.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 72b776f6..72169921 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -468,10 +468,11 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) PlannedStmt *result; uint32 query_id = parse->queryId; + bool pathman_ready = IsPathmanReady(); /* in case it changes */ PG_TRY(); { - if (IsPathmanReady()) + if (pathman_ready) { /* Increment parenthood_statuses refcount */ incr_refcount_parenthood_statuses(); @@ -486,7 +487,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) else result = standard_planner(parse, cursorOptions, boundParams); - if (IsPathmanReady()) + if (pathman_ready) { /* Give rowmark-related attributes correct names */ ExecuteForPlanTree(result, postprocess_lock_rows); @@ -504,9 +505,13 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* We must decrease parenthood statuses refcount on ERROR */ PG_CATCH(); { - /* Caught an ERROR, decrease refcount */ - decr_refcount_parenthood_statuses(); + if (pathman_ready) + { + /* Caught an ERROR, decrease refcount */ + decr_refcount_parenthood_statuses(); + } + /* Rethrow ERROR further */ PG_RE_THROW(); } PG_END_TRY(); From 29806bcf079107a643db8d7945bf325471469fb4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 17:36:17 +0300 Subject: [PATCH 0207/1124] cppcheck: suppress incorrectStringBooleanError --- travis/pg-travis-test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index e0e7bac0..9a544a15 100644 --- a/travis/pg-travis-test.sh +++ 
b/travis/pg-travis-test.sh @@ -51,6 +51,7 @@ if [ $CHECK_CODE = "true" ]; then --enable=warning,portability,performance \ --suppress=redundantAssignment \ --suppress=uselessAssignmentPtrArg \ + --suppress=incorrectStringBooleanError \ --std=c89 src/*.c src/*.h 2> cppcheck.log if [ -s cppcheck.log ]; then From b0e32cd51cc8091be302b74bf9afd0326889d275 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 13 Feb 2017 17:41:38 +0300 Subject: [PATCH 0208/1124] Add flag IsBgWorker to create_partitions_for_value_internal function --- src/partition_creation.c | 7 ++++--- src/partition_creation.h | 2 +- src/pathman_workers.c | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 704ac570..89dc1724 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -270,7 +270,8 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); last_partition = create_partitions_for_value_internal(relid, value, - value_type); + value_type, + false); } } else @@ -299,7 +300,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * use create_partitions_for_value() instead. 
*/ Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, bool IsBgWorker) { MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ @@ -405,7 +406,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) ErrorData *edata; /* Simply rethrow ERROR if we're in backend */ - if (!IsBackgroundWorker) + if (!IsBgWorker) PG_RE_THROW(); /* Switch to the original context & copy edata */ diff --git a/src/partition_creation.h b/src/partition_creation.h index 8abce6b3..7d1f691e 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -24,7 +24,7 @@ /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, bool IsBgWorker); /* Create one RANGE partition */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index b3a55b76..9b1d3e21 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -366,8 +366,8 @@ bgw_main_spawn_partitions(Datum main_arg) args->parallel_master_pid)) return; #endif - /* Establish connection and start transaction */ + BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); /* Start new transaction (syscache access etc.) 
*/ @@ -392,7 +392,8 @@ bgw_main_spawn_partitions(Datum main_arg) /* Create partitions and save the Oid of the last one */ args->result = create_partitions_for_value_internal(args->partitioned_table, value, /* unpacked Datum */ - args->value_type); + args->value_type, + true); /* run under background woker */ /* Finish transaction in an appropriate way */ if (args->result == InvalidOid) From 369b49d35fb59f24989abedc5c48e0b8c98bbfb2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 17:48:25 +0300 Subject: [PATCH 0209/1124] clean code --- src/partition_creation.c | 7 ++++--- src/partition_creation.h | 3 ++- src/pathman_workers.c | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 188b84fd..61650ae7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -271,7 +271,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) last_partition = create_partitions_for_value_internal(relid, value, value_type, - false); + false); /* backend */ } } else @@ -300,7 +300,8 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * use create_partitions_for_value() instead. 
*/ Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, bool IsBgWorker) +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, + bool is_background_worker) { MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ @@ -406,7 +407,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, boo ErrorData *edata; /* Simply rethrow ERROR if we're in backend */ - if (!IsBgWorker) + if (!is_background_worker) PG_RE_THROW(); /* Switch to the original context & copy edata */ diff --git a/src/partition_creation.h b/src/partition_creation.h index 7d1f691e..0338fa4e 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -24,7 +24,8 @@ /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, bool IsBgWorker); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, + bool is_background_worker); /* Create one RANGE partition */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 9b1d3e21..246b216b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -393,7 +393,7 @@ bgw_main_spawn_partitions(Datum main_arg) args->result = create_partitions_for_value_internal(args->partitioned_table, value, /* unpacked Datum */ args->value_type, - true); /* run under background woker */ + true); /* background woker */ /* Finish transaction in an appropriate way */ if (args->result == InvalidOid) From b718cbdeb2b9e58ceb49ef5692a2bf6510e4af2b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 13 Feb 2017 18:59:51 +0300 Subject: [PATCH 0210/1124] migration script from 1.2 to 1.3 --- .gitignore | 2 +- init.sql | 12 +- pg_pathman--1.2--1.3.sql | 863 +++++++++++++++++++++++++++++++++++++++ range.sql | 92 ++--- 4 files changed, 916 insertions(+), 53 
deletions(-) create mode 100644 pg_pathman--1.2--1.3.sql diff --git a/.gitignore b/.gitignore index f0d2c2c4..90108e07 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,4 @@ regression.out *.gcda *.gcno *.gcov -pg_pathman--*.sql +pg_pathman--1.3.sql diff --git a/init.sql b/init.sql index c75418bd..319069be 100644 --- a/init.sql +++ b/init.sql @@ -251,7 +251,7 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( parent REGCLASS, - "partition" REGCLASS, + partition REGCLASS, parttype INT4, partattr TEXT, range_min TEXT, @@ -643,7 +643,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, - partition_relid REGCLASS) + partition REGCLASS) RETURNS VOID AS $$ DECLARE @@ -651,13 +651,13 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition_relid); + PERFORM @extschema@.validate_relname(partition); FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP EXECUTE format('ALTER TABLE %s ADD %s', - partition_relid::TEXT, + partition::TEXT, pg_catalog.pg_get_constraintdef(rec.conid)); END LOOP; END @@ -880,7 +880,7 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - "partition" REGCLASS, + partition REGCLASS, init_callback REGPROCEDURE, start_value ANYELEMENT, end_value ANYELEMENT) @@ -892,7 +892,7 @@ LANGUAGE C; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - "partition" REGCLASS, + partition REGCLASS, init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql new file mode 100644 index 00000000..bf7f3bef --- /dev/null +++ b/pg_pathman--1.2--1.3.sql @@ 
-0,0 +1,863 @@ + +/* ------------------------------------------------------------------------ + * Alter config tables + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + attname TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config +ADD CHECK (@extschema@.validate_interval_value(partrel, + attname, + parttype, + range_interval)) + +/* TODO! Write a convertation */ +ALTER TABLE @extschema@.pathman_config_params +ALTER COLUMN init_callback TYPE TEXT DEFAULT NULL; + +DROP FUNCTION validate_part_callback(REGPROC, BOOL); + +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROCEDURE, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + +ALTER TABLE @extschema@.pathman_config_params +ADD CHECK (@extschema@.validate_part_callback(CASE WHEN init_callback IS NULL + THEN 0::REGPROCEDURE + ELSE init_callback::REGPROCEDURE + END)) + +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP FUNCTION @extschema@.set_init_callback(REGCLASS, REGPROC); +DROP FUNCTION @extschema@.get_attribute_type(REGCLASS, TEXT); +DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_hash_partitions_internal(REGCLASS, TEXT, INTEGER); +DROP FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.get_part_range(REGCLASS, ANYELEMENT); + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION 
@extschema@.pathman_set_param(REGCLASS, TEXT, ANYELEMENT) STRICT; + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROCEDURE DEFAULT 0) +RETURNS VOID AS +$$ +DECLARE + regproc_text TEXT := NULL; + +BEGIN + + /* Fetch schema-qualified name of callback */ + IF callback != 0 THEN + SELECT quote_ident(nspname) || '.' || + quote_ident(proname) || '(' || + (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM unnest(proargtypes) AS x(argtype)) || + ')' + FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n + ON n.oid = p.pronamespace + WHERE p.oid = callback + INTO regproc_text; /* <= result */ + END IF; + + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_interval( + relation REGCLASS, + value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + affected INTEGER; +BEGIN + UPDATE @extschema@.pathman_config + SET range_interval = value::text + WHERE partrel = relation AND parttype = 2; + + /* Check number of affected rows */ + GET DIAGNOSTICS affected = ROW_COUNT; + + IF affected = 0 THEN + RAISE EXCEPTION 'table "%" is not partitioned by RANGE', relation; + END IF; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.alter_partition( + relation REGCLASS, + new_name TEXT, + new_schema REGNAMESPACE, + new_tablespace TEXT) +RETURNS VOID AS +$$ +DECLARE + orig_name TEXT; + orig_schema OID; + +BEGIN + SELECT relname, relnamespace FROM pg_class + WHERE oid = relation + INTO orig_name, orig_schema; + + /* Alter table name */ + IF new_name != orig_name THEN + EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + END IF; + + /* Alter table schema */ + IF new_schema != orig_schema THEN + EXECUTE format('ALTER TABLE 
%s SET SCHEMA %s', relation, new_schema); + END IF; + + /* Move to another tablespace */ + IF NOT new_tablespace IS NULL THEN + EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT attname FROM pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count, + partition_names, + tablespaces); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE 
plpgsql +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS +$$ +DECLARE + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + 
old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END 
$body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := @extschema@.get_number_of_partitions(parent_relid); + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_partition_key_type(parent_relid); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on each partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition); + + v_atttype = @extschema@.get_partition_key_type(v_parent); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + /* Get partition values range */ + EXECUTE format('SELECT 
@extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := @extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_new_partition::regclass, + v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(partition::regclass, + v_attname, p_range[1], split_value); + v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT 
DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', v_atttype::TEXT); + ELSE + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', v_atttype::TEXT); + END IF; + + EXECUTE + format('SELECT 
@extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[1] IS NULL THEN + RAISE EXCEPTION 'Cannot prepend partition because 
first partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', v_atttype::TEXT); + ELSE + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', v_atttype::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + 
@extschema@.build_check_constraint_name(partition, v_attname), + @extschema@.build_range_condition(partition, + v_attname, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + parent_relid REGCLASS; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + + v_attname := attname + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname)); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( + partition REGCLASS) +RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + p_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT 
AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + partition REGCLASS, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; diff --git a/range.sql b/range.sql index ed3e85db..c2733a3a 100644 --- a/range.sql +++ b/range.sql @@ -439,7 +439,7 @@ $$ LANGUAGE plpgsql; * Split RANGE partition */ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition_relid REGCLASS, + partition REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL, @@ -456,13 +456,13 @@ DECLARE v_check_name TEXT; BEGIN - v_parent = @extschema@.get_parent_of_partition(partition_relid); + v_parent = @extschema@.get_parent_of_partition(partition); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(v_parent); /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition_relid); + PERFORM @extschema@.prevent_relation_modification(partition); v_atttype = @extschema@.get_partition_key_type(v_parent); @@ -473,13 +473,13 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; END IF; /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', @extschema@.get_base_type(v_atttype)::TEXT) - USING partition_relid + USING partition INTO p_range; IF p_range IS NULL THEN @@ -505,21 +505,21 @@ BEGIN v_attname, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', - partition_relid::TEXT, + partition::TEXT, v_cond, v_new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(partition_relid::regclass, + v_cond := 
@extschema@.build_range_condition(partition::regclass, v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); + v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition_relid::TEXT, + partition::TEXT, v_check_name); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition_relid::TEXT, + partition::TEXT, v_check_name, v_cond); @@ -529,6 +529,15 @@ END $$ LANGUAGE plpgsql; +/* + * Merge multiple partitions. All data will be copied to the first one. + * The rest of partitions will be dropped. + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + /* * The special case of merging two partitions */ @@ -800,7 +809,7 @@ LANGUAGE plpgsql; * Drop range partition */ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition_relid REGCLASS, + partition REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -812,8 +821,8 @@ DECLARE v_part_type INTEGER; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition_relid); - part_name := partition_relid::TEXT; /* save the name to be returned */ + parent_relid := @extschema@.get_parent_of_partition(partition); + part_name := partition::TEXT; /* save the name to be returned */ SELECT parttype FROM @extschema@.pathman_config @@ -822,7 +831,7 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; END IF; /* Acquire lock on parent */ @@ -831,15 +840,15 @@ BEGIN IF NOT delete_data THEN EXECUTE format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, - partition_relid::TEXT); + partition::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; /* Show number of copied 
rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; END IF; SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition_relid + WHERE oid = partition INTO v_relkind; /* @@ -848,9 +857,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + EXECUTE format('DROP TABLE %s', partition::TEXT); END IF; /* Invalidate cache */ @@ -867,7 +876,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, - partition_relid REGCLASS, + partition REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS @@ -879,29 +888,29 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition_relid); + PERFORM @extschema@.validate_relname(partition); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition_relid INTO rel_persistence; + WHERE oid = partition INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition_relid::TEXT; + partition::TEXT; END IF; /* check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + 
EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; @@ -911,9 +920,9 @@ BEGIN /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition_relid::TEXT, - @extschema@.build_check_constraint_name(partition_relid, v_attname), - @extschema@.build_range_condition(partition_relid, + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname), + @extschema@.build_range_condition(partition, v_attname, start_value, end_value)); @@ -927,7 +936,7 @@ BEGIN INTO v_init_callback; PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition_relid, + partition, v_init_callback, start_value, end_value); @@ -935,7 +944,7 @@ BEGIN /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition_relid; + RETURN partition; END $$ LANGUAGE plpgsql; @@ -944,7 +953,7 @@ LANGUAGE plpgsql; * Detach range partition */ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition_relid REGCLASS) + partition REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -952,7 +961,7 @@ DECLARE parent_relid REGCLASS; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition_relid); + parent_relid := @extschema@.get_parent_of_partition(partition); /* Acquire lock on parent */ PERFORM @extschema@.prevent_relation_modification(parent_relid); @@ -967,18 +976,18 @@ BEGIN /* Remove inheritance */ EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition_relid::TEXT, + partition::TEXT, parent_relid::TEXT); /* Remove check constraint */ EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition_relid::TEXT, - @extschema@.build_check_constraint_name(partition_relid, v_attname)); + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname)); /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition_relid; + RETURN 
partition; END $$ LANGUAGE plpgsql; @@ -1080,15 +1089,6 @@ END $$ LANGUAGE plpgsql; -/* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' -LANGUAGE C STRICT; - /* * Drops partition and expands the next partition so that it cover dropped * one @@ -1146,7 +1146,7 @@ LANGUAGE C; * Returns min and max values for specified RANGE partition. */ CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition_relid REGCLASS, + partition REGCLASS, dummy ANYELEMENT) RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' LANGUAGE C; From 80476ef6125ee8bcd26d9ea94287a4b8cd6e2b72 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 20:29:50 +0300 Subject: [PATCH 0211/1124] fix function interval_is_trivial() for 32-bit platforms --- src/pl_range_funcs.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7d68b416..d89658a2 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -701,9 +701,16 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) switch(atttype) { case INT2OID: + default_value = Int16GetDatum(0); + break; + case INT4OID: + default_value = Int32GetDatum(0); + break; + + /* Take care of 32-bit platforms */ case INT8OID: - default_value = Int16GetDatum(0); + default_value = Int64GetDatum(0); break; case FLOAT4OID: From a5c8cb7c36731cfb66e108ea20f32569cea829ff Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 20:52:21 +0300 Subject: [PATCH 0212/1124] improve interval tests --- expected/pathman_interval.out | 92 +++++++++++++++++++++++++++++++++-- sql/pathman_interval.sql | 58 ++++++++++++++++++++-- 2 files changed, 144 insertions(+), 6 deletions(-) diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out 
index 39120a3d..ff7340ea 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -1,8 +1,8 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; -/* Range partitions for INTEGER type */ -CREATE TABLE test_interval.abc (id SERIAL); +/* Range partitions for INT2 type */ +CREATE TABLE test_interval.abc (id INT2 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); NOTICE: sequence "abc_seq" does not exist, skipping create_range_partitions @@ -10,7 +10,93 @@ NOTICE: sequence "abc_seq" does not exist, skipping 2 (1 row) -SELECT set_interval('test_interval.abc', NULL::INTEGER); +SELECT set_interval('test_interval.abc', NULL::INT2); + set_interval +-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); +ERROR: cannot find appropriate partition for key '250' +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval must not be trivial +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); +ERROR: interval must not be negative +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; +ERROR: interval must not be trivial +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); + set_interval +-------------- + +(1 row) + +INSERT INTO test_interval.abc VALUES (250); +SELECT * FROM pathman_config; + partrel | attname | parttype | range_interval +-------------------+---------+----------+---------------- + test_interval.abc | id | 2 | 1000 +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Range partitions for INT4 type */ +CREATE TABLE test_interval.abc (id INT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); + create_range_partitions 
+------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INT4); + set_interval +-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); +ERROR: cannot find appropriate partition for key '250' +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval must not be trivial +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); +ERROR: interval must not be negative +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; +ERROR: interval must not be trivial +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); + set_interval +-------------- + +(1 row) + +INSERT INTO test_interval.abc VALUES (250); +SELECT * FROM pathman_config; + partrel | attname | parttype | range_interval +-------------------+---------+----------+---------------- + test_interval.abc | id | 2 | 1000 +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Range partitions for INT8 type */ +CREATE TABLE test_interval.abc (id INT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INT8); set_interval -------------- diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index 451984ad..ec49254b 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -5,10 +5,62 @@ CREATE SCHEMA test_interval; -/* Range partitions for INTEGER type */ -CREATE TABLE test_interval.abc (id SERIAL); +/* Range partitions for INT2 type */ +CREATE TABLE test_interval.abc (id INT2 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); -SELECT set_interval('test_interval.abc', NULL::INTEGER); +SELECT 
set_interval('test_interval.abc', NULL::INT2); + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); +SELECT * FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for INT4 type */ +CREATE TABLE test_interval.abc (id INT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::INT4); + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); +SELECT * FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for INT8 type */ +CREATE TABLE test_interval.abc (id INT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::INT8); /* pg_pathman shouldn't be able to create a new partition */ INSERT INTO test_interval.abc VALUES (250); From 3a6d8a682924dea653a3688e28b82dce4acc7ff8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Feb 2017 21:11:23 +0300 Subject: 
[PATCH 0213/1124] improve functions get_part_range_by_idx() and get_part_range_by_oid(), tests --- expected/pathman_calamity.out | 52 ++++++++++++++++++++++++++++++++++- sql/pathman_calamity.sql | 27 ++++++++++++++++++ src/pl_range_funcs.c | 12 ++++++++ 3 files changed, 90 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index bc125cc5..1830584f 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -617,6 +617,56 @@ SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disab 0 (1 row) +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); +NOTICE: sequence "test_range_idx_seq" does not exist, skipping + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to table calamity.test_range_idx_1 +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); +NOTICE: sequence "test_range_oid_seq" does 
not exist, skipping + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to table calamity.test_range_oid_1 DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 16 other objects +NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 5662daa0..0b8058c8 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -235,5 +235,32 @@ SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::R SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + +DROP TABLE calamity.test_range_idx CASCADE; + + +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 
'val', 1, 10, 1); + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + +DROP TABLE calamity.test_range_oid CASCADE; + + + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index d89658a2..8d1fc523 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -256,6 +256,12 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Check type of 'dummy' (for correct output) */ + if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->atttype)) + elog(ERROR, "pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->atttype))); + + ranges = PrelGetRangesArray(prel); /* Look for the specified partition */ @@ -309,6 +315,12 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Check type of 'dummy' (for correct output) */ + if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->atttype)) + elog(ERROR, "pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->atttype))); + + /* Now we have to deal with 'idx' */ if (partition_idx < -1) { From e23915c7f6984e7c8b6468fd7538a999a670fdca Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Feb 2017 12:12:52 +0300 Subject: [PATCH 0214/1124] migration script fixes --- Makefile | 3 +- pg_pathman--1.2--1.3.sql | 16 +- pg_pathman--1.2.sql | 2373 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 2386 insertions(+), 6 deletions(-) create mode 100644 pg_pathman--1.2.sql diff --git a/Makefile b/Makefile index 144538d0..6f9680cb 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,8 @@ EXTVERSION = 
1.3 DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ - pg_pathman--1.1--1.2.sql + pg_pathman--1.1--1.2.sql \ + pg_pathman--1.2--1.3.sql PGFILEDESC = "pg_pathman - partitioning tool" diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index bf7f3bef..bd31902d 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -14,13 +14,19 @@ ALTER TABLE @extschema@.pathman_config ADD CHECK (@extschema@.validate_interval_value(partrel, attname, parttype, - range_interval)) + range_interval)); -/* TODO! Write a convertation */ +/* Change type for init_callback attribute */ ALTER TABLE @extschema@.pathman_config_params -ALTER COLUMN init_callback TYPE TEXT DEFAULT NULL; +ALTER COLUMN init_callback TYPE TEXT, +ALTER COLUMN init_callback SET DEFAULT NULL; -DROP FUNCTION validate_part_callback(REGPROC, BOOL); +/* Set init_callback to NULL where it used to be 0 */ +UPDATE @extschema@.pathman_config_params +SET init_callback = NULL +WHERE init_callback = '-'; + +DROP FUNCTION @extschema@.validate_part_callback(REGPROC, BOOL); CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( callback REGPROCEDURE, @@ -32,7 +38,7 @@ ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(CASE WHEN init_callback IS NULL THEN 0::REGPROCEDURE ELSE init_callback::REGPROCEDURE - END)) + END)); /* ------------------------------------------------------------------------ * Drop irrelevant objects diff --git a/pg_pathman--1.2.sql b/pg_pathman--1.2.sql new file mode 100644 index 00000000..ffbfcc55 --- /dev/null +++ b/pg_pathman--1.2.sql @@ -0,0 +1,2373 @@ +/* ------------------------------------------------------------------------ + * + * init.sql + * Creates config table and provides common utility functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* + * Pathman config + * partrel - regclass (relation 
type, stored as Oid) + * attname - partitioning key + * parttype - partitioning type: + * 1 - HASH + * 2 - RANGE + * range_interval - base interval for RANGE partitioning as string + */ +CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( + partrel REGCLASS NOT NULL PRIMARY KEY, + attname TEXT NOT NULL, + parttype INTEGER NOT NULL, + range_interval TEXT, + + CHECK (parttype IN (1, 2)) /* check for allowed part types */ +); + + +/* + * Checks that callback function meets specific requirements. + * Particularly it must have the only JSONB argument and VOID return type. + * + * NOTE: this function is used in CHECK CONSTRAINT. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROC, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + + +/* + * Optional parameters for partitioned tables. + * partrel - regclass (relation type, stored as Oid) + * enable_parent - add parent table to plan + * auto - enable automatic partition creation + * init_callback - cb to be executed on partition creation + */ +CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( + partrel REGCLASS NOT NULL PRIMARY KEY, + enable_parent BOOLEAN NOT NULL DEFAULT FALSE, + auto BOOLEAN NOT NULL DEFAULT TRUE, + init_callback REGPROCEDURE NOT NULL DEFAULT 0, + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE + + CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ +); + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config, @extschema@.pathman_config_params +TO public; + +/* + * Check if current user can alter/drop specified relation + */ +CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; + +/* + * Row security policy to restrict partitioning operations to owner and + * superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL 
USING (check_security_policy(partrel)); + +CREATE POLICY deny_modification ON @extschema@.pathman_config_params +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); + +CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); + +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; +ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; + +/* + * Invalidate relcache every time someone changes parameters config. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' +LANGUAGE C; + +CREATE TRIGGER pathman_config_params_trigger +BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + +/* + * Enable dump of config tables with pg_dump. + */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); + + +/* + * Add a row describing the optional parameter to pathman_config_params. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( + relation REGCLASS, + param TEXT, + value ANYELEMENT) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('INSERT INTO @extschema@.pathman_config_params + (partrel, %1$s) VALUES ($1, $2) + ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) + USING relation, value; +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Include\exclude parent relation in query plan. + */ +CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Enable\disable automatic partition creation. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.set_auto( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'auto', value); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Set partition creation callback + */ +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROC DEFAULT 0) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Set 'spawn using BGW' option + */ +CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); +END +$$ +LANGUAGE plpgsql STRICT; + + +/* + * Show all existing parents and partitions. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + partattr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +/* + * View for show_partition_list(). + */ +CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + +/* + * Show all existing concurrent partitioning tasks. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +/* + * View for show_concurrent_part_tasks(). + */ +CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); + +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + +/* + * Partition table using ConcurrentPartWorker. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( + relation REGCLASS, + batch_size INTEGER DEFAULT 1000, + sleep_time FLOAT8 DEFAULT 1.0) +RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' +LANGUAGE C STRICT; + +/* + * Stop concurrent partitioning task. + */ +CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( + relation REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' +LANGUAGE C STRICT; + + +/* + * Copy rows to partitions concurrently. + */ +CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( + relation REGCLASS, + p_min ANYELEMENT DEFAULT NULL::text, + p_max ANYELEMENT DEFAULT NULL::text, + p_limit INT DEFAULT NULL, + OUT p_total BIGINT) +AS +$$ +DECLARE + v_attr TEXT; + v_limit_clause TEXT := ''; + v_where_clause TEXT := ''; + ctids TID[]; + +BEGIN + SELECT attname INTO v_attr + FROM @extschema@.pathman_config WHERE partrel = relation; + + p_total := 0; + + /* Format LIMIT clause if needed */ + IF NOT p_limit IS NULL THEN + v_limit_clause := format('LIMIT %s', p_limit); + END IF; + + /* Format WHERE clause if needed */ + IF NOT p_min IS NULL THEN + v_where_clause := format('%1$s >= $1', v_attr); + END IF; + + IF NOT p_max IS NULL THEN + IF NOT p_min IS NULL THEN + v_where_clause := v_where_clause || ' AND '; + END IF; + v_where_clause := v_where_clause || format('%1$s < $2', v_attr); + END IF; + + IF v_where_clause != '' THEN + v_where_clause := 'WHERE ' || v_where_clause; + END IF; + + /* Lock rows and copy data */ + RAISE NOTICE 'Copying data to partitions...'; + EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, v_limit_clause) + USING p_min, p_max + INTO ctids; + + EXECUTE format(' + WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) + USING ctids; + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; 
+END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + +/* + * Old school way to distribute rows to partitions. + */ +CREATE OR REPLACE FUNCTION @extschema@.partition_data( + parent_relid REGCLASS, + OUT p_total BIGINT) +AS +$$ +DECLARE + relname TEXT; + rec RECORD; + cnt BIGINT := 0; + +BEGIN + p_total := 0; + + /* Create partitions and copy rest of the data */ + EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + INSERT INTO %1$s SELECT * FROM part_data', + parent_relid::TEXT); + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ +LANGUAGE plpgsql STRICT +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Aggregates several common relation checks before partitioning. + * Suitable for every partitioning type. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + p_attribute TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN + RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + +/* + * Returns relname without quotes or something. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( + cls REGCLASS, + OUT schema TEXT, + OUT relname TEXT) +AS +$$ +BEGIN + SELECT pg_catalog.pg_class.relnamespace::regnamespace, + pg_catalog.pg_class.relname + FROM pg_catalog.pg_class WHERE oid = cls::oid + INTO schema, relname; +END +$$ +LANGUAGE plpgsql STRICT; + +/* + * Check if two relations have equal structures. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( + relation1 OID, relation2 OID) +RETURNS BOOLEAN AS +$$ +DECLARE + rec RECORD; + +BEGIN + FOR rec IN ( + WITH + a1 AS (select * from pg_catalog.pg_attribute + where attrelid = relation1 and attnum > 0), + a2 AS (select * from pg_catalog.pg_attribute + where attrelid = relation2 and attnum > 0) + SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 + FROM a1 + FULL JOIN a2 ON a1.attnum = a2.attnum + ) + LOOP + IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN + RETURN false; + END IF; + END LOOP; + + RETURN true; +END +$$ +LANGUAGE plpgsql; + +/* + * DDL trigger that removes entry from pathman_config table. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS +$$ +DECLARE + obj record; + pg_class_oid oid; + relids regclass[]; +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ +LANGUAGE plpgsql; + +/* + * Drop triggers. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', + @extschema@.build_update_trigger_func_name(parent_relid)); +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS +$$ +DECLARE + v_rec RECORD; + v_rows BIGINT; + v_part_count INTEGER := 0; + conf_num_del INTEGER; + v_relkind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Drop trigger first */ + PERFORM @extschema@.drop_triggers(parent_relid); + + WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config + WHERE partrel = parent_relid + RETURNING *) + SELECT count(*) from config_num_deleted INTO conf_num_del; + + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + IF conf_num_del = 0 THEN + RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; + END IF; + + FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + v_rec.tbl::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = v_rec.tbl + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); + END IF; + + v_part_count := v_part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); + + RETURN v_part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +/* + * Copy all of parent's foreign keys. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition REGCLASS) +RETURNS VOID AS +$$ +DECLARE + rec RECORD; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition::TEXT, + pg_catalog.pg_get_constraintdef(rec.conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +/* + * Create DDL trigger to call pathman_ddl_trigger_func(). + */ +CREATE EVENT TRIGGER pathman_ddl_trigger +ON sql_drop +EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); + + + +CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_created' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' +LANGUAGE C STRICT; + + +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' +LANGUAGE C STRICT; + +/* + * Get parent of pg_pathman's partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( + partition_relid REGCLASS) +RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' +LANGUAGE C STRICT; + +/* + * Extract basic type of a domain. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_base_type( + typid REGTYPE) +RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' +LANGUAGE C STRICT; + +/* + * Returns attribute type name for relation. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( + relid REGCLASS, + attname TEXT) +RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' +LANGUAGE C STRICT; + +/* + * Return tablespace name for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' +LANGUAGE C STRICT; + + +/* + * Check that relation exists. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_relname( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'validate_relname' +LANGUAGE C; + +/* + * Checks if attribute is nullable + */ +CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( + relid REGCLASS, + attname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' +LANGUAGE C STRICT; + +/* + * Check if regclass is date or timestamp. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_date_type( + typid REGTYPE) +RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' +LANGUAGE C STRICT; + + +/* + * Build check constraint name for a specified relation's column. + */ +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + partition_relid REGCLASS, + attribute INT2) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + partition_relid REGCLASS, + attribute TEXT) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' +LANGUAGE C STRICT; + +/* + * Build update trigger and its underlying function's names. + */ +CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' +LANGUAGE C STRICT; + + +/* + * Attach a previously partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + attname TEXT, + range_interval TEXT DEFAULT NULL) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + + +/* + * Lock partitioned relation to restrict concurrent + * modification of partitioning scheme. + */ +CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' +LANGUAGE C STRICT; + +/* + * Lock relation to restrict concurrent modification of data. + */ +CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' +LANGUAGE C STRICT; + + +/* + * DEBUG: Place this inside some plpgsql fuction and set breakpoint. + */ +CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +RETURNS VOID AS 'pg_pathman', 'debug_capture' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +LANGUAGE C STRICT; + + +/* + * Invoke init_callback on RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + +/* + * Invoke init_callback on HASH partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; +/* ------------------------------------------------------------------------ + * + * hash.sql + * HASH partitioning functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* + * Creates hash partitions for specified relation + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + +/* + * Replace hash partition with another one. 
It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS +$$ +DECLARE + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not 
partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE 
format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := @extschema@.get_number_of_partitions(parent_relid); + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_attribute_type(parent_relid, attr); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on each partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + +/* + * Just create HASH partitions, called by create_hash_partitions(). + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C STRICT; + +/* + * Returns hash function OID for specified type + */ +CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) +RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' +LANGUAGE C STRICT; + +/* + * Calculates hash for integer value + */ +CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) +RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' +LANGUAGE C STRICT; + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partitions_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; +/* ------------------------------------------------------------------------ + * + * range.sql + * RANGE partitioning functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( + parent_relid REGCLASS, + OUT seq_name 
TEXT) +AS $$ +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); +END +$$ +LANGUAGE plpgsql; + +/* + * Check RANGE partition boundaries. + */ +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + v_min start_value%TYPE; + v_max start_value%TYPE; + v_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + attribute, parent_relid::TEXT) + INTO v_count, v_min, v_max; + + /* Check if column has NULL values */ + IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + RAISE EXCEPTION 'column "%" contains NULL values', attribute; + END IF; + + /* Check lower boundary */ + IF start_value > v_min THEN + RAISE EXCEPTION 'start value is less than min value of "%"', attribute; + END IF; + + /* Check upper boundary */ + IF end_value <= v_max THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; + END IF; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_atttype REGTYPE; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM 
@extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', + parent_relid, + attribute, + start_value, + end_value, + v_atttype::TEXT); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Create first partition */ + FOR i IN 1..p_count + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', + v_atttype::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := 
start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF v_max IS NULL THEN + RAISE EXCEPTION 'column "%" has NULL values', attribute; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* 
+ * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* create first partition */ + FOR i IN 1..p_count + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := @extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval ANYELEMENT, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data 
modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_interval <= 0 THEN + RAISE EXCEPTION 'interval must be positive'; + END IF; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := @extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + 
+BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +/* + * Split RANGE partition + */ +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) 
+RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := @extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); + v_check_name := 
@extschema@.build_check_constraint_name(partition, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + + +/* + * Merge RANGE partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +DECLARE + v_parent1 REGCLASS; + v_parent2 REGCLASS; + v_attname TEXT; + v_part_type INTEGER; + v_atttype REGTYPE; + +BEGIN + IF partition1 = partition2 THEN + RAISE EXCEPTION 'cannot merge partition with itself'; + END IF; + + v_parent1 := @extschema@.get_parent_of_partition(partition1); + v_parent2 := @extschema@.get_parent_of_partition(partition2); + + /* Acquire data modification locks (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition1); + PERFORM @extschema@.prevent_relation_modification(partition2); + + IF v_parent1 != v_parent2 THEN + RAISE EXCEPTION 'cannot merge partitions with different parents'; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent1); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent1 + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION 'specified partitions are not RANGE partitions'; + END IF; + + v_atttype := @extschema@.get_attribute_type(partition1, v_attname); + + EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING v_parent1, partition1, partition2; + + /* Tell backend to reload 
configuration */ + PERFORM @extschema@.on_update_partitions(v_parent1); +END +$$ +LANGUAGE plpgsql; + + +/* + * Merge two partitions. All data will be copied to the first one. Second + * partition will be destroyed. + * + * NOTE: dummy field is used to pass the element type to the function + * (it is necessary because of pseudo-types used in function). + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( + parent_relid REGCLASS, + partition1 REGCLASS, + partition2 REGCLASS, + dummy ANYELEMENT, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_check_name TEXT; + +BEGIN + SELECT attname FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || + @extschema@.get_part_range($2, NULL::%1$s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition1, partition2 + INTO p_range; + + /* Check if ranges are adjacent */ + IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN + RAISE EXCEPTION 'merge failed, partitions must be adjacent'; + END IF; + + /* Drop constraint on first partition... 
*/ + v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition1::TEXT, + v_check_name); + + /* and create a new one */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition1::TEXT, + v_check_name, + @extschema@.build_range_condition(v_attname, + least(p_range[1], p_range[3]), + greatest(p_range[2], p_range[4]))); + + /* Copy data from second partition to the first one */ + EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition2::TEXT, + partition1::TEXT); + + /* Remove second partition */ + EXECUTE format('DROP TABLE %s', partition2::TEXT); +END +$$ LANGUAGE plpgsql; + + +/* + * Append new partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + SELECT attname, range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname, v_interval; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Spawn logic for append_partition(). 
We have to + * separate this in order to pass the 'p_range'. + * + * NOTE: we don't take a xact_handling lock here. + */ +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[2], + p_range[2] + p_interval::interval, + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +/* + * Prepend new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + SELECT attname, range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname, v_interval; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Spawn logic for prepend_partition(). We have to + * separate this in order to pass the 'p_range'. + * + * NOTE: we don't take a xact_handling lock here. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[1] - p_interval::interval, + p_range[1], + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition 
*/ + v_part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS +$$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + v_relkind CHAR; + v_rows BIGINT; + v_part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition); + part_name := partition::TEXT; /* save the name to be returned */ + + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition::TEXT); + END IF; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN part_name; +END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname), + @extschema@.build_range_condition(v_attname, + start_value, + end_value)); + 
+ /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + parent_relid REGCLASS; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_attname := attname + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname)); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( + IN parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_oid Oid; + new_oid Oid; + + BEGIN + old_oid := TG_RELID; + new_oid := @extschema@.find_or_create_range_partition( + ''%2$s''::regclass, NEW.%3$s); + + IF old_oid = new_oid THEN + RETURN 
NEW; + END IF; + + EXECUTE format(''DELETE FROM %%s WHERE %5$s'', + old_oid::regclass::text) + USING %6$s; + + EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', + new_oid::regclass::text) + USING %8$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s ' || + 'BEFORE UPDATE ON %s ' || + 'FOR EACH ROW EXECUTE PROCEDURE %s()'; + + triggername TEXT; + funcname TEXT; + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + rec RECORD; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_attribute + WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Create function for trigger */ + EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, + old_fields, att_fmt, new_fields); + + /* Create trigger on every partition */ + FOR rec in (SELECT * FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid) + LOOP + EXECUTE format(trigger, + triggername, + rec.inhrelid::REGCLASS::TEXT, + funcname); + END LOOP; + + RETURN funcname; +END +$$ LANGUAGE plpgsql; + +/* + * Creates new RANGE partition. Returns partition name. + * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' +LANGUAGE C +SET client_min_messages = WARNING; + +/* + * Construct CHECK constraint condition for a range partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' +LANGUAGE C; + +/* + * Returns N-th range (as an array of two elements). + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + parent_relid REGCLASS, + partition_idx INTEGER, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' +LANGUAGE C; + +/* + * Returns min and max values for specified RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + partition_relid REGCLASS, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; + +/* + * Checks if range overlaps with existing partitions. + * Returns TRUE if overlaps and FALSE otherwise. + */ +CREATE OR REPLACE FUNCTION @extschema@.check_range_available( + parent_relid REGCLASS, + range_min ANYELEMENT, + range_max ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' +LANGUAGE C; + +/* + * Needed for an UPDATE trigger. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( + parent_relid REGCLASS, + value ANYELEMENT) +RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' +LANGUAGE C; From bc4871af5ab29f666563dc31dcb9dce7907bd52c Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Feb 2017 12:13:58 +0300 Subject: [PATCH 0215/1124] removed pg_pathman--1.2.sql added by mistake --- pg_pathman--1.2.sql | 2373 ------------------------------------------- 1 file changed, 2373 deletions(-) delete mode 100644 pg_pathman--1.2.sql diff --git a/pg_pathman--1.2.sql b/pg_pathman--1.2.sql deleted file mode 100644 index ffbfcc55..00000000 --- a/pg_pathman--1.2.sql +++ /dev/null @@ -1,2373 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * init.sql - * Creates config table and provides common utility functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Pathman config - * partrel - regclass (relation type, stored as Oid) - * attname - partitioning key - * parttype - partitioning type: - * 1 - HASH - * 2 - RANGE - * range_interval - base interval for RANGE partitioning as string - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( - partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, - parttype INTEGER NOT NULL, - range_interval TEXT, - - CHECK (parttype IN (1, 2)) /* check for allowed part types */ -); - - -/* - * Checks that callback function meets specific requirements. - * Particularly it must have the only JSONB argument and VOID return type. - * - * NOTE: this function is used in CHECK CONSTRAINT. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( - callback REGPROC, - raise_error BOOL DEFAULT TRUE) -RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' -LANGUAGE C STRICT; - - -/* - * Optional parameters for partitioned tables. 
- * partrel - regclass (relation type, stored as Oid) - * enable_parent - add parent table to plan - * auto - enable automatic partition creation - * init_callback - cb to be executed on partition creation - */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( - partrel REGCLASS NOT NULL PRIMARY KEY, - enable_parent BOOLEAN NOT NULL DEFAULT FALSE, - auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0, - spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE - - CHECK (@extschema@.validate_part_callback(init_callback)) /* check signature */ -); - -GRANT SELECT, INSERT, UPDATE, DELETE -ON @extschema@.pathman_config, @extschema@.pathman_config_params -TO public; - -/* - * Check if current user can alter/drop specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) -RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; - -/* - * Row security policy to restrict partitioning operations to owner and - * superusers only - */ -CREATE POLICY deny_modification ON @extschema@.pathman_config -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY deny_modification ON @extschema@.pathman_config_params -FOR ALL USING (check_security_policy(partrel)); - -CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); - -CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); - -ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; -ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; - -/* - * Invalidate relcache every time someone changes parameters config. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' -LANGUAGE C; - -CREATE TRIGGER pathman_config_params_trigger -BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params -FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); - -/* - * Enable dump of config tables with pg_dump. - */ -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); - - -/* - * Add a row describing the optional parameter to pathman_config_params. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( - relation REGCLASS, - param TEXT, - value ANYELEMENT) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params - (partrel, %1$s) VALUES ($1, $2) - ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) - USING relation, value; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Include\exclude parent relation in query plan. - */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Enable\disable automatic partition creation. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'auto', value); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Set partition creation callback - */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( - relation REGCLASS, - callback REGPROC DEFAULT 0) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Set 'spawn using BGW' option - */ -CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( - relation REGCLASS, - value BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); -END -$$ -LANGUAGE plpgsql STRICT; - - -/* - * Show all existing parents and partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() -RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - partattr TEXT, - range_min TEXT, - range_max TEXT) -AS 'pg_pathman', 'show_partition_list_internal' -LANGUAGE C STRICT; - -/* - * View for show_partition_list(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list -AS SELECT * FROM @extschema@.show_partition_list(); - -GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; - -/* - * Show all existing concurrent partitioning tasks. - */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() -RETURNS TABLE ( - userid REGROLE, - pid INT, - dbid OID, - relid REGCLASS, - processed INT, - status TEXT) -AS 'pg_pathman', 'show_concurrent_part_tasks_internal' -LANGUAGE C STRICT; - -/* - * View for show_concurrent_part_tasks(). - */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks -AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); - -GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; - -/* - * Partition table using ConcurrentPartWorker. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( - relation REGCLASS, - batch_size INTEGER DEFAULT 1000, - sleep_time FLOAT8 DEFAULT 1.0) -RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' -LANGUAGE C STRICT; - -/* - * Stop concurrent partitioning task. - */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( - relation REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' -LANGUAGE C STRICT; - - -/* - * Copy rows to partitions concurrently. - */ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( - relation REGCLASS, - p_min ANYELEMENT DEFAULT NULL::text, - p_max ANYELEMENT DEFAULT NULL::text, - p_limit INT DEFAULT NULL, - OUT p_total BIGINT) -AS -$$ -DECLARE - v_attr TEXT; - v_limit_clause TEXT := ''; - v_where_clause TEXT := ''; - ctids TID[]; - -BEGIN - SELECT attname INTO v_attr - FROM @extschema@.pathman_config WHERE partrel = relation; - - p_total := 0; - - /* Format LIMIT clause if needed */ - IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); - END IF; - - /* Format WHERE clause if needed */ - IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', v_attr); - END IF; - - IF NOT p_max IS NULL THEN - IF NOT p_min IS NULL THEN - v_where_clause := v_where_clause || ' AND '; - END IF; - v_where_clause := v_where_clause || format('%1$s < $2', v_attr); - END IF; - - IF v_where_clause != '' THEN - v_where_clause := 'WHERE ' || v_where_clause; - END IF; - - /* Lock rows and copy data */ - RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', - relation, v_where_clause, v_limit_clause) - USING p_min, p_max - INTO ctids; - - EXECUTE format(' - WITH data AS ( - DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) - INSERT INTO %1$s SELECT * FROM data', - relation) - USING ctids; - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; 
-END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Old school way to distribute rows to partitions. - */ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( - parent_relid REGCLASS, - OUT p_total BIGINT) -AS -$$ -DECLARE - relname TEXT; - rec RECORD; - cnt BIGINT := 0; - -BEGIN - p_total := 0; - - /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) - INSERT INTO %1$s SELECT * FROM part_data', - parent_relid::TEXT); - - /* Get number of inserted rows */ - GET DIAGNOSTICS p_total = ROW_COUNT; - RETURN; -END -$$ -LANGUAGE plpgsql STRICT -SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ - -/* - * Disable pathman partitioning for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Delete rows from both config tables */ - DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - - /* Drop triggers on update */ - PERFORM @extschema@.drop_triggers(parent_relid); - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Aggregates several common relation checks before partitioning. - * Suitable for every partitioning type. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( - relation REGCLASS, - p_attribute TEXT) -RETURNS BOOLEAN AS -$$ -DECLARE - v_rec RECORD; - is_referenced BOOLEAN; - rel_persistence CHAR; - -BEGIN - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = relation INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be partitioned', - relation::TEXT; - END IF; - - IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = relation) THEN - RAISE EXCEPTION 'relation "%" has already been partitioned', relation; - END IF; - - IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; - END IF; - - /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint - WHERE confrelid = relation::REGCLASS::OID) - LOOP - is_referenced := TRUE; - RAISE WARNING 'foreign key "%" references relation "%"', - v_rec.conname, relation; - END LOOP; - - IF is_referenced THEN - RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; - END IF; - - RETURN TRUE; -END -$$ -LANGUAGE plpgsql; - -/* - * Returns relname without quotes or something. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( - cls REGCLASS, - OUT schema TEXT, - OUT relname TEXT) -AS -$$ -BEGIN - SELECT pg_catalog.pg_class.relnamespace::regnamespace, - pg_catalog.pg_class.relname - FROM pg_catalog.pg_class WHERE oid = cls::oid - INTO schema, relname; -END -$$ -LANGUAGE plpgsql STRICT; - -/* - * Check if two relations have equal structures. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( - relation1 OID, relation2 OID) -RETURNS BOOLEAN AS -$$ -DECLARE - rec RECORD; - -BEGIN - FOR rec IN ( - WITH - a1 AS (select * from pg_catalog.pg_attribute - where attrelid = relation1 and attnum > 0), - a2 AS (select * from pg_catalog.pg_attribute - where attrelid = relation2 and attnum > 0) - SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 - FROM a1 - FULL JOIN a2 ON a1.attnum = a2.attnum - ) - LOOP - IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN - RETURN false; - END IF; - END LOOP; - - RETURN true; -END -$$ -LANGUAGE plpgsql; - -/* - * DDL trigger that removes entry from pathman_config table. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() -RETURNS event_trigger AS -$$ -DECLARE - obj record; - pg_class_oid oid; - relids regclass[]; -BEGIN - pg_class_oid = 'pg_catalog.pg_class'::regclass; - - /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events - JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid AND events.objsubid = 0; - - /* Cleanup pathman_config */ - DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); - - /* Cleanup params table too */ - DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); -END -$$ -LANGUAGE plpgsql; - -/* - * Drop triggers. - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', - @extschema@.build_update_trigger_func_name(parent_relid)); -END -$$ LANGUAGE plpgsql STRICT; - -/* - * Drop partitions. If delete_data set to TRUE, partitions - * will be dropped with all the data. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( - parent_relid REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) -RETURNS INTEGER AS -$$ -DECLARE - v_rec RECORD; - v_rows BIGINT; - v_part_count INTEGER := 0; - conf_num_del INTEGER; - v_relkind CHAR; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Drop trigger first */ - PERFORM @extschema@.drop_triggers(parent_relid); - - WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config - WHERE partrel = parent_relid - RETURNING *) - SELECT count(*) from config_num_deleted INTO conf_num_del; - - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - - IF conf_num_del = 0 THEN - RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; - END IF; - - FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl - FROM pg_catalog.pg_inherits - WHERE inhparent::regclass = parent_relid - ORDER BY inhrelid ASC) - LOOP - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - v_rec.tbl::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = v_rec.tbl - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. - */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); - END IF; - - v_part_count := v_part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); - - RETURN v_part_count; -END -$$ LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Copy all of parent's foreign keys. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( - parent_relid REGCLASS, - partition REGCLASS) -RETURNS VOID AS -$$ -DECLARE - rec RECORD; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint - WHERE conrelid = parent_relid AND contype = 'f') - LOOP - EXECUTE format('ALTER TABLE %s ADD %s', - partition::TEXT, - pg_catalog.pg_get_constraintdef(rec.conid)); - END LOOP; -END -$$ LANGUAGE plpgsql STRICT; - - -/* - * Create DDL trigger to call pathman_ddl_trigger_func(). - */ -CREATE EVENT TRIGGER pathman_ddl_trigger -ON sql_drop -EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); - - - -CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_created' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' -LANGUAGE C STRICT; - - -/* - * Get number of partitions managed by pg_pathman. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( - parent_relid REGCLASS) -RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' -LANGUAGE C STRICT; - -/* - * Get parent of pg_pathman's partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( - partition_relid REGCLASS) -RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' -LANGUAGE C STRICT; - -/* - * Extract basic type of a domain. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type( - typid REGTYPE) -RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' -LANGUAGE C STRICT; - -/* - * Returns attribute type name for relation. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( - relid REGCLASS, - attname TEXT) -RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' -LANGUAGE C STRICT; - -/* - * Return tablespace name for specified relation. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( - relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' -LANGUAGE C STRICT; - - -/* - * Check that relation exists. - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'validate_relname' -LANGUAGE C; - -/* - * Checks if attribute is nullable - */ -CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - relid REGCLASS, - attname TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' -LANGUAGE C STRICT; - -/* - * Check if regclass is date or timestamp. - */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( - typid REGTYPE) -RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' -LANGUAGE C STRICT; - - -/* - * Build check constraint name for a specified relation's column. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - partition_relid REGCLASS, - attribute INT2) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - partition_relid REGCLASS, - attribute TEXT) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' -LANGUAGE C STRICT; - -/* - * Build update trigger and its underlying function's names. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' -LANGUAGE C STRICT; - - -/* - * Attach a previously partitioned table. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL) -RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' -LANGUAGE C; - - -/* - * Lock partitioned relation to restrict concurrent - * modification of partitioning scheme. - */ -CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( - parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' -LANGUAGE C STRICT; - -/* - * Lock relation to restrict concurrent modification of data. - */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( - parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' -LANGUAGE C STRICT; - - -/* - * DEBUG: Place this inside some plpgsql fuction and set breakpoint. - */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() -RETURNS VOID AS 'pg_pathman', 'debug_capture' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' -LANGUAGE C STRICT; - - -/* - * Invoke init_callback on RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; - -/* - * Invoke init_callback on HASH partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( - parent_relid REGCLASS, - partition REGCLASS, - init_callback REGPROCEDURE) -RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' -LANGUAGE C; -/* ------------------------------------------------------------------------ - * - * hash.sql - * HASH partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -/* - * Creates hash partitions for specified relation - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( - parent_relid REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) - VALUES (parent_relid, attribute, 1); - - /* Create partitions */ - PERFORM @extschema@.create_hash_partitions_internal(parent_relid, - attribute, - partitions_count); - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Copy data */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN partitions_count; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Replace hash partition with another one. 
It could be useful in case when - * someone wants to attach foreign table as a partition. - * - * lock_parent - should we take an exclusive lock? - */ -CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( - old_partition REGCLASS, - new_partition REGCLASS, - lock_parent BOOL DEFAULT TRUE) -RETURNS REGCLASS AS -$$ -DECLARE - parent_relid REGCLASS; - part_attname TEXT; /* partitioned column */ - old_constr_name TEXT; /* name of old_partition's constraint */ - old_constr_def TEXT; /* definition of old_partition's constraint */ - rel_persistence CHAR; - p_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(old_partition); - PERFORM @extschema@.validate_relname(new_partition); - - /* Parent relation */ - parent_relid := @extschema@.get_parent_of_partition(old_partition); - - IF lock_parent THEN - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(old_partition); - PERFORM @extschema@.prevent_relation_modification(new_partition); - - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = new_partition INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - new_partition::TEXT; - END IF; - - /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; - END IF; - - /* Get partitioning key */ - part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - IF part_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not 
partitioned', parent_relid::TEXT; - END IF; - - /* Fetch name of old_partition's HASH constraint */ - old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, - part_attname); - - /* Fetch definition of old_partition's HASH constraint */ - SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND conname = old_constr_name - INTO old_constr_def; - - /* Detach old partition */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - old_partition, - old_constr_name); - - /* Attach the new one */ - EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', - new_partition, - @extschema@.build_check_constraint_name(new_partition::REGCLASS, - part_attname), - old_constr_def); - - /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO p_init_callback; - - /* Finally invoke init_callback */ - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - new_partition, - p_init_callback); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN new_partition; -END -$$ -LANGUAGE plpgsql; - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( - parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_idx INTEGER; /* partition indices */ - new_idx INTEGER; - - BEGIN - old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); - new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); - - IF old_idx = new_idx THEN - RETURN NEW; - END IF; - - EXECUTE 
format(''DELETE FROM %8$s WHERE %4$s'', old_idx) - USING %5$s; - - EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) - USING %7$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE %s()'; - - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - plain_schema TEXT; - plain_relname TEXT; - child_relname_format TEXT; - funcname TEXT; - triggername TEXT; - atttype REGTYPE; - partitions_count INTEGER; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_catalog.pg_attribute - WHERE attrelid = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - partitions_count := @extschema@.get_number_of_partitions(parent_relid); - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Build partition name template */ - SELECT * INTO plain_schema, plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - child_relname_format := quote_ident(plain_schema) || '.' 
|| - quote_ident(plain_relname || '_%s'); - - /* Fetch base hash function for atttype */ - atttype := @extschema@.get_attribute_type(parent_relid, attr); - - /* Format function definition and execute it */ - EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, - old_fields, att_fmt, new_fields, child_relname_format, - @extschema@.get_type_hash_func(atttype)::TEXT); - - /* Create trigger on each partition */ - FOR num IN 0..partitions_count-1 - LOOP - EXECUTE format(trigger, - triggername, - format(child_relname_format, num), - funcname); - END LOOP; - - return funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Just create HASH partitions, called by create_hash_partitions(). - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( - parent_relid REGCLASS, - attribute TEXT, - partitions_count INTEGER) -RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' -LANGUAGE C STRICT; - -/* - * Returns hash function OID for specified type - */ -CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) -RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' -LANGUAGE C STRICT; - -/* - * Calculates hash for integer value - */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) -RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' -LANGUAGE C STRICT; - -/* - * Build hash condition for a CHECK CONSTRAINT - */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( - attribute_type REGTYPE, - attribute TEXT, - partitions_count INT4, - partitions_index INT4) -RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' -LANGUAGE C STRICT; -/* ------------------------------------------------------------------------ - * - * range.sql - * RANGE partitioning functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - parent_relid REGCLASS, - OUT seq_name 
TEXT) -AS $$ -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); -END -$$ -LANGUAGE plpgsql; - -/* - * Check RANGE partition boundaries. - */ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS VOID AS -$$ -DECLARE - v_min start_value%TYPE; - v_max start_value%TYPE; - v_count BIGINT; - -BEGIN - /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) - FROM %2$s WHERE NOT %1$s IS NULL', - attribute, parent_relid::TEXT) - INTO v_count, v_min, v_max; - - /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION 'column "%" contains NULL values', attribute; - END IF; - - /* Check lower boundary */ - IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than min value of "%"', attribute; - END IF; - - /* Check upper boundary */ - IF end_value <= v_max THEN - RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; - END IF; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM 
@extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION '"p_count" must not be less than 0'; - END IF; - - /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; - FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', - parent_relid, - attribute, - start_value, - end_value, - v_atttype::TEXT); - END IF; - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* Create first partition */ - FOR i IN 1..p_count - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', - v_atttype::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_tablespace(parent_relid); - - start_value := 
start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on numerical attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_count < 0 THEN - RAISE EXCEPTION 'partitions count must not be less than zero'; - END IF; - - /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) - INTO v_rows_count, v_max; - - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'cannot determine partitions count for empty table'; - END IF; - - IF v_max IS NULL THEN - RAISE EXCEPTION 'column "%" has NULL values', attribute; - END IF; - - p_count := 0; - WHILE v_cur_value <= v_max - LOOP - v_cur_value := v_cur_value + p_interval; - p_count := p_count + 1; - END LOOP; - END IF; - - /* 
- * In case when user doesn't want to automatically create partitions - * and specifies partition count as 0 then do not check boundaries - */ - IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ - end_value := start_value; - FOR i IN 1..p_count - LOOP - end_value := end_value + p_interval; - END LOOP; - - /* check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - END IF; - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - /* create first partition */ - FOR i IN 1..p_count - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); - - start_value := start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN p_count; -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data 
modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - IF p_interval <= 0 THEN - RAISE EXCEPTION 'interval must be positive'; - END IF; - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - 
-BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - attribute, - start_value, - end_value); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - WHILE start_value <= end_value - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_tablespace(parent_relid); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL, - OUT p_range ANYARRAY) 
-RETURNS ANYARRAY AS -$$ -DECLARE - v_parent REGCLASS; - v_attname TEXT; - v_atttype REGTYPE; - v_cond TEXT; - v_new_partition TEXT; - v_part_type INTEGER; - v_check_name TEXT; - -BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - v_new_partition := @extschema@.create_single_range_partition(v_parent, - split_value, - p_range[2], - partition_name, - tablespace); - - /* Copy data */ - v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, - v_cond, - v_new_partition); - - /* Alter original partition */ - v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); - v_check_name := 
@extschema@.build_check_constraint_name(partition, v_attname); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - v_check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - v_check_name, - v_cond); - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge RANGE partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS -$$ -DECLARE - v_parent1 REGCLASS; - v_parent2 REGCLASS; - v_attname TEXT; - v_part_type INTEGER; - v_atttype REGTYPE; - -BEGIN - IF partition1 = partition2 THEN - RAISE EXCEPTION 'cannot merge partition with itself'; - END IF; - - v_parent1 := @extschema@.get_parent_of_partition(partition1); - v_parent2 := @extschema@.get_parent_of_partition(partition2); - - /* Acquire data modification locks (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition1); - PERFORM @extschema@.prevent_relation_modification(partition2); - - IF v_parent1 != v_parent2 THEN - RAISE EXCEPTION 'cannot merge partitions with different parents'; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent1); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent1 - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'specified partitions are not RANGE partitions'; - END IF; - - v_atttype := @extschema@.get_attribute_type(partition1, v_attname); - - EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING v_parent1, partition1, partition2; - - /* Tell backend to reload 
configuration */ - PERFORM @extschema@.on_update_partitions(v_parent1); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge two partitions. All data will be copied to the first one. Second - * partition will be destroyed. - * - * NOTE: dummy field is used to pass the element type to the function - * (it is necessary because of pseudo-types used in function). - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( - parent_relid REGCLASS, - partition1 REGCLASS, - partition2 REGCLASS, - dummy ANYELEMENT, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_check_name TEXT; - -BEGIN - SELECT attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || - @extschema@.get_part_range($2, NULL::%1$s)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING partition1, partition2 - INTO p_range; - - /* Check if ranges are adjacent */ - IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN - RAISE EXCEPTION 'merge failed, partitions must be adjacent'; - END IF; - - /* Drop constraint on first partition... 
*/ - v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition1::TEXT, - v_check_name); - - /* and create a new one */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition1::TEXT, - v_check_name, - @extschema@.build_range_condition(v_attname, - least(p_range[1], p_range[3]), - greatest(p_range[2], p_range[4]))); - - /* Copy data from second partition to the first one */ - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition2::TEXT, - partition1::TEXT); - - /* Remove second partition */ - EXECUTE format('DROP TABLE %s', partition2::TEXT); -END -$$ LANGUAGE plpgsql; - - -/* - * Append new partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for append_partition(). 
We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. - */ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot append to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[2], - p_range[2] + p_interval::interval, - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[2], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Prepend new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); - - EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) - USING - parent_relid, - v_atttype, - v_interval, - partition_name, - tablespace - INTO - v_part_name; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - -/* - * Spawn logic for prepend_partition(). We have to - * separate this in order to pass the 'p_range'. - * - * NOTE: we don't take a xact_handling lock here. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( - parent_relid REGCLASS, - p_atttype REGTYPE, - p_interval TEXT, - p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - v_atttype REGTYPE; - -BEGIN - IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN - RAISE EXCEPTION 'cannot prepend to empty partitions set'; - END IF; - - v_atttype := @extschema@.get_base_type(p_atttype); - - /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', - v_atttype::TEXT) - USING parent_relid - INTO p_range; - - IF @extschema@.is_date_type(p_atttype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[1] - p_interval::interval, - p_range[1], - partition_name, - tablespace); - ELSE - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', - v_atttype::TEXT) - USING - parent_relid, - p_range[1], - p_interval, - partition_name, - tablespace - INTO - v_part_name; - END IF; - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Add new partition - */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_name TEXT; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF start_value >= end_value THEN - RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; - END IF; - - /* check range overlap */ - IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN - PERFORM @extschema@.check_range_available(parent_relid, - start_value, - end_value); - END IF; - - /* Create new partition 
*/ - v_part_name := @extschema@.create_single_range_partition(parent_relid, - start_value, - end_value, - partition_name, - tablespace); - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; - - -/* - * Drop range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition REGCLASS, - delete_data BOOLEAN DEFAULT TRUE) -RETURNS TEXT AS -$$ -DECLARE - parent_relid REGCLASS; - part_name TEXT; - v_relkind CHAR; - v_rows BIGINT; - v_part_type INTEGER; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - part_name := partition::TEXT; /* save the name to be returned */ - - SELECT parttype - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_part_type; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', - parent_relid::TEXT, - partition::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; - - /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; - END IF; - - SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition - INTO v_relkind; - - /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). Depending on relkind we use - * DROP TABLE or DROP FOREIGN TABLE. 
- */ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); - ELSE - EXECUTE format('DROP TABLE %s', partition::TEXT); - END IF; - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN part_name; -END -$$ -LANGUAGE plpgsql -SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ - - -/* - * Attach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( - parent_relid REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - rel_persistence CHAR; - v_init_callback REGPROCEDURE; - -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - /* Ignore temporary tables */ - SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; - - IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; - END IF; - - /* check range overlap */ - PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - - IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); - - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - @extschema@.build_range_condition(v_attname, - start_value, - end_value)); - 
- /* Fetch init_callback from 'params' table */ - WITH stub_callback(stub) as (values (0)) - SELECT coalesce(init_callback, 0::REGPROCEDURE) - FROM stub_callback - LEFT JOIN @extschema@.pathman_config_params AS params - ON params.partrel = parent_relid - INTO v_init_callback; - - PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, - v_init_callback, - start_value, - end_value); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Detach range partition - */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - parent_relid REGCLASS; - -BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - v_attname := attname - FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, - parent_relid::TEXT); - - /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - - RETURN partition; -END -$$ -LANGUAGE plpgsql; - - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_oid Oid; - new_oid Oid; - - BEGIN - old_oid := TG_RELID; - new_oid := @extschema@.find_or_create_range_partition( - ''%2$s''::regclass, NEW.%3$s); - - IF old_oid = new_oid THEN - RETURN 
NEW; - END IF; - - EXECUTE format(''DELETE FROM %%s WHERE %5$s'', - old_oid::regclass::text) - USING %6$s; - - EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', - new_oid::regclass::text) - USING %8$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s ' || - 'BEFORE UPDATE ON %s ' || - 'FOR EACH ROW EXECUTE PROCEDURE %s()'; - - triggername TEXT; - funcname TEXT; - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - rec RECORD; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_attribute - WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create function for trigger */ - EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, - old_fields, att_fmt, new_fields); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT, - funcname); - END LOOP; - - RETURN funcname; -END -$$ LANGUAGE plpgsql; - -/* - * Creates new RANGE partition. Returns partition name. - * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( - parent_relid REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) -RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' -LANGUAGE C -SET client_min_messages = WARNING; - -/* - * Construct CHECK constraint condition for a range partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT) -RETURNS TEXT AS 'pg_pathman', 'build_range_condition' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( - parent_relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' -LANGUAGE C; - -/* - * Returns N-th range (as an array of two elements). - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - parent_relid REGCLASS, - partition_idx INTEGER, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' -LANGUAGE C; - -/* - * Returns min and max values for specified RANGE partition. - */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition_relid REGCLASS, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' -LANGUAGE C; - -/* - * Checks if range overlaps with existing partitions. - * Returns TRUE if overlaps and FALSE otherwise. - */ -CREATE OR REPLACE FUNCTION @extschema@.check_range_available( - parent_relid REGCLASS, - range_min ANYELEMENT, - range_max ANYELEMENT) -RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' -LANGUAGE C; - -/* - * Needed for an UPDATE trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( - parent_relid REGCLASS, - value ANYELEMENT) -RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' -LANGUAGE C; From b2c4f8e0cf38e5da9255fe17edfc4ffb64b7dcad Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Feb 2017 12:39:29 +0300 Subject: [PATCH 0216/1124] migration script fix --- pg_pathman--1.2--1.3.sql | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index bd31902d..8baa8399 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -16,9 +16,16 @@ ADD CHECK (@extschema@.validate_interval_value(partrel, parttype, range_interval)); +/* + * Drop check constraint to be able to update column type. We recreate it + * later and it will be slightly different + */ +DROP FUNCTION @extschema@.validate_part_callback(REGPROC, BOOL) CASCADE; + /* Change type for init_callback attribute */ ALTER TABLE @extschema@.pathman_config_params ALTER COLUMN init_callback TYPE TEXT, +ALTER COLUMN init_callback DROP NOT NULL, ALTER COLUMN init_callback SET DEFAULT NULL; /* Set init_callback to NULL where it used to be 0 */ @@ -26,8 +33,6 @@ UPDATE @extschema@.pathman_config_params SET init_callback = NULL WHERE init_callback = '-'; -DROP FUNCTION @extschema@.validate_part_callback(REGPROC, BOOL); - CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( callback REGPROCEDURE, raise_error BOOL DEFAULT TRUE) From 411a111bdd471d3b86914f6639b3ce38d57d2e8f Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Feb 2017 12:54:49 +0300 Subject: [PATCH 0217/1124] add the header to migration script --- pg_pathman--1.2--1.3.sql | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index 8baa8399..1214e52c 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -1,3 +1,13 @@ +/* 
------------------------------------------------------------------------ + * + * pg_pathman--1.1--1.2.sql + * Migration scripts to version 1.2 + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + /* ------------------------------------------------------------------------ * Alter config tables From b8659fa56fb6d627a433d16e7503c6946b4f5f68 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Feb 2017 14:33:57 +0300 Subject: [PATCH 0218/1124] rename "partition" arguments to "partition_relid" --- init.sql | 10 +-- pg_pathman--1.2--1.3.sql | 133 ++++++++++++++++++++++++++++----------- range.sql | 76 +++++++++++----------- 3 files changed, 141 insertions(+), 78 deletions(-) diff --git a/init.sql b/init.sql index 319069be..583080ad 100644 --- a/init.sql +++ b/init.sql @@ -643,7 +643,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, - partition REGCLASS) + partition_relid REGCLASS) RETURNS VOID AS $$ DECLARE @@ -651,13 +651,13 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); + PERFORM @extschema@.validate_relname(partition_relid); FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP EXECUTE format('ALTER TABLE %s ADD %s', - partition::TEXT, + partition_relid::TEXT, pg_catalog.pg_get_constraintdef(rec.conid)); END LOOP; END @@ -880,7 +880,7 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - partition REGCLASS, + partition_relid REGCLASS, init_callback REGPROCEDURE, start_value ANYELEMENT, end_value ANYELEMENT) @@ -892,7 +892,7 @@ LANGUAGE C; */ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, - partition 
REGCLASS, + partition_relid REGCLASS, init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index 1214e52c..7818f6b7 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -63,7 +63,13 @@ DROP FUNCTION @extschema@.get_attribute_type(REGCLASS, TEXT); DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN); DROP FUNCTION @extschema@.create_hash_partitions_internal(REGCLASS, TEXT, INTEGER); DROP FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT); -DROP FUNCTION @extschema@.get_part_range(REGCLASS, ANYELEMENT); +DROP FUNCTION @extschema@.copy_foreign_keys(REGCLASS, REGCLASS); +DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE); +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, TEXT, OUT ANYARRAY); +DROP FUNCTION @extschema@.drop_range_partition(REGCLASS, BOOLEAN); +DROP FUNCTION @extschema@.attach_range_partition(REGCLASS, REGCLASS, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.detach_range_partition(REGCLASS); /* ------------------------------------------------------------------------ * Alter functions' modifiers @@ -441,7 +447,7 @@ LANGUAGE C; CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, + partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL, @@ -458,13 +464,13 @@ DECLARE v_check_name TEXT; BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); + v_parent = @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(v_parent); /* Acquire data modification lock (prevent further modifications) */ - PERFORM 
@extschema@.prevent_relation_modification(partition); + PERFORM @extschema@.prevent_relation_modification(partition_relid); v_atttype = @extschema@.get_partition_key_type(v_parent); @@ -475,13 +481,13 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', @extschema@.get_base_type(v_atttype)::TEXT) - USING partition + USING partition_relid INTO p_range; IF p_range IS NULL THEN @@ -507,21 +513,21 @@ BEGIN v_attname, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, + partition_relid::TEXT, v_cond, v_new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(partition::regclass, + v_cond := @extschema@.build_range_condition(partition_relid::regclass, v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, + partition_relid::TEXT, v_check_name); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, + partition_relid::TEXT, v_check_name, v_cond); @@ -747,7 +753,7 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, - partition REGCLASS, + partition_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS @@ -759,29 +765,29 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); + PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ PERFORM 
@extschema@.lock_partitioned_relation(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; + WHERE oid = partition_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; + partition_relid::TEXT; END IF; /* check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; @@ -791,9 +797,9 @@ BEGIN /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - @extschema@.build_range_condition(partition, + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname), + @extschema@.build_range_condition(partition_relid, v_attname, start_value, end_value)); @@ -807,7 +813,7 @@ BEGIN INTO v_init_callback; PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, + partition_relid, v_init_callback, start_value, end_value); @@ -815,14 +821,14 @@ BEGIN /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) + partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -830,7 +836,7 @@ DECLARE parent_relid REGCLASS; BEGIN - 
parent_relid := @extschema@.get_parent_of_partition(partition); + parent_relid := @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.prevent_relation_modification(parent_relid); @@ -845,23 +851,87 @@ BEGIN /* Remove inheritance */ EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, + partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname)); /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS +$$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + v_relkind CHAR; + v_rows BIGINT; + v_part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + part_name := partition_relid::TEXT; /* save the name to be returned */ + + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. 
It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN part_name; +END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; + + CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( partition REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' @@ -875,10 +945,3 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( end_value ANYELEMENT) RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; - - -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition REGCLASS, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' -LANGUAGE C; diff --git a/range.sql b/range.sql index c2733a3a..84c9fefa 100644 --- a/range.sql +++ b/range.sql @@ -439,7 +439,7 @@ $$ LANGUAGE plpgsql; * Split RANGE partition */ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition REGCLASS, + partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL, @@ -456,13 +456,13 @@ DECLARE v_check_name TEXT; BEGIN - v_parent = @extschema@.get_parent_of_partition(partition); + v_parent = @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(v_parent); /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition); + PERFORM @extschema@.prevent_relation_modification(partition_relid); v_atttype = @extschema@.get_partition_key_type(v_parent); @@ -473,13 +473,13 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" 
is not a RANGE partition', partition::TEXT; + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', @extschema@.get_base_type(v_atttype)::TEXT) - USING partition + USING partition_relid INTO p_range; IF p_range IS NULL THEN @@ -505,21 +505,21 @@ BEGIN v_attname, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', - partition::TEXT, + partition_relid::TEXT, v_cond, v_new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(partition::regclass, + v_cond := @extschema@.build_range_condition(partition_relid::regclass, v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, + partition_relid::TEXT, v_check_name); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, + partition_relid::TEXT, v_check_name, v_cond); @@ -809,7 +809,7 @@ LANGUAGE plpgsql; * Drop range partition */ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - partition REGCLASS, + partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -821,8 +821,8 @@ DECLARE v_part_type INTEGER; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); - part_name := partition::TEXT; /* save the name to be returned */ + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + part_name := partition_relid::TEXT; /* save the name to be returned */ SELECT parttype FROM @extschema@.pathman_config @@ -831,7 +831,7 @@ BEGIN /* Check if this is a RANGE partition */ IF v_part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + RAISE EXCEPTION 
'"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Acquire lock on parent */ @@ -840,15 +840,15 @@ BEGIN IF NOT delete_data THEN EXECUTE format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, - partition::TEXT); + partition_relid::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; END IF; SELECT relkind FROM pg_catalog.pg_class - WHERE oid = partition + WHERE oid = partition_relid INTO v_relkind; /* @@ -857,9 +857,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition::TEXT); + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); END IF; /* Invalidate cache */ @@ -876,7 +876,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is */ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, - partition REGCLASS, + partition_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS @@ -888,29 +888,29 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition); + PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = partition INTO rel_persistence; + WHERE oid = partition_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', - partition::TEXT; + partition_relid::TEXT; END IF; /* check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT 
@extschema@.validate_relations_equality(parent_relid, partition) THEN + IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have the exact same structure as parent'; END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; @@ -920,9 +920,9 @@ BEGIN /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname), - @extschema@.build_range_condition(partition, + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname), + @extschema@.build_range_condition(partition_relid, v_attname, start_value, end_value)); @@ -936,7 +936,7 @@ BEGIN INTO v_init_callback; PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, - partition, + partition_relid, v_init_callback, start_value, end_value); @@ -944,7 +944,7 @@ BEGIN /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; @@ -953,7 +953,7 @@ LANGUAGE plpgsql; * Detach range partition */ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - partition REGCLASS) + partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -961,7 +961,7 @@ DECLARE parent_relid REGCLASS; BEGIN - parent_relid := @extschema@.get_parent_of_partition(partition); + parent_relid := @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.prevent_relation_modification(parent_relid); @@ -976,18 +976,18 @@ BEGIN /* Remove inheritance */ EXECUTE format('ALTER TABLE %s NO INHERIT %s', - partition::TEXT, + partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ EXECUTE 
format('ALTER TABLE %s DROP CONSTRAINT %s', - partition::TEXT, - @extschema@.build_check_constraint_name(partition, v_attname)); + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname)); /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition; + RETURN partition_relid; END $$ LANGUAGE plpgsql; @@ -1098,7 +1098,7 @@ $$ LANGUAGE plpgsql; * partition is dropped the next one automatically covers freed range */ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( - partition REGCLASS) + partition_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; @@ -1146,7 +1146,7 @@ LANGUAGE C; * Returns min and max values for specified RANGE partition. */ CREATE OR REPLACE FUNCTION @extschema@.get_part_range( - partition REGCLASS, + partition_relid REGCLASS, dummy ANYELEMENT) RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' LANGUAGE C; From 1bbbd78cbea6bfa792025ecbd5d6b1ee82f54f4a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Feb 2017 15:56:39 +0300 Subject: [PATCH 0219/1124] introduce safe wrapper function pathman_cache_search_relid() --- .gitignore | 2 +- src/init.c | 42 +++++++++++++++++++++++++++++++++++ src/init.h | 5 +++++ src/relation_info.c | 53 +++++++++++++++++++++------------------------ 4 files changed, 73 insertions(+), 29 deletions(-) diff --git a/.gitignore b/.gitignore index 90108e07..f0d2c2c4 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,4 @@ regression.out *.gcda *.gcno *.gcov -pg_pathman--1.3.sql +pg_pathman--*.sql diff --git a/src/init.c b/src/init.c index b23121b6..a427192e 100644 --- a/src/init.c +++ b/src/init.c @@ -105,6 +105,44 @@ static uint32 build_sql_facade_version(char *version_cstr); static uint32 get_sql_facade_version(void); static void validate_sql_facade_version(uint32 ver); + +/* + * Safe hash search (takes care of disabled pg_pathman). 
+ */ +void * +pathman_cache_search_relid(HTAB *cache_table, + Oid relid, + HASHACTION action, + bool *found) +{ + switch (action) + { + /* May return NULL */ + case HASH_FIND: + case HASH_REMOVE: + if (!cache_table) + return NULL; + break; + + /* Must return valid pointer */ + case HASH_ENTER: + if (!cache_table) + elog(ERROR, "pg_pathman is not initialized yet"); + break; + + /* Something strange has just happened */ + default: + elog(ERROR, "unexpected action in function " + CppAsString(pathman_cache_search_relid)); + break; + } + + Assert(cache_table); + + /* Everything is fine */ + return hash_search(cache_table, (const void *) &relid, action, found); +} + /* * Save and restore main init state. */ @@ -279,6 +317,10 @@ init_local_cache(void) { HASHCTL ctl; + /* Destroy caches, just in case */ + hash_destroy(partitioned_rels); + hash_destroy(parent_cache); + memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartRelationInfo); diff --git a/src/init.h b/src/init.h index 1ac9e52e..6b342ed2 100644 --- a/src/init.h +++ b/src/init.h @@ -102,6 +102,11 @@ extern PathmanInitState pg_pathman_init_state; #define CURRENT_LIB_VERSION 0x010300 +void *pathman_cache_search_relid(HTAB *cache_table, + Oid relid, + HASHACTION action, + bool *found); + /* * Save and restore PathmanInitState. */ diff --git a/src/relation_info.c b/src/relation_info.c index daaf7b62..fb67f845 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -81,9 +81,9 @@ refresh_pathman_relation_info(Oid relid, Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; - prel = (PartRelationInfo *) hash_search(partitioned_rels, - (const void *) &relid, - HASH_ENTER, &found_entry); + prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, + relid, HASH_ENTER, + &found_entry); elog(DEBUG2, found_entry ? 
"Refreshing record for relation %u in pg_pathman's cache [%u]" : @@ -239,9 +239,9 @@ invalidate_pathman_relation_info(Oid relid, bool *found) HASHACTION action = found ? HASH_FIND : HASH_ENTER; PartRelationInfo *prel; - prel = hash_search(partitioned_rels, - (const void *) &relid, - action, &prel_found); + prel = pathman_cache_search_relid(partitioned_rels, + relid, action, + &prel_found); if ((action == HASH_FIND || (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) @@ -272,10 +272,9 @@ invalidate_pathman_relation_info(Oid relid, bool *found) const PartRelationInfo * get_pathman_relation_info(Oid relid) { - const PartRelationInfo *prel = hash_search(partitioned_rels, - (const void *) &relid, - HASH_FIND, NULL); - + const PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, + relid, HASH_FIND, + NULL); /* Refresh PartRelationInfo if needed */ if (prel && !PrelIsValid(prel)) { @@ -345,10 +344,10 @@ get_pathman_relation_info_after_lock(Oid relid, void remove_pathman_relation_info(Oid relid) { - PartRelationInfo *prel = hash_search(partitioned_rels, - (const void *) &relid, - HASH_FIND, NULL); - if (prel && PrelIsValid(prel)) + PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, + relid, HASH_FIND, + NULL); + if (PrelIsValid(prel)) { /* Free these arrays iff they're not NULL */ FreeChildrenArray(prel); @@ -356,9 +355,8 @@ remove_pathman_relation_info(Oid relid) } /* Now let's remove the entry completely */ - hash_search(partitioned_rels, - (const void *) &relid, - HASH_REMOVE, NULL); + pathman_cache_search_relid(partitioned_rels, relid, + HASH_REMOVE, NULL); elog(DEBUG2, "Removing record for relation %u in pg_pathman's cache [%u]", @@ -509,10 +507,10 @@ cache_parent_of_partition(Oid partition, Oid parent) bool found; PartParentInfo *ppar; - ppar = hash_search(parent_cache, - (const void *) &partition, - HASH_ENTER, &found); - + ppar = pathman_cache_search_relid(parent_cache, + partition, + HASH_ENTER, + &found); elog(DEBUG2, 
found ? "Refreshing record for child %u in pg_pathman's cache [%u]" : @@ -551,10 +549,10 @@ get_parent_of_partition_internal(Oid partition, { const char *action_str; /* "Fetching"\"Resetting" */ Oid parent; - PartParentInfo *ppar = hash_search(parent_cache, - (const void *) &partition, - HASH_FIND, NULL); - + PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, + partition, + HASH_FIND, + NULL); /* Set 'action_str' */ switch (action) { @@ -581,9 +579,8 @@ get_parent_of_partition_internal(Oid partition, /* Remove entry if necessary */ if (action == HASH_REMOVE) - hash_search(parent_cache, - (const void *) &partition, - HASH_REMOVE, NULL); + pathman_cache_search_relid(parent_cache, partition, + HASH_REMOVE, NULL); } /* Try fetching parent from syscache if 'status' is provided */ else if (status) From 04ca43602e1d4375172d240cb74a27306c0e3414 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Feb 2017 17:39:01 +0300 Subject: [PATCH 0220/1124] improve pathman_cache_search_relid(), tests --- expected/pathman_calamity.out | 75 +++++++++++++++++++++++++++++++++++ sql/pathman_calamity.sql | 38 ++++++++++++++++++ src/init.c | 8 +--- 3 files changed, 114 insertions(+), 7 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 1830584f..e9a5c7e4 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -670,3 +670,78 @@ NOTICE: drop cascades to table calamity.test_range_oid_1 DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); +NOTICE: sequence "survivor_seq" does not exist, 
skipping + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is not initialized yet +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | partattr | range_min | range_max +-------------------+---------------------+----------+----------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop 
cascades to sequence calamity.survivor_seq +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 0b8058c8..6094cf5b 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -261,6 +261,44 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; + + + +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ + +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; + + +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +SELECT * FROM pathman_partition_list; /* not ok */ +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + +SET pg_pathman.enable = t; /* LOAD CONFIG */ + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ +SELECT * FROM pathman_partition_list; /* OK */ +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + +DROP TABLE calamity.survivor CASCADE; + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/init.c b/src/init.c index a427192e..2eba0fb1 100644 --- a/src/init.c +++ b/src/init.c @@ -117,14 +117,8 @@ pathman_cache_search_relid(HTAB *cache_table, { switch (action) { - /* May return NULL */ case HASH_FIND: case HASH_REMOVE: - if (!cache_table) - return NULL; - break; - - /* Must return valid pointer */ case HASH_ENTER: if (!cache_table) 
elog(ERROR, "pg_pathman is not initialized yet"); @@ -137,7 +131,7 @@ pathman_cache_search_relid(HTAB *cache_table, break; } - Assert(cache_table); + AssertArg(cache_table); /* Everything is fine */ return hash_search(cache_table, (const void *) &relid, action, found); From 01d1074066d60c68f3f6fa3dfa6fbe4e07ac01fc Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Feb 2017 18:08:33 +0300 Subject: [PATCH 0221/1124] beautify code in pathman_cache_search_relid() --- src/init.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/init.c b/src/init.c index 2eba0fb1..0df69355 100644 --- a/src/init.c +++ b/src/init.c @@ -115,23 +115,22 @@ pathman_cache_search_relid(HTAB *cache_table, HASHACTION action, bool *found) { - switch (action) - { - case HASH_FIND: - case HASH_REMOVE: - case HASH_ENTER: - if (!cache_table) + /* Table is NULL, take some actions */ + if (cache_table == NULL) + switch (action) + { + case HASH_FIND: + case HASH_ENTER: + case HASH_REMOVE: elog(ERROR, "pg_pathman is not initialized yet"); - break; - - /* Something strange has just happened */ - default: - elog(ERROR, "unexpected action in function " - CppAsString(pathman_cache_search_relid)); - break; - } + break; - AssertArg(cache_table); + /* Something strange has just happened */ + default: + elog(ERROR, "unexpected action in function " + CppAsString(pathman_cache_search_relid)); + break; + } /* Everything is fine */ return hash_search(cache_table, (const void *) &relid, action, found); From 11faec2db59b5929ba3ff4a481220d1783d04b1f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Feb 2017 18:36:04 +0300 Subject: [PATCH 0222/1124] fix migration script (1.3) --- pg_pathman--1.2--1.3.sql | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index 7818f6b7..0aabfd39 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -63,13 +63,11 @@ 
DROP FUNCTION @extschema@.get_attribute_type(REGCLASS, TEXT); DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN); DROP FUNCTION @extschema@.create_hash_partitions_internal(REGCLASS, TEXT, INTEGER); DROP FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT); -DROP FUNCTION @extschema@.copy_foreign_keys(REGCLASS, REGCLASS); -DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE, ANYELEMENT, ANYELEMENT); -DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE); DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, TEXT, OUT ANYARRAY); DROP FUNCTION @extschema@.drop_range_partition(REGCLASS, BOOLEAN); DROP FUNCTION @extschema@.attach_range_partition(REGCLASS, REGCLASS, ANYELEMENT, ANYELEMENT); DROP FUNCTION @extschema@.detach_range_partition(REGCLASS); +DROP FUNCTION @extschema@.merge_range_partitions_internal(REGCLASS, REGCLASS, REGCLASS, ANYELEMENT); /* ------------------------------------------------------------------------ * Alter functions' modifiers @@ -933,7 +931,7 @@ SET pg_pathman.enable_partitionfilter = off; CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( - partition REGCLASS) + partition_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; @@ -945,3 +943,14 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( end_value ANYELEMENT) RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * ----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; From 4e2d48cf89e400a1926b97b0c8702ccc872e7006 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Feb 
2017 19:34:38 +0300 Subject: [PATCH 0223/1124] alter functions with renamed args (1.3) --- pg_pathman--1.2--1.3.sql | 100 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql index 0aabfd39..86f6d36e 100644 --- a/pg_pathman--1.2--1.3.sql +++ b/pg_pathman--1.2--1.3.sql @@ -68,15 +68,71 @@ DROP FUNCTION @extschema@.drop_range_partition(REGCLASS, BOOLEAN); DROP FUNCTION @extschema@.attach_range_partition(REGCLASS, REGCLASS, ANYELEMENT, ANYELEMENT); DROP FUNCTION @extschema@.detach_range_partition(REGCLASS); DROP FUNCTION @extschema@.merge_range_partitions_internal(REGCLASS, REGCLASS, REGCLASS, ANYELEMENT); +DROP FUNCTION @extschema@.copy_foreign_keys(REGCLASS, REGCLASS); +DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE); + /* ------------------------------------------------------------------------ * Alter functions' modifiers * ----------------------------------------------------------------------*/ ALTER FUNCTION @extschema@.pathman_set_param(REGCLASS, TEXT, ANYELEMENT) STRICT; + /* ------------------------------------------------------------------------ * (Re)create functions * ----------------------------------------------------------------------*/ + +/* + * Invoke init_callback on RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + + +/* + * Invoke init_callback on HASH partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + + +/* + * Copy all of parent's foreign keys. + */ +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS +$$ +DECLARE + rec RECORD; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition_relid::TEXT, + pg_catalog.pg_get_constraintdef(rec.conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( relation REGCLASS, callback REGPROCEDURE DEFAULT 0) @@ -945,6 +1001,50 @@ RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; +/* + * Old school way to distribute rows to partitions. + */ +CREATE OR REPLACE FUNCTION @extschema@.partition_data( + parent_relid REGCLASS, + OUT p_total BIGINT) +AS +$$ +BEGIN + p_total := 0; + + /* Create partitions and copy rest of the data */ + EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + INSERT INTO %1$s SELECT * FROM part_data', + parent_relid::TEXT); + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ +LANGUAGE plpgsql STRICT +SET pg_pathman.enable_partitionfilter = on; + +/* + * Add a row describing the optional parameter to pathman_config_params. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( + relation REGCLASS, + param TEXT, + value ANYELEMENT) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('INSERT INTO @extschema@.pathman_config_params + (partrel, %1$s) VALUES ($1, $2) + ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) + USING relation, value; +END +$$ +LANGUAGE plpgsql; + + + /* ------------------------------------------------------------------------ * Final words of wisdom * ----------------------------------------------------------------------*/ From ae6b49b117535889eaad037992d45cb99ca63b9c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 15 Feb 2017 12:59:43 +0300 Subject: [PATCH 0224/1124] fixes for MS compiler --- src/pl_range_funcs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 8d1fc523..b78f44a4 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -269,7 +269,10 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) if (ranges[i].child_oid == partition_relid) { ArrayType *arr; - Bound elems[2] = { ranges[i].min, ranges[i].max }; + Bound elems[2]; + + elems[0] = ranges[i].min; + elems[1] = ranges[i].max; arr = construct_infinitable_array(elems, 2, prel->atttype, prel->attlen, From 92f321e70aab75e08963191ab630eae9ea3e3bb6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 15 Feb 2017 16:39:57 +0300 Subject: [PATCH 0225/1124] update README.md --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 0febc6e1..58556f05 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,19 @@ Done! Now it's time to setup your partitioning schemes. > **Important:** Don't forget to set the `PG_CONFIG` variable in case you want to test `pg_pathman` on a custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). +## How to update +In order to update pg_pathman: + +1. 
Install the latest _stable_ release of pg_pathman. +2. Restart your PostgreSQL cluster. +3. Execute the following queries: + +```plpgsql +/* replace X.Y with the version number, e.g. 1.3 */ +ALTER EXTENSION pg_pathman UPDATE TO "X.Y"; +SET pg_pathman.enable = t; +``` + ## Available functions ### Partition creation @@ -654,3 +667,4 @@ Do not hesitate to post your issues, questions and new ideas at the [issues](htt Ildar Musin Postgres Professional Ltd., Russia Alexander Korotkov Postgres Professional Ltd., Russia Dmitry Ivanov Postgres Professional Ltd., Russia + From dd0151355e5af51eb8e55ec6d5e8f93398d0ae68 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 16 Feb 2017 17:04:06 +0300 Subject: [PATCH 0226/1124] update trigger rewrited in C --- hash.sql | 110 ---------------------------------------- init.sql | 40 +++++++++++++++ range.sql | 78 +++------------------------- src/pl_range_funcs.c | 118 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 165 insertions(+), 181 deletions(-) diff --git a/hash.sql b/hash.sql index 59a2ae64..55cd70b7 100644 --- a/hash.sql +++ b/hash.sql @@ -164,116 +164,6 @@ END $$ LANGUAGE plpgsql; -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( - parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_idx INTEGER; /* partition indices */ - new_idx INTEGER; - - BEGIN - old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); - new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); - - IF old_idx = new_idx THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) - USING %5$s; - - EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) - USING %7$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE %s()'; - - att_names TEXT; - old_fields 
TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - plain_schema TEXT; - plain_relname TEXT; - child_relname_format TEXT; - funcname TEXT; - triggername TEXT; - atttype REGTYPE; - partitions_count INTEGER; - -BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_catalog.pg_attribute - WHERE attrelid = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - partitions_count := @extschema@.get_number_of_partitions(parent_relid); - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Build partition name template */ - SELECT * INTO plain_schema, plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - child_relname_format := quote_ident(plain_schema) || '.' 
|| - quote_ident(plain_relname || '_%s'); - - /* Fetch base hash function for atttype */ - atttype := @extschema@.get_partition_key_type(parent_relid); - - /* Format function definition and execute it */ - EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, - old_fields, att_fmt, new_fields, child_relname_format, - @extschema@.get_type_hash_func(atttype)::TEXT); - - /* Create trigger on each partition */ - FOR num IN 0..partitions_count-1 - LOOP - EXECUTE format(trigger, - triggername, - format(child_relname_format, num), - funcname); - END LOOP; - - return funcname; -END -$$ LANGUAGE plpgsql; - - /* * Just create HASH partitions, called by create_hash_partitions(). */ diff --git a/init.sql b/init.sql index 583080ad..eb62c0e5 100644 --- a/init.sql +++ b/init.sql @@ -896,3 +896,43 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; + + +/* + * Function for update triggers + */ +CREATE OR REPLACE FUNCTION @extschema@.update_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'update_trigger_func' +LANGUAGE C; + + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_update_trigger( + IN parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE + @extschema@.update_trigger_func()'; + triggername TEXT; + rec RECORD; + +BEGIN + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Create trigger on every partition */ + FOR rec in (SELECT * FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid) + LOOP + EXECUTE format(trigger, + triggername, + rec.inhrelid::REGCLASS::TEXT); + END LOOP; + + RETURN 'update_trigger_func()'; +END +$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/range.sql b/range.sql index 84c9fefa..11c7d577 100644 --- a/range.sql +++ b/range.sql @@ 
-992,6 +992,7 @@ END $$ LANGUAGE plpgsql; + /* * Creates an update trigger */ @@ -1000,91 +1001,26 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( RETURNS TEXT AS $$ DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_oid Oid; - new_oid Oid; - - BEGIN - old_oid := TG_RELID; - new_oid := @extschema@.find_or_create_range_partition( - ''%2$s''::regclass, NEW.%3$s); - - IF old_oid = new_oid THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %%s WHERE %5$s'', - old_oid::regclass::text) - USING %6$s; - - EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', - new_oid::regclass::text) - USING %8$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s ' || - 'BEFORE UPDATE ON %s ' || - 'FOR EACH ROW EXECUTE PROCEDURE %s()'; - + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE + @extschema@.update_trigger_func()'; triggername TEXT; - funcname TEXT; - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; rec RECORD; BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - - IF attr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' 
|| attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_attribute - WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); triggername := @extschema@.build_update_trigger_name(parent_relid); - /* Create function for trigger */ - EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, - old_fields, att_fmt, new_fields); - /* Create trigger on every partition */ FOR rec in (SELECT * FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid) LOOP EXECUTE format(trigger, triggername, - rec.inhrelid::REGCLASS::TEXT, - funcname); + rec.inhrelid::REGCLASS::TEXT); END LOOP; - RETURN funcname; + RETURN 'update_trigger_func()'; END $$ LANGUAGE plpgsql; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b78f44a4..6694fee6 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -31,6 +31,13 @@ #include "utils/syscache.h" +/* TODO */ +#include "commands/trigger.h" +#include "access/htup_details.h" +#include "access/tupconvert.h" +#include "partition_filter.h" + + static char *deparse_constraint(Oid relid, Node *expr); static ArrayType *construct_infinitable_array(Bound *elems, int nelems, @@ -54,6 +61,8 @@ static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); +static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); + /* Function declarations */ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); @@ -69,6 +78,8 @@ PG_FUNCTION_INFO_V1( merge_range_partitions ); PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( update_trigger_func ); + /* * ----------------------------- 
@@ -1007,3 +1018,110 @@ drop_table_by_oid(Oid relid) RemoveRelations(n); } + +/* + * Update trigger + */ +Datum +update_trigger_func(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + PartParentSearch parent_search; + Oid parent; + TriggerData *trigdata = (TriggerData *) fcinfo->context; + char *key_name; + Datum key; + bool isnull; + TupleConversionMap *conversion_map; + + TupleDesc source_tupdesc; + HeapTuple source_tuple; + Oid source_relid; + AttrNumber source_key; + + Relation target_rel; + TupleDesc target_tupdesc; + HeapTuple target_tuple; + Oid target_relid; + + /* This function can only be invoked as a trigger */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "Function invoked not in a trigger context"); + + /* tuple to return to executor */ + if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) + elog(ERROR, "This function must only be used as UPDATE trigger"); + + source_relid = trigdata->tg_relation->rd_id; + source_tuple = trigdata->tg_newtuple; + source_tupdesc = trigdata->tg_relation->rd_att; + + parent = get_parent_of_partition(source_relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(source_relid)); + + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); + + /* + * Find partitioning key attribute of source partition. 
Keep in mind that + * there could be dropped columns in parent relation or partition and so + * key attribute may have different number + */ + key_name = get_attname(parent, prel->attnum); + source_key = get_attnum(source_relid, key_name); + key = heap_getattr(source_tuple, source_key, source_tupdesc, &isnull); + + /* Find partition it should go into */ + target_relid = get_partition_for_key(prel, key); + + /* If target partition is the same then do nothing */ + if (target_relid == source_relid) + return PointerGetDatum(source_tuple); + + target_rel = heap_open(target_relid, RowExclusiveLock); + target_tupdesc = target_rel->rd_att; + + /* + * Else if it's a different partition then build a TupleConversionMap + * between original partition and new one. And then do a convertation + */ + conversion_map = convert_tuples_by_name(source_tupdesc, + target_tupdesc, + "Failed to convert tuple"); + target_tuple = do_convert_tuple(source_tuple, conversion_map); + + /* Delete old tuple from original partition */ + simple_heap_delete(trigdata->tg_relation, &trigdata->tg_trigtuple->t_self); + + /* Insert tuple into new partition */ + simple_heap_insert(target_rel, target_tuple); + + heap_close(target_rel, RowExclusiveLock); + + PG_RETURN_VOID(); +} + +/* + * Returns Oid of partition corresponding to partitioning key value. 
Throws + * an error if no partition found + */ +static Oid +get_partition_for_key(const PartRelationInfo *prel, Datum key) +{ + Oid *parts; + int nparts; + + /* Search for matching partitions */ + parts = find_partitions_for_value(key, prel->atttype, prel, &nparts); + + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + else if (nparts == 0) + elog(ERROR, + "There is not partition to fit partition key \"%s\"", + datum_to_cstring(key, prel->atttype)); + else + return parts[0]; +} From 8eaba425dd6936c55b233318ecf159f3aa4b9508 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 16 Feb 2017 17:44:00 +0300 Subject: [PATCH 0227/1124] refactoring in subsystem 'pg_compat' --- src/pg_compat.c | 33 +++++++------- src/pg_compat.h | 115 ++++++++++++++++++++++++++++++++++------------- src/pg_pathman.c | 2 +- 3 files changed, 101 insertions(+), 49 deletions(-) diff --git a/src/pg_compat.c b/src/pg_compat.c index 8d8e49f6..d7b0c30e 100644 --- a/src/pg_compat.c +++ b/src/pg_compat.c @@ -5,6 +5,9 @@ * * Copyright (c) 2016, Postgres Professional * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * * ------------------------------------------------------------------------ */ @@ -22,6 +25,7 @@ #include +/* Common code */ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) { @@ -51,6 +55,7 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) Assert(childrel->rows > 0); parent_rows += childrel->rows; + #if PG_VERSION_NUM >= 90600 parent_size += childrel->reltarget->width * childrel->rows; #else @@ -66,27 +71,20 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) #else rel->width = rint(parent_size / parent_rows); #endif + rel->tuples = parent_rows; } -void -adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest, - RelOptInfo *rel, AppendRelInfo *appinfo) -{ -#if PG_VERSION_NUM >= 90600 - 
dest->reltarget->exprs = (List *) - adjust_appendrel_attrs(root, - (Node *) rel->reltarget->exprs, - appinfo); -#else - dest->reltargetlist = (List *) - adjust_appendrel_attrs(root, - (Node *) rel->reltargetlist, - appinfo); -#endif -} + +/* + * ---------- + * Variants + * ---------- + */ #if PG_VERSION_NUM >= 90600 + + /* * make_result * Build a Result plan node @@ -108,6 +106,7 @@ make_result(List *tlist, return node; } + /* * If this relation could possibly be scanned from within a worker, then set * its consider_parallel flag. @@ -256,6 +255,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, rel->consider_parallel = true; } + /* * create_plain_partial_paths * Build partial access paths for parallel scan of a plain relation @@ -320,6 +320,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #else /* PG_VERSION_NUM >= 90500 */ + /* * set_dummy_rel_pathlist * Build a dummy path for a relation that's been excluded by constraints diff --git a/src/pg_compat.h b/src/pg_compat.h index eb69d84e..a54b73d3 100644 --- a/src/pg_compat.h +++ b/src/pg_compat.h @@ -21,76 +21,127 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); -void adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest, - RelOptInfo *rel, AppendRelInfo *appinfo); #if PG_VERSION_NUM >= 90600 -#define get_parameterized_joinrel_size_compat(root, rel, outer_path, \ - inner_path, sjinfo, \ - restrict_clauses) \ - get_parameterized_joinrel_size(root, rel, outer_path, \ - inner_path, sjinfo, \ - restrict_clauses) +/* adjust_appendrel_attrs() */ +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ + } while (0) -#define check_index_predicates_compat(rool, rel) \ - check_index_predicates(root, rel) +/* create_append_path() */ #ifndef PGPRO_VERSION #define create_append_path_compat(rel, 
subpaths, required_outer, parallel_workers) \ - create_append_path(rel, subpaths, required_outer, parallel_workers) + create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) #else #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path(rel, subpaths, required_outer, false, NIL, parallel_workers) + create_append_path((rel), (subpaths), (required_outer), \ + false, NIL, (parallel_workers)) #endif -#define pull_var_clause_compat(node, aggbehavior, phbehavior) \ - pull_var_clause(node, aggbehavior | phbehavior) -extern void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); -#define set_rel_consider_parallel_compat(root, rel, rte) \ - set_rel_consider_parallel(root, rel, rte) +/* check_index_predicates() */ +#define check_index_predicates_compat(rool, rel) \ + check_index_predicates((root), (rel)) -extern void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); + +/* create_plain_partial_paths() */ +extern void create_plain_partial_paths(PlannerInfo *root, + RelOptInfo *rel); #define create_plain_partial_paths_compat(root, rel) \ - create_plain_partial_paths(root, rel) + create_plain_partial_paths((root), (rel)) + -extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); +/* get_parameterized_joinrel_size() */ +#define get_parameterized_joinrel_size_compat(root, rel, outer_path, \ + inner_path, sjinfo, \ + restrict_clauses) \ + get_parameterized_joinrel_size((root), (rel), (outer_path), \ + (inner_path), (sjinfo), \ + (restrict_clauses)) + + +/* make_result() */ +extern Result *make_result(List *tlist, + Node *resconstantqual, + Plan *subplan); #define make_result_compat(root, tlist, resconstantqual, subplan) \ - make_result(tlist, resconstantqual, subplan) + make_result((tlist), (resconstantqual), (subplan)) + + +/* pull_var_clause() */ +#define pull_var_clause_compat(node, aggbehavior, phbehavior) \ + pull_var_clause((node), 
(aggbehavior) | (phbehavior)) + + +/* set_rel_consider_parallel() */ +extern void set_rel_consider_parallel(PlannerInfo *root, + RelOptInfo *rel, + RangeTblEntry *rte); +#define set_rel_consider_parallel_compat(root, rel, rte) \ + set_rel_consider_parallel((root), (rel), (rte)) #else /* PG_VERSION_NUM >= 90500 */ +/* adjust_appendrel_attrs() */ +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltargetlist = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ + } while (0) + + +/* create_append_path() */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer)) + + +/* check_partial_indexes() */ +#define check_index_predicates_compat(rool, rel) \ + check_partial_indexes((root), (rel)) + + +/* create_plain_partial_paths() */ +#define create_plain_partial_paths_compat(root, rel) ((void) true) + + +/* get_parameterized_joinrel_size() */ #define get_parameterized_joinrel_size_compat(root, rel, \ outer_path, \ inner_path, \ sjinfo, restrict_clauses) \ - get_parameterized_joinrel_size(root, rel, \ + get_parameterized_joinrel_size((root), (rel), \ (outer_path)->rows, \ (inner_path)->rows, \ - sjinfo, restrict_clauses) + (sjinfo), (restrict_clauses)) -#define check_index_predicates_compat(rool, rel) \ - check_partial_indexes(root, rel) -#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path(rel, subpaths, required_outer) +/* make_result() */ +#define make_result_compat(root, tlist, resconstantqual, subplan) \ + make_result((root), (tlist), (resconstantqual), (subplan)) + +/* pull_var_clause() */ #define pull_var_clause_compat(node, aggbehavior, phbehavior) \ - pull_var_clause(node, aggbehavior, phbehavior) + pull_var_clause((node), (aggbehavior), (phbehavior)) -#define make_result_compat(root, tlist, resconstantqual, subplan) \ - 
make_result(root, tlist, resconstantqual, subplan) +/* set_rel_consider_parallel() */ #define set_rel_consider_parallel_compat(root, rel, rte) ((void) true) -#define create_plain_partial_paths_compat(root, rel) ((void) true) +/* set_dummy_rel_pathlist() */ void set_dummy_rel_pathlist(RelOptInfo *rel); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index abb7d9f1..65d06aef 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -341,7 +341,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, root->append_rel_list = lappend(root->append_rel_list, appinfo); /* Adjust target list for this child */ - adjust_targetlist_compat(root, child_rel, parent_rel, appinfo); + adjust_rel_targetlist_compat(root, child_rel, parent_rel, appinfo); /* * Copy restrictions. If it's not the parent table, copy only From ded87de43ee1f895a1f663fc6e10918c126fe094 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 16 Feb 2017 18:10:47 +0300 Subject: [PATCH 0228/1124] rewrited drop_triggers() func and tests are fixed --- expected/pathman_basic.out | 35 +++++------- expected/pathman_calamity.out | 9 --- expected/pathman_callbacks.out | 1 - expected/pathman_cte.out | 1 - expected/pathman_domains.out | 1 - expected/pathman_foreign_keys.out | 2 - expected/pathman_permissions.out | 2 - init.sql | 95 +++++++++++++++++-------------- sql/pathman_basic.sql | 6 +- 9 files changed, 68 insertions(+), 84 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index a5902c58..4c5406f8 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -81,7 +81,6 @@ SELECT * FROM test.hash_rel; (3 rows) SELECT pathman.drop_partitions('test.hash_rel'); -NOTICE: function test.hash_rel_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from test.hash_rel_0 NOTICE: 0 rows copied from test.hash_rel_1 NOTICE: 0 rows copied from test.hash_rel_2 @@ -570,10 +569,10 @@ SET pg_pathman.enable_runtimeappend = OFF; SET 
pg_pathman.enable_runtimemergeappend = OFF; VACUUM; /* update triggers test */ -SELECT pathman.create_hash_update_trigger('test.hash_rel'); - create_hash_update_trigger ------------------------------ - test.hash_rel_upd_trig_func +SELECT pathman.create_update_triggers('test.hash_rel'); + create_update_triggers +------------------------ + (1 row) UPDATE test.hash_rel SET value = 7 WHERE value = 6; @@ -591,10 +590,10 @@ SELECT * FROM test.hash_rel WHERE value = 7; 6 | 7 (1 row) -SELECT pathman.create_range_update_trigger('test.num_range_rel'); - create_range_update_trigger ----------------------------------- - test.num_range_rel_upd_trig_func +SELECT pathman.create_update_triggers('test.num_range_rel'); + create_update_triggers +------------------------ + (1 row) UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; @@ -1477,7 +1476,7 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') Indexes: "hash_rel_0_pkey" PRIMARY KEY, btree (id) Triggers: - hash_rel_upd_trig BEFORE UPDATE ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE test.hash_rel_upd_trig_func() + hash_rel_upd_trig BEFORE UPDATE ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.update_trigger_func() \d+ test.hash_rel_extern Table "test.hash_rel_extern" @@ -1513,7 +1512,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; * Clean up */ SELECT pathman.drop_partitions('test.hash_rel'); -NOTICE: drop cascades to 2 other objects NOTICE: 3 rows copied from test.hash_rel_1 NOTICE: 2 rows copied from test.hash_rel_2 NOTICE: 2 rows copied from test.hash_rel_extern @@ -1535,7 +1533,6 @@ SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); (1 row) SELECT pathman.drop_partitions('test.hash_rel', TRUE); -NOTICE: function test.hash_rel_upd_trig_func() does not exist, skipping drop_partitions ----------------- 3 @@ -1549,7 +1546,6 @@ SELECT COUNT(*) FROM ONLY test.hash_rel; DROP TABLE test.hash_rel CASCADE; SELECT pathman.drop_partitions('test.num_range_rel'); 
-NOTICE: drop cascades to 3 other objects NOTICE: 998 rows copied from test.num_range_rel_1 NOTICE: 1000 rows copied from test.num_range_rel_2 NOTICE: 1000 rows copied from test.num_range_rel_3 @@ -1690,10 +1686,10 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) -SELECT pathman.create_hash_update_trigger('test."TeSt"'); - create_hash_update_trigger ----------------------------- - test."TeSt_upd_trig_func" +SELECT pathman.create_update_triggers('test."TeSt"'); + create_update_triggers +------------------------ + (1 row) UPDATE test."TeSt" SET a = 1; @@ -1722,7 +1718,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; (3 rows) SELECT pathman.drop_partitions('test."TeSt"'); -NOTICE: drop cascades to 3 other objects NOTICE: 0 rows copied from test."TeSt_0" NOTICE: 0 rows copied from test."TeSt_1" NOTICE: 3 rows copied from test."TeSt_2" @@ -1777,7 +1772,6 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); (1 row) SELECT pathman.drop_partitions('test."RangeRel"'); -NOTICE: function test.RangeRel_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from test."RangeRel_1" NOTICE: 1 rows copied from test."RangeRel_2" NOTICE: 1 rows copied from test."RangeRel_3" @@ -1813,7 +1807,6 @@ SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); (1 row) SELECT pathman.drop_partitions('test."RangeRel"'); -NOTICE: function test.RangeRel_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from test."RangeRel_1" NOTICE: 0 rows copied from test."RangeRel_2" NOTICE: 0 rows copied from test."RangeRel_3" @@ -1971,7 +1964,6 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = ' DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); -NOTICE: function test.range_rel_upd_trig_func() does not exist, skipping NOTICE: 44 rows copied from test.range_rel_1 NOTICE: 31 
rows copied from test.range_rel_3 NOTICE: 30 rows copied from test.range_rel_4 @@ -1998,7 +1990,6 @@ SELECT create_partitions_from_range('test.range_rel', 'id', 1, 1000, 100); (1 row) SELECT drop_partitions('test.range_rel', TRUE); -NOTICE: function test.range_rel_upd_trig_func() does not exist, skipping drop_partitions ----------------- 10 diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e9a5c7e4..44a6786f 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -28,7 +28,6 @@ NOTICE: sequence "part_test_seq" does not exist, skipping (1 row) SELECT drop_partitions('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from calamity.part_test_1 NOTICE: 10 rows copied from calamity.part_test_2 NOTICE: 10 rows copied from calamity.part_test_3 @@ -44,7 +43,6 @@ SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); (1 row) SELECT drop_partitions('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from calamity.part_test_1 NOTICE: 10 rows copied from calamity.part_test_2 NOTICE: 10 rows copied from calamity.part_test_3 @@ -66,7 +64,6 @@ SELECT append_range_partition('calamity.part_test'); (1 row) SELECT drop_partitions('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from calamity.part_test_1 NOTICE: 10 rows copied from calamity.part_test_2 NOTICE: 10 rows copied from calamity.part_test_3 @@ -89,7 +86,6 @@ SELECT append_range_partition('calamity.part_test'); (1 row) SELECT drop_partitions('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from calamity.part_test_1 NOTICE: 10 rows copied from calamity.part_test_2 NOTICE: 10 rows copied from calamity.part_test_3 @@ -171,7 +167,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.part_test; (5 rows) SELECT drop_partitions('calamity.part_test', true); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping drop_partitions ----------------- 4 @@ -199,7 +194,6 @@ ERROR: invalid input syntax for integer: "15.6" SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ ERROR: invalid input syntax for integer: "abc" SELECT drop_partitions('calamity.part_test', true); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping drop_partitions ----------------- 3 @@ -479,7 +473,6 @@ SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ (1 row) SELECT disable_pathman_for('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping disable_pathman_for --------------------- @@ -492,7 +485,6 @@ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ (1 row) SELECT disable_pathman_for('calamity.part_test'); -NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping disable_pathman_for --------------------- @@ -599,7 +591,6 @@ SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params (1 row) SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ -NOTICE: function calamity.to_be_disabled_upd_trig_func() does not exist, skipping disable_pathman_for --------------------- diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 4903b7b4..e0343526 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -133,7 +133,6 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_7", (1 row) SELECT drop_partitions('callbacks.abc'); -NOTICE: function callbacks.abc_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from callbacks.abc_1 NOTICE: 1 rows copied from callbacks.abc_2 NOTICE: 1 rows copied from callbacks.abc_3 diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out 
index facda1bb..3e028c54 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -121,7 +121,6 @@ WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; (24 rows) SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ -NOTICE: function test_cte.cte_del_xacts_upd_trig_func() does not exist, skipping NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 drop_partitions diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 6062adbc..2b3170c5 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -88,7 +88,6 @@ ORDER BY range_min::INT, range_max::INT; (14 rows) SELECT drop_partitions('domains.dom_table'); -NOTICE: function domains.dom_table_upd_trig_func() does not exist, skipping NOTICE: 49 rows copied from domains.dom_table_1 NOTICE: 100 rows copied from domains.dom_table_3 NOTICE: 100 rows copied from domains.dom_table_4 diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 8b3db83e..df83efc3 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -20,7 +20,6 @@ INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); ERROR: insert or update on table "test_fkey_1" violates foreign key constraint "test_fkey_1_comment_fkey" INSERT INTO fkeys.test_fkey VALUES(1, 'test'); SELECT drop_partitions('fkeys.test_fkey'); -NOTICE: function fkeys.test_fkey_upd_trig_func() does not exist, skipping NOTICE: 101 rows copied from fkeys.test_fkey_1 NOTICE: 100 rows copied from fkeys.test_fkey_2 NOTICE: 100 rows copied from fkeys.test_fkey_3 @@ -46,7 +45,6 @@ INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); ERROR: insert or update on table "test_fkey_0" violates foreign key constraint "test_fkey_0_comment_fkey" INSERT INTO fkeys.test_fkey VALUES(1, 'test'); SELECT drop_partitions('fkeys.test_fkey'); -NOTICE: function fkeys.test_fkey_upd_trig_func() does not exist, skipping 
NOTICE: 100 rows copied from fkeys.test_fkey_0 NOTICE: 90 rows copied from fkeys.test_fkey_1 NOTICE: 90 rows copied from fkeys.test_fkey_2 diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index eef1c993..81fd7b34 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -134,7 +134,6 @@ ERROR: no suitable partition for key '55' /* Finally drop partitions */ SET ROLE user1; SELECT drop_partitions('permissions.user1_table'); -NOTICE: function permissions.user1_table_upd_trig_func() does not exist, skipping NOTICE: 10 rows copied from permissions.user1_table_1 NOTICE: 10 rows copied from permissions.user1_table_2 NOTICE: 0 rows copied from permissions.user1_table_4 @@ -157,7 +156,6 @@ SELECT create_hash_partitions('permissions.user2_table', 'id', 3); INSERT INTO permissions.user2_table SELECT generate_series(1, 30); SELECT drop_partitions('permissions.user2_table'); -NOTICE: function permissions.user2_table_upd_trig_func() does not exist, skipping NOTICE: 9 rows copied from permissions.user2_table_0 NOTICE: 11 rows copied from permissions.user2_table_1 NOTICE: 10 rows copied from permissions.user2_table_2 diff --git a/init.sql b/init.sql index eb62c0e5..91a7b327 100644 --- a/init.sql +++ b/init.sql @@ -551,15 +551,64 @@ $$ LANGUAGE plpgsql; /* - * Drop triggers. 
+ * Function for update triggers + */ +CREATE OR REPLACE FUNCTION @extschema@.update_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'update_trigger_func' +LANGUAGE C; + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( + IN parent_relid REGCLASS) +RETURNS VOID AS +$$ +DECLARE + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE + @extschema@.update_trigger_func()'; + triggername TEXT; + rec RECORD; + +BEGIN + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Create trigger on every partition */ + FOR rec in (SELECT * FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid) + LOOP + EXECUTE format(trigger, + triggername, + rec.inhrelid::REGCLASS::TEXT); + END LOOP; +END +$$ LANGUAGE plpgsql; + +/* + * Drop triggers */ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( parent_relid REGCLASS) RETURNS VOID AS $$ +DECLARE + triggername TEXT; + rec RECORD; + BEGIN - EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', - @extschema@.build_update_trigger_func_name(parent_relid)); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Drop trigger for each partition if exists */ + FOR rec IN (SELECT pg_catalog.pg_inherits.* FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger on inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) + LOOP + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + rec.inhrelid::REGCLASS::TEXT); + END LOOP; END $$ LANGUAGE plpgsql STRICT; @@ -896,43 +945,3 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; - - -/* - * Function for update triggers - */ -CREATE OR REPLACE FUNCTION @extschema@.update_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'update_trigger_func' -LANGUAGE C; - - -/* - * Creates an update trigger - */ 
-CREATE OR REPLACE FUNCTION @extschema@.create_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE - @extschema@.update_trigger_func()'; - triggername TEXT; - rec RECORD; - -BEGIN - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT); - END LOOP; - - RETURN 'update_trigger_func()'; -END -$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6dc25deb..a69940db 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -173,12 +173,12 @@ SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; /* update triggers test */ -SELECT pathman.create_hash_update_trigger('test.hash_rel'); +SELECT pathman.create_update_triggers('test.hash_rel'); UPDATE test.hash_rel SET value = 7 WHERE value = 6; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 7; SELECT * FROM test.hash_rel WHERE value = 7; -SELECT pathman.create_range_update_trigger('test.num_range_rel'); +SELECT pathman.create_update_triggers('test.num_range_rel'); UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = 3001; SELECT * FROM test.num_range_rel WHERE id = 3001; @@ -464,7 +464,7 @@ INSERT INTO test."TeSt" VALUES (1, 1); INSERT INTO test."TeSt" VALUES (2, 2); INSERT INTO test."TeSt" VALUES (3, 3); SELECT * FROM test."TeSt"; -SELECT pathman.create_hash_update_trigger('test."TeSt"'); +SELECT pathman.create_update_triggers('test."TeSt"'); UPDATE test."TeSt" SET a = 1; SELECT * FROM test."TeSt"; SELECT * FROM test."TeSt" WHERE a = 1; From 165a7242b253a62f5289603d4ea3a7ef055a22e1 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 16 Feb 2017 
18:33:18 +0300 Subject: [PATCH 0229/1124] moved update_trigger_func() from pl_range_funcs.c to pl_funcs.c --- src/pl_funcs.c | 118 ++++++++++++++++++++++++++++++++++++++++++- src/pl_range_funcs.c | 118 ------------------------------------------- 2 files changed, 117 insertions(+), 119 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f64433d3..c2327bf9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -12,11 +12,13 @@ #include "utils.h" #include "pathman.h" #include "partition_creation.h" +#include "partition_filter.h" #include "relation_info.h" #include "xact_handling.h" -#include "access/htup_details.h" +#include "access/tupconvert.h" #include "access/nbtree.h" +#include "access/htup_details.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/pg_type.h" @@ -32,6 +34,9 @@ #include "utils/syscache.h" +static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); + + /* Function declarations */ PG_FUNCTION_INFO_V1( on_partitions_created ); @@ -69,6 +74,8 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); +PG_FUNCTION_INFO_V1( update_trigger_func ); + /* * User context for function show_partition_list_internal(). 
@@ -926,3 +933,112 @@ get_pathman_lib_version(PG_FUNCTION_ARGS) { PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); } + +/* + * Update trigger + */ +Datum +update_trigger_func(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + PartParentSearch parent_search; + Oid parent; + TriggerData *trigdata = (TriggerData *) fcinfo->context; + char *key_name; + Datum key; + bool isnull; + TupleConversionMap *conversion_map; + + TupleDesc source_tupdesc; + HeapTuple source_tuple; + Oid source_relid; + AttrNumber source_key; + + Relation target_rel; + TupleDesc target_tupdesc; + HeapTuple target_tuple; + Oid target_relid; + + /* This function can only be invoked as a trigger */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "Function invoked not in a trigger context"); + + /* Make sure that trigger was fired during UPDATE command */ + if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) + elog(ERROR, "This function must only be used as UPDATE trigger"); + + source_relid = trigdata->tg_relation->rd_id; + source_tuple = trigdata->tg_newtuple; + source_tupdesc = trigdata->tg_relation->rd_att; + + /* Find parent relation and partitioning info */ + parent = get_parent_of_partition(source_relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(source_relid)); + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); + + /* + * Find partitioning key attribute of source partition. 
Keep in mind that + * there could be dropped columns in parent relation or partition and so + * key attribute may have different number + */ + key_name = get_attname(parent, prel->attnum); + source_key = get_attnum(source_relid, key_name); + key = heap_getattr(source_tuple, source_key, source_tupdesc, &isnull); + + /* Find partition it should go into */ + target_relid = get_partition_for_key(prel, key); + + /* If target partition is the same then do nothing */ + if (target_relid == source_relid) + return PointerGetDatum(source_tuple); + + target_rel = heap_open(target_relid, RowExclusiveLock); + target_tupdesc = target_rel->rd_att; + + /* + * Else if it's a different partition then build a TupleConversionMap + * between original partition and new one. And then do a convertation + */ + conversion_map = convert_tuples_by_name(source_tupdesc, + target_tupdesc, + "Failed to convert tuple"); + target_tuple = do_convert_tuple(source_tuple, conversion_map); + + /* + * To make an UPDATE on a tuple in case when the tuple should be moved from + * one partition to another we need to perform two actions. First, remove + * old tuple from original partition and then insert updated version + * of tuple to the target partition + */ + simple_heap_delete(trigdata->tg_relation, &trigdata->tg_trigtuple->t_self); + simple_heap_insert(target_rel, target_tuple); + + heap_close(target_rel, RowExclusiveLock); + PG_RETURN_VOID(); +} + +/* + * Returns Oid of partition corresponding to partitioning key value. 
Throws + * an error if no partition found + */ +static Oid +get_partition_for_key(const PartRelationInfo *prel, Datum key) +{ + Oid *parts; + int nparts; + + /* Search for matching partitions */ + parts = find_partitions_for_value(key, prel->atttype, prel, &nparts); + + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + else if (nparts == 0) + elog(ERROR, + "There is not partition to fit partition key \"%s\"", + datum_to_cstring(key, prel->atttype)); + else + return parts[0]; +} diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 6694fee6..b78f44a4 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -31,13 +31,6 @@ #include "utils/syscache.h" -/* TODO */ -#include "commands/trigger.h" -#include "access/htup_details.h" -#include "access/tupconvert.h" -#include "partition_filter.h" - - static char *deparse_constraint(Oid relid, Node *expr); static ArrayType *construct_infinitable_array(Bound *elems, int nelems, @@ -61,8 +54,6 @@ static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); -static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); - /* Function declarations */ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); @@ -78,8 +69,6 @@ PG_FUNCTION_INFO_V1( merge_range_partitions ); PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( validate_interval_value ); -PG_FUNCTION_INFO_V1( update_trigger_func ); - /* * ----------------------------- @@ -1018,110 +1007,3 @@ drop_table_by_oid(Oid relid) RemoveRelations(n); } - -/* - * Update trigger - */ -Datum -update_trigger_func(PG_FUNCTION_ARGS) -{ - const PartRelationInfo *prel; - PartParentSearch parent_search; - Oid parent; - TriggerData *trigdata = (TriggerData *) fcinfo->context; - char *key_name; - Datum key; - bool isnull; - TupleConversionMap *conversion_map; - - TupleDesc source_tupdesc; - HeapTuple source_tuple; - Oid source_relid; - AttrNumber source_key; - - Relation target_rel; - TupleDesc target_tupdesc; - 
HeapTuple target_tuple; - Oid target_relid; - - /* This function can only be invoked as a trigger */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "Function invoked not in a trigger context"); - - /* tuple to return to executor */ - if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - elog(ERROR, "This function must only be used as UPDATE trigger"); - - source_relid = trigdata->tg_relation->rd_id; - source_tuple = trigdata->tg_newtuple; - source_tupdesc = trigdata->tg_relation->rd_att; - - parent = get_parent_of_partition(source_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(source_relid)); - - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); - - /* - * Find partitioning key attribute of source partition. Keep in mind that - * there could be dropped columns in parent relation or partition and so - * key attribute may have different number - */ - key_name = get_attname(parent, prel->attnum); - source_key = get_attnum(source_relid, key_name); - key = heap_getattr(source_tuple, source_key, source_tupdesc, &isnull); - - /* Find partition it should go into */ - target_relid = get_partition_for_key(prel, key); - - /* If target partition is the same then do nothing */ - if (target_relid == source_relid) - return PointerGetDatum(source_tuple); - - target_rel = heap_open(target_relid, RowExclusiveLock); - target_tupdesc = target_rel->rd_att; - - /* - * Else if it's a different partition then build a TupleConversionMap - * between original partition and new one. 
And then do a convertation - */ - conversion_map = convert_tuples_by_name(source_tupdesc, - target_tupdesc, - "Failed to convert tuple"); - target_tuple = do_convert_tuple(source_tuple, conversion_map); - - /* Delete old tuple from original partition */ - simple_heap_delete(trigdata->tg_relation, &trigdata->tg_trigtuple->t_self); - - /* Insert tuple into new partition */ - simple_heap_insert(target_rel, target_tuple); - - heap_close(target_rel, RowExclusiveLock); - - PG_RETURN_VOID(); -} - -/* - * Returns Oid of partition corresponding to partitioning key value. Throws - * an error if no partition found - */ -static Oid -get_partition_for_key(const PartRelationInfo *prel, Datum key) -{ - Oid *parts; - int nparts; - - /* Search for matching partitions */ - parts = find_partitions_for_value(key, prel->atttype, prel, &nparts); - - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - else if (nparts == 0) - elog(ERROR, - "There is not partition to fit partition key \"%s\"", - datum_to_cstring(key, prel->atttype)); - else - return parts[0]; -} From 4d37a4c29fb59b24268cb9f892540d31320448e1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 16 Feb 2017 19:08:01 +0300 Subject: [PATCH 0230/1124] fix function handle_modification_query(), refactoring (+function build_part_tuple_map()) --- Makefile | 1 + expected/pathman_updates.out | 64 +++++++++++ sql/pathman_updates.sql | 37 +++++++ src/partition_filter.c | 191 ++++++++++++++++++-------------- src/partition_filter.h | 18 ++- src/planner_tree_modification.c | 43 ++++++- src/utility_stmt_hooking.c | 6 +- 7 files changed, 263 insertions(+), 97 deletions(-) create mode 100644 expected/pathman_updates.out create mode 100644 sql/pathman_updates.sql diff --git a/Makefile b/Makefile index 6f9680cb..c151604d 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,7 @@ REGRESS = pathman_basic \ pathman_cte \ pathman_bgw \ pathman_inserts \ + pathman_updates \ pathman_domains \ pathman_interval \ pathman_callbacks \ diff --git 
a/expected/pathman_updates.out b/expected/pathman_updates.out new file mode 100644 index 00000000..4fb1ee58 --- /dev/null +++ b/expected/pathman_updates.out @@ -0,0 +1,64 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; +/* + * Test UPDATEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); +NOTICE: sequence "test_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_updates.test'); + append_range_partition +------------------------ + test_updates.test_11 +(1 row) + +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; + QUERY PLAN +--------------------------- + Update on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 0 | test_updates.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; + QUERY PLAN +----------------------------- + Update on test + Update on test + Update on test_11 + -> Seq Scan on test + Filter: (val = 101) + -> Seq Scan on test_11 + Filter: (val = 101) +(7 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+---------------------- + 101 | 0 | test_updates.test_11 +(1 row) + +DROP SCHEMA test_updates CASCADE; +NOTICE: drop cascades to 13 other objects 
+DROP EXTENSION pg_pathman; diff --git a/sql/pathman_updates.sql b/sql/pathman_updates.sql new file mode 100644 index 00000000..ec4924ea --- /dev/null +++ b/sql/pathman_updates.sql @@ -0,0 +1,37 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; + + +/* + * Test UPDATEs on a partition with different TupleDescriptor. + */ + +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_updates.test'); +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + + + +DROP SCHEMA test_updates CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/partition_filter.c b/src/partition_filter.c index daed5d43..e4c2c47c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -132,8 +132,12 @@ init_partition_filter_static_data(void) /* - * Initialize ResultPartsStorage (hash table etc). + * --------------------------- + * Partition Storage (cache) + * --------------------------- */ + +/* Initialize ResultPartsStorage (hash table etc) */ void init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, @@ -171,9 +175,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->heap_close_lock_mode = NoLock; } -/* - * Free ResultPartsStorage (close relations etc). 
- */ +/* Free ResultPartsStorage (close relations etc) */ void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) { @@ -223,9 +225,7 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) hash_destroy(parts_storage->result_rels_table); } -/* - * Find a ResultRelInfo for the partition using ResultPartsStorage. - */ +/* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) { @@ -248,8 +248,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) *parent_rte; Index child_rte_idx; ResultRelInfo *child_result_rel_info; - TupleDesc child_tupdesc, - parent_tupdesc; /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); @@ -313,24 +311,8 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - /* Use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ - child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); - child_tupdesc->tdtypeid = InvalidOid; - - parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); - parent_tupdesc->tdtypeid = InvalidOid; - /* Generate tuple transformation map and some other stuff */ - rri_holder->tuple_map = convert_tuples_by_name(parent_tupdesc, - child_tupdesc, - "could not convert row type"); - - /* If map is one-to-one, free unused TupleDescs */ - if (!rri_holder->tuple_map) - { - FreeTupleDesc(child_tupdesc); - FreeTupleDesc(parent_tupdesc); - } + rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) @@ -346,6 +328,44 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) return rri_holder; } + +/* Build tuple conversion map (e.g. 
parent has a dropped column) */ +TupleConversionMap * +build_part_tuple_map(Relation parent_rel, Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc, + parent_tupdesc; + + /* Use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ + child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc->tdtypeid = InvalidOid; + + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); + parent_tupdesc->tdtypeid = InvalidOid; + + /* Generate tuple transformation map and some other stuff */ + tuple_map = convert_tuples_by_name(parent_tupdesc, + child_tupdesc, + "could not convert row type for partition"); + + /* If map is one-to-one, free unused TupleDescs */ + if (!tuple_map) + { + FreeTupleDesc(child_tupdesc); + FreeTupleDesc(parent_tupdesc); + } + + return tuple_map; +} + + +/* + * ----------------------------------- + * Partition search helper functions + * ----------------------------------- + */ + /* * Find matching partitions for 'value' using PartRelationInfo. */ @@ -366,11 +386,11 @@ find_partitions_for_value(Datum value, Oid value_type, temp_const.location = -1; /* Fill const with value ... */ - temp_const.constvalue = value; - temp_const.constisnull = false; + temp_const.constvalue = value; + temp_const.consttype = value_type; + temp_const.constisnull = false; /* ... and some other important data */ - CopyToTempConst(consttype, atttype); CopyToTempConst(consttypmod, atttypmod); CopyToTempConst(constcollid, attcollid); CopyToTempConst(constlen, attlen); @@ -379,9 +399,59 @@ find_partitions_for_value(Datum value, Oid value_type, /* We use 0 since varno doesn't matter for Const */ InitWalkerContext(&wcxt, 0, prel, NULL, true); ranges = walk_expr_tree((Expr *) &temp_const, &wcxt)->rangeset; + return get_partition_oids(ranges, nparts, prel, false); } +/* + * Smart wrapper for scan_result_parts_storage(). 
+ */ +ResultRelInfoHolder * +select_partition_for_insert(Datum value, Oid value_type, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate) +{ + MemoryContext old_cxt; + ResultRelInfoHolder *rri_holder; + Oid selected_partid = InvalidOid; + Oid *parts; + int nparts; + + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); + + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + else if (nparts == 0) + { + selected_partid = create_partitions_for_value(PrelParentRelid(prel), + value, prel->atttype); + + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + } + else selected_partid = parts[0]; + + /* Replace parent table with a suitable partition */ + old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + rri_holder = scan_result_parts_storage(selected_partid, parts_storage); + MemoryContextSwitchTo(old_cxt); + + /* Could not find suitable partition */ + if (rri_holder == NULL) + elog(ERROR, ERR_PART_ATTR_NO_PART, + datum_to_cstring(value, prel->atttype)); + + return rri_holder; +} + + +/* + * -------------------------------- + * PartitionFilter implementation + * -------------------------------- + */ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, @@ -516,8 +586,8 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(prel, &state->result_parts, - value, prel->atttype, estate); + rri_holder = select_partition_for_insert(value, prel->atttype, prel, + &state->result_parts, estate); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_cxt); @@ -583,49 +653,6 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e } -/* - * Smart wrapper for scan_result_parts_storage(). 
- */ -ResultRelInfoHolder * -select_partition_for_insert(const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - Datum value, Oid value_type, - EState *estate) -{ - MemoryContext old_cxt; - ResultRelInfoHolder *rri_holder; - Oid selected_partid = InvalidOid; - Oid *parts; - int nparts; - - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - else if (nparts == 0) - { - selected_partid = create_partitions_for_value(PrelParentRelid(prel), - value, prel->atttype); - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); - } - else selected_partid = parts[0]; - - /* Replace parent table with a suitable partition */ - old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder = scan_result_parts_storage(selected_partid, parts_storage); - MemoryContextSwitchTo(old_cxt); - - /* Could not find suitable partition */ - if (rri_holder == NULL) - elog(ERROR, ERR_PART_ATTR_NO_PART, - datum_to_cstring(value, prel->atttype)); - - return rri_holder; -} - /* * Build partition filter's target list pointing to subplan tuple's elements. @@ -958,9 +985,7 @@ fix_returning_list_mutator(Node *node, void *state) * ------------------------------------- */ -/* - * Append RangeTblEntry 'rte' to estate->es_range_table. - */ +/* Append RangeTblEntry 'rte' to estate->es_range_table */ static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte) { @@ -978,9 +1003,7 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte) return list_length(estate->es_range_table); } -/* - * Append ResultRelInfo 'rri' to estate->es_result_relations. 
- */ +/* Append ResultRelInfo 'rri' to estate->es_result_relations */ static int append_rri_to_estate(EState *estate, ResultRelInfo *rri) { @@ -1021,15 +1044,11 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) * -------------------------------------- */ -/* - * Used by fetch_estate_mod_data() to find estate_mod_data. - */ +/* Used by fetch_estate_mod_data() to find estate_mod_data */ static void pf_memcxt_callback(void *arg) { elog(DEBUG1, "EState is destroyed"); } -/* - * Fetch (or create) a estate_mod_data structure we've hidden inside es_query_cxt. - */ +/* Fetch (or create) a estate_mod_data structure we've hidden inside es_query_cxt */ static estate_mod_data * fetch_estate_mod_data(EState *estate) { diff --git a/src/partition_filter.h b/src/partition_filter.h index c366abb6..af8d0993 100644 --- a/src/partition_filter.h +++ b/src/partition_filter.h @@ -107,6 +107,7 @@ extern CustomExecMethods partition_filter_exec_methods; void init_partition_filter_static_data(void); + /* ResultPartsStorage init\fini\scan function */ void init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, @@ -114,21 +115,33 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, Size table_entry_size, on_new_rri_holder on_new_rri_holder_cb, void *on_new_rri_holder_cb_arg); + void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels); + ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); +TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); + + /* Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); +ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate); + + Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, 
List *returning_list); + Node * partition_filter_create_scan_state(CustomScan *node); void partition_filter_begin(CustomScanState *node, @@ -145,10 +158,5 @@ void partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *es); -ResultRelInfoHolder * select_partition_for_insert(const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - Datum value, Oid value_type, - EState *estate); - #endif /* PARTITION_FILTER_H */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4e032c67..013b6370 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -17,8 +17,10 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/memutils.h" +#include "utils/syscache.h" /* Special column name for rowmarks */ @@ -337,11 +339,46 @@ handle_modification_query(Query *parse) /* Exactly one partition (bounds are equal) */ if (irange_lower(irange) == irange_upper(irange)) { - Oid *children = PrelGetChildrenArray(prel); + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)], + parent = rte->relid; - rte->relid = children[irange_lower(irange)]; + Relation child_rel, + parent_rel; - /* Disable standard planning */ + void *tuple_map; /* we don't need the map itself */ + + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + + /* Make sure that 'child' exists */ + LockRelationOid(child, lockmode); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(child))) + { + UnlockRelationOid(child, lockmode); + return; /* nothing to do here */ + } + + /* Both tables are already locked */ + child_rel = heap_open(child, NoLock); + parent_rel = heap_open(parent, NoLock); + + /* Build a conversion map (may be trivial, i.e. 
NULL) */ + tuple_map = build_part_tuple_map(parent_rel, child_rel); + if (tuple_map) + free_conversion_map((TupleConversionMap *) tuple_map); + + /* Close relations (should remain locked, though) */ + heap_close(child_rel, NoLock); + heap_close(parent_rel, NoLock); + + /* Exit if tuple map was NOT trivial */ + if (tuple_map) /* just checking the pointer! */ + return; + + /* Update RTE's relid */ + rte->relid = child; + + /* Finally disable standard planning */ rte->inh = false; } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 92d2081f..bce69ce1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -553,9 +553,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, elog(ERROR, ERR_PART_ATTR_NULL); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(prel, &parts_storage, - values[prel->attnum - 1], - prel->atttype, estate); + rri_holder = select_partition_for_insert(values[prel->attnum - 1], + prel->atttype, prel, + &parts_storage, estate); child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; From 0b5edecaab4d5e9c650468bc7361c6890e14d3f0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 17 Feb 2017 12:58:23 +0300 Subject: [PATCH 0231/1124] add codecov coverage badge --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 58556f05..709ccf35 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ [![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) [![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) +[![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) # pg_pathman From 
73b8a4115eb823d2c639bf6fba3379695ff20d71 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 17 Feb 2017 17:33:10 +0300 Subject: [PATCH 0232/1124] trigger creation rewrited in C --- init.sql | 28 +------- src/partition_creation.c | 11 --- src/pl_funcs.c | 148 +++++++++++++++++++++++++++++++++++---- src/utils.c | 28 ++++++++ src/utils.h | 3 + 5 files changed, 167 insertions(+), 51 deletions(-) diff --git a/init.sql b/init.sql index 91a7b327..2e2ab5bd 100644 --- a/init.sql +++ b/init.sql @@ -560,31 +560,9 @@ LANGUAGE C; /* * Creates an update trigger */ -CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( - IN parent_relid REGCLASS) -RETURNS VOID AS -$$ -DECLARE - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE - @extschema@.update_trigger_func()'; - triggername TEXT; - rec RECORD; - -BEGIN - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT); - END LOOP; -END -$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers(parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_update_triggers' +LANGUAGE C; /* * Drop triggers diff --git a/src/partition_creation.c b/src/partition_creation.c index 61650ae7..60b826cc 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -77,8 +77,6 @@ static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); -static RangeVar *makeRangeVarFromRelid(Oid relid); - /* * --------------------------------------- @@ -1402,15 +1400,6 @@ make_int_value_struct(int int_val) return val; } -static RangeVar * -makeRangeVarFromRelid(Oid relid) -{ - char *relname = get_rel_name(relid); - char *namespace = 
get_namespace_name(get_rel_namespace(relid)); - - return makeRangeVar(namespace, relname, -1); -} - /* * --------------------- diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c2327bf9..3281c58b 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -21,12 +21,14 @@ #include "access/htup_details.h" #include "access/xact.h" #include "catalog/indexing.h" +#include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" #include "funcapi.h" #include "miscadmin.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/jsonb.h" #include "utils/snapmgr.h" @@ -35,6 +37,9 @@ static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); +static void create_single_update_trigger_internal(Oid relid, + const char *attname); +static bool update_trigger_exists(Oid relid, char *trigname); /* Function declarations */ @@ -74,7 +79,9 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); +PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( update_trigger_func ); +PG_FUNCTION_INFO_V1( create_single_update_trigger ); /* @@ -97,16 +104,6 @@ static void on_partitions_updated_internal(Oid partitioned_table, bool add_callb static void on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks); -/* - * Extracted common check. 
- */ -static bool -check_relation_exists(Oid relid) -{ - return get_rel_type_id(relid) != InvalidOid; -} - - /* * ---------------------------- * Partition events callbacks @@ -543,11 +540,7 @@ build_update_trigger_name(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0); const char *result; /* trigger's name can't be qualified */ - /* Check that relation exists */ - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); - - result = quote_identifier(psprintf("%s_upd_trig", get_rel_name(relid))); + result = build_update_trigger_name_internal(relid); PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -1020,6 +1013,7 @@ update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + /* * Returns Oid of partition corresponding to partitioning key value. Throws * an error if no partition found @@ -1042,3 +1036,127 @@ get_partition_for_key(const PartRelationInfo *prel, Datum key) else return parts[0]; } + +/* + * Create UPDATE triggers for all partitions + */ +Datum +create_update_triggers(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + Oid parent = PG_GETARG_OID(0); + Oid *children; + char *attname, + *trigname; + int i; + + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); + + attname = get_attname(prel->key, prel->attnum); + children = PrelGetChildrenArray(prel); + trigname = build_update_trigger_name_internal(parent); + + /* Create triggers for each partition */ + for (i = 0; i < PrelChildrenCount(prel); i++) + { + if (update_trigger_exists(children[i], trigname)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("trigger \"%s\" for relation \"%s\" already exists", + trigname, get_rel_name_or_relid(children[i])))); + + create_single_update_trigger_internal(children[i], attname); + } + + PG_RETURN_VOID(); +} + +static bool +update_trigger_exists(Oid relid, char *trigname) +{ + bool res = false; + Relation tgrel; + SysScanDesc tgscan; + ScanKeyData key; + HeapTuple tuple; + + tgrel = 
heap_open(TriggerRelationId, RowExclusiveLock); + + ScanKeyInit(&key, + Anum_pg_trigger_tgrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relid)); + tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, + NULL, 1, &key); + while (HeapTupleIsValid(tuple = systable_getnext(tgscan))) + { + Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); + + if (namestrcmp(&(pg_trigger->tgname), trigname) == 0) + { + res = true; + break; + } + } + systable_endscan(tgscan); + heap_close(tgrel, RowExclusiveLock); + + return res; +} + +/* + * Create an UPDATE trigger for partition + */ +Datum +create_single_update_trigger(PG_FUNCTION_ARGS) +{ + const PartRelationInfo *prel; + Oid partition = PG_GETARG_OID(0); + Oid parent; + PartParentSearch parent_search; + char *attname; + + /* Get parent's Oid */ + parent = get_parent_of_partition(partition, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "\"%s\" is not a partition", + get_rel_name_or_relid(partition)); + + /* Determine partitioning key name */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(partition, prel, PT_INDIFFERENT); + attname = get_attname(prel->key, prel->attnum); + + create_single_update_trigger_internal(partition, attname); + + PG_RETURN_VOID(); +} + +static void +create_single_update_trigger_internal(Oid relid, const char *attname) +{ + CreateTrigStmt *stmt; + List *func; + + func = list_make2(makeString(get_namespace_name(get_pathman_schema())), + makeString("update_trigger_func")); + + stmt = makeNode(CreateTrigStmt); + stmt->trigname = build_update_trigger_name_internal(relid); + stmt->relation = makeRangeVarFromRelid(relid); + stmt->funcname = func; + stmt->args = NIL; + stmt->row = true; + stmt->timing = TRIGGER_TYPE_BEFORE; + stmt->events = TRIGGER_TYPE_UPDATE; + stmt->columns = list_make1(makeString((char *) attname)); + stmt->whenClause = NULL; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = 
false; + stmt->constrrel = NULL; + + (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, + InvalidOid, InvalidOid, false); +} diff --git a/src/utils.c b/src/utils.c index 38960b3a..9d3c504f 100644 --- a/src/utils.c +++ b/src/utils.c @@ -103,7 +103,18 @@ check_security_policy_internal(Oid relid, Oid role) return true; } +/* + * Create an update trigger name + */ +char * +build_update_trigger_name_internal(Oid relid) +{ + /* Check that relation exists */ + if (!check_relation_exists(relid)) + elog(ERROR, "Invalid relation %u", relid); + return (char *) quote_identifier(psprintf("%s_upd_trig", get_rel_name(relid))); +} /* * Return pg_pathman schema's Oid or InvalidOid if that's not possible. @@ -255,6 +266,23 @@ get_rel_persistence(Oid relid) } #endif +RangeVar * +makeRangeVarFromRelid(Oid relid) +{ + char *relname = get_rel_name(relid); + char *namespace = get_namespace_name(get_rel_namespace(relid)); + + return makeRangeVar(namespace, relname, -1); +} + +/* + * Extracted common check. + */ +bool +check_relation_exists(Oid relid) +{ + return get_rel_type_id(relid) != InvalidOid; +} /* diff --git a/src/utils.h b/src/utils.h index 24bad286..fbdf7017 100644 --- a/src/utils.h +++ b/src/utils.h @@ -27,6 +27,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); +char *build_update_trigger_name_internal(Oid relid); /* * Misc. @@ -43,6 +44,8 @@ Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); #if PG_VERSION_NUM < 90600 char get_rel_persistence(Oid relid); #endif +RangeVar *makeRangeVarFromRelid(Oid relid); +bool check_relation_exists(Oid relid); /* * Operator-related stuff. 
From e0d3fe01759b2736b311017ff47c663e963109d7 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 17 Feb 2017 18:40:52 +0300 Subject: [PATCH 0233/1124] fixed update trigger names --- expected/pathman_basic.out | 2 +- range.sql | 33 --------------------------------- src/pl_funcs.c | 24 +++++++++++++++--------- src/utils.c | 2 +- 4 files changed, 17 insertions(+), 44 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4c5406f8..8baa94bd 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1476,7 +1476,7 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') Indexes: "hash_rel_0_pkey" PRIMARY KEY, btree (id) Triggers: - hash_rel_upd_trig BEFORE UPDATE ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.update_trigger_func() + hash_rel_upd_trig BEFORE UPDATE OF value ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.update_trigger_func() \d+ test.hash_rel_extern Table "test.hash_rel_extern" diff --git a/range.sql b/range.sql index 11c7d577..8e42bc9b 100644 --- a/range.sql +++ b/range.sql @@ -992,39 +992,6 @@ END $$ LANGUAGE plpgsql; - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE - @extschema@.update_trigger_func()'; - triggername TEXT; - rec RECORD; - -BEGIN - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT); - END LOOP; - - RETURN 'update_trigger_func()'; -END -$$ LANGUAGE plpgsql; - - /* * Drops partition and expands the next partition so that it cover dropped * one diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 
3281c58b..99d75dfa 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -38,6 +38,7 @@ static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); static void create_single_update_trigger_internal(Oid relid, + const char *trigname, const char *attname); static bool update_trigger_exists(Oid relid, char *trigname); @@ -540,7 +541,7 @@ build_update_trigger_name(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0); const char *result; /* trigger's name can't be qualified */ - result = build_update_trigger_name_internal(relid); + result = quote_identifier(build_update_trigger_name_internal(relid)); PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -1066,7 +1067,7 @@ create_update_triggers(PG_FUNCTION_ARGS) errmsg("trigger \"%s\" for relation \"%s\" already exists", trigname, get_rel_name_or_relid(children[i])))); - create_single_update_trigger_internal(children[i], attname); + create_single_update_trigger_internal(children[i], trigname, attname); } PG_RETURN_VOID(); @@ -1115,7 +1116,8 @@ create_single_update_trigger(PG_FUNCTION_ARGS) Oid partition = PG_GETARG_OID(0); Oid parent; PartParentSearch parent_search; - char *attname; + char *trigname, + *attname; /* Get parent's Oid */ parent = get_parent_of_partition(partition, &parent_search); @@ -1126,15 +1128,19 @@ create_single_update_trigger(PG_FUNCTION_ARGS) /* Determine partitioning key name */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(partition, prel, PT_INDIFFERENT); + + trigname = build_update_trigger_name_internal(parent); attname = get_attname(prel->key, prel->attnum); - create_single_update_trigger_internal(partition, attname); + create_single_update_trigger_internal(partition, trigname, attname); PG_RETURN_VOID(); } static void -create_single_update_trigger_internal(Oid relid, const char *attname) +create_single_update_trigger_internal(Oid relid, + const char *trigname, + const char *attname) { CreateTrigStmt *stmt; List *func; @@ -1143,7 +1149,7 @@ 
create_single_update_trigger_internal(Oid relid, const char *attname) makeString("update_trigger_func")); stmt = makeNode(CreateTrigStmt); - stmt->trigname = build_update_trigger_name_internal(relid); + stmt->trigname = (char *) trigname; stmt->relation = makeRangeVarFromRelid(relid); stmt->funcname = func; stmt->args = NIL; @@ -1152,9 +1158,9 @@ create_single_update_trigger_internal(Oid relid, const char *attname) stmt->events = TRIGGER_TYPE_UPDATE; stmt->columns = list_make1(makeString((char *) attname)); stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = false; stmt->constrrel = NULL; (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, diff --git a/src/utils.c b/src/utils.c index 9d3c504f..c07486a5 100644 --- a/src/utils.c +++ b/src/utils.c @@ -113,7 +113,7 @@ build_update_trigger_name_internal(Oid relid) if (!check_relation_exists(relid)) elog(ERROR, "Invalid relation %u", relid); - return (char *) quote_identifier(psprintf("%s_upd_trig", get_rel_name(relid))); + return (char *) psprintf("%s_upd_trig", get_rel_name(relid)); } /* From 1204842fc919e6f43db5af69320cb8a448d4467e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Feb 2017 15:18:14 +0300 Subject: [PATCH 0234/1124] refactoring of function select_range_partitions() --- src/pg_pathman.c | 66 +++++++++++++++++++++++---------------------- src/relation_info.h | 5 +++- 2 files changed, 38 insertions(+), 33 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 65d06aef..2e690422 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -455,41 +455,43 @@ select_range_partitions(const Datum value, const int strategy, WrapperNode *result) { - const RangeEntry *current_re; - bool lossy = false, - is_less, - is_greater; + bool lossy = false, + is_less, + is_greater; #ifdef USE_ASSERT_CHECKING - bool found = false; - int counter = 0; + bool found = 
false; + int counter = 0; #endif - int i, - startidx = 0, - endidx = nranges - 1, - cmp_min, - cmp_max; + int startidx = 0, + endidx = nranges - 1, + cmp_min, + cmp_max, + i; + + Bound value_bound = MakeBound(value); /* convert value to Bound */ + /* Initial value (no missing partitions found) */ result->found_gap = false; - /* Check boundaries */ + /* Check 'ranges' array */ if (nranges == 0) { result->rangeset = NIL; return; } + + /* Check corner cases */ else { Assert(ranges); Assert(cmp_func); - /* Corner cases */ - cmp_min = IsInfinite(&ranges[startidx].min) ? - 1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[startidx].min))); - cmp_max = IsInfinite(&ranges[endidx].max) ? - -1 : DatumGetInt32(FunctionCall2(cmp_func, value, BoundGetValue(&ranges[endidx].max))); + /* Compare 'value' to absolute MIN and MAX bounds */ + cmp_min = cmp_bounds(cmp_func, &value_bound, &ranges[startidx].min); + cmp_max = cmp_bounds(cmp_func, &value_bound, &ranges[endidx].max); if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || @@ -529,21 +531,16 @@ select_range_partitions(const Datum value, /* Binary search */ while (true) { + Assert(ranges); Assert(cmp_func); + /* Calculate new pivot */ i = startidx + (endidx - startidx) / 2; Assert(i >= 0 && i < nranges); - current_re = &ranges[i]; - - cmp_min = IsInfinite(¤t_re->min) ? - 1 : - FunctionCall2(cmp_func, value, - BoundGetValue(¤t_re->min)); - cmp_max = IsInfinite(¤t_re->max) ? 
- -1 : - FunctionCall2(cmp_func, value, - BoundGetValue(¤t_re->max)); + /* Compare 'value' to current MIN and MAX bounds */ + cmp_min = cmp_bounds(cmp_func, &value_bound, &ranges[i].min); + cmp_max = cmp_bounds(cmp_func, &value_bound, &ranges[i].max); is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); @@ -556,13 +553,14 @@ select_range_partitions(const Datum value, lossy = false; else lossy = true; + #ifdef USE_ASSERT_CHECKING found = true; #endif break; } - /* If we still haven't found partition then it doesn't exist */ + /* Indices have met, looks like there's no partition */ if (startidx >= endidx) { result->rangeset = NIL; @@ -579,6 +577,7 @@ select_range_partitions(const Datum value, Assert(++counter < 100); } + /* Should've been found by now */ Assert(found); /* Filter partitions */ @@ -638,8 +637,8 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) *alwaysTrue = false; /* - * TODO: use faster algorithm using knowledge that we enumerate indexes - * sequntially. + * TODO: use faster algorithm using knowledge + * that we enumerate indexes sequntially. */ found = irange_list_find(wrap->rangeset, index, &lossy); @@ -670,16 +669,19 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) arg = wrapper_make_expression((WrapperNode *) lfirst(lc), index, &childAlwaysTrue); + #ifdef USE_ASSERT_CHECKING /* - * We shouldn't get there for always true clause under OR and - * always false clause under AND. + * We shouldn't get there for always true clause + * under OR and always false clause under AND. 
*/ if (expr->boolop == OR_EXPR) Assert(!childAlwaysTrue); + if (expr->boolop == AND_EXPR) Assert(arg || childAlwaysTrue); #endif + if (arg) args = lappend(args, arg); } diff --git a/src/relation_info.h b/src/relation_info.h index 9c8956bb..587de24e 100644 --- a/src/relation_info.h +++ b/src/relation_info.h @@ -79,12 +79,15 @@ cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) return -1; + if (IsMinusInfinity(b2) || IsPlusInfinity(b1)) return 1; Assert(cmp_func); - return FunctionCall2(cmp_func, BoundGetValue(b1), BoundGetValue(b2)); + return DatumGetInt32(FunctionCall2(cmp_func, + BoundGetValue(b1), + BoundGetValue(b2))); } From be231f06a2ed3a0c9d92ccc589a31d80bfbb8b7f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Feb 2017 17:54:35 +0300 Subject: [PATCH 0235/1124] fix infinite bounds in view 'pathman_partition_list' --- expected/pathman_basic.out | 4 ++-- src/pl_funcs.c | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index a5902c58..9c4652d9 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1376,14 +1376,14 @@ SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_in SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; parent | partition | parttype | partattr | range_min | range_max ----------------+-------------------------------+----------+----------+--------------------------+-------------------------- - test.range_rel | test.range_rel_minus_infinity | 2 | dt | NULL | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 test.range_rel | 
test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 - test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | NULL + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | (8 rows) INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f64433d3..9267b177 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -398,28 +398,32 @@ show_partition_list_internal(PG_FUNCTION_ARGS) case PT_RANGE: { RangeEntry *re; - Datum rmin, - rmax; re = &PrelGetRangesArray(prel)[usercxt->child_number]; + values[Anum_pathman_pl_partition - 1] = re->child_oid; + /* Lower bound text */ - rmin = !IsInfinite(&re->min) ? - CStringGetTextDatum( + if (!IsInfinite(&re->min)) + { + Datum rmin = CStringGetTextDatum( datum_to_cstring(BoundGetValue(&re->min), - prel->atttype)) : - CStringGetTextDatum("NULL"); + prel->atttype)); + + values[Anum_pathman_pl_range_min - 1] = rmin; + } + else isnull[Anum_pathman_pl_range_min - 1] = true; /* Upper bound text */ - rmax = !IsInfinite(&re->max) ? 
- CStringGetTextDatum( + if (!IsInfinite(&re->max)) + { + Datum rmax = CStringGetTextDatum( datum_to_cstring(BoundGetValue(&re->max), - prel->atttype)) : - CStringGetTextDatum("NULL"); + prel->atttype)); - values[Anum_pathman_pl_partition - 1] = re->child_oid; - values[Anum_pathman_pl_range_min - 1] = rmin; - values[Anum_pathman_pl_range_max - 1] = rmax; + values[Anum_pathman_pl_range_max - 1] = rmax; + } + else isnull[Anum_pathman_pl_range_max - 1] = true; } break; From d4cebec7e3b5134978c7b21936a4bcb078f7552a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Feb 2017 17:59:08 +0300 Subject: [PATCH 0236/1124] fix various typos --- src/init.c | 2 +- src/partition_filter.c | 2 +- src/rangeset.h | 16 ++++++++-------- tests/cmocka/rangeset_tests.c | 8 ++++---- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/init.c b/src/init.c index 0df69355..783816da 100644 --- a/src/init.c +++ b/src/init.c @@ -976,7 +976,7 @@ validate_range_opexpr(const Expr *expr, return false; /* Fail fast if it's not an OpExpr node */ - if(!IsA(expr, OpExpr)) + if (!IsA(expr, OpExpr)) return false; /* Perform cast */ diff --git a/src/partition_filter.c b/src/partition_filter.c index e4c2c47c..d46bc937 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -251,7 +251,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); - if(!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) { UnlockRelationOid(partid, parts_storage->head_open_lock_mode); return NULL; diff --git a/src/rangeset.h b/src/rangeset.h index 3d17cc51..5f273fd3 100644 --- a/src/rangeset.h +++ b/src/rangeset.h @@ -33,14 +33,14 @@ typedef struct { #define IR_COMPLETE false #define IRANGE_SPECIAL_BIT ( (uint32) ( ((uint32) 1) << 31) ) -#define IRANGE_BONDARY_MASK ( (uint32) (~IRANGE_SPECIAL_BIT) ) 
+#define IRANGE_BOUNDARY_MASK ( (uint32) (~IRANGE_SPECIAL_BIT) ) #define InvalidIndexRange { 0, 0 } #define is_irange_valid(irange) ( (irange.lower & IRANGE_SPECIAL_BIT) > 0 ) #define is_irange_lossy(irange) ( (irange.upper & IRANGE_SPECIAL_BIT) > 0 ) -#define irange_lower(irange) ( (uint32) (irange.lower & IRANGE_BONDARY_MASK) ) -#define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BONDARY_MASK) ) +#define irange_lower(irange) ( (uint32) (irange.lower & IRANGE_BOUNDARY_MASK) ) +#define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BOUNDARY_MASK) ) #define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) #define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) @@ -53,8 +53,8 @@ typedef struct { inline static IndexRange make_irange(uint32 lower, uint32 upper, bool lossy) { - IndexRange result = { lower & IRANGE_BONDARY_MASK, - upper & IRANGE_BONDARY_MASK }; + IndexRange result = { lower & IRANGE_BOUNDARY_MASK, + upper & IRANGE_BOUNDARY_MASK }; /* Set VALID */ result.lower |= IRANGE_SPECIAL_BIT; @@ -83,7 +83,7 @@ inline static uint32 irb_pred(uint32 boundary) { if (boundary > 0) - return (boundary - 1) & IRANGE_BONDARY_MASK; + return (boundary - 1) & IRANGE_BOUNDARY_MASK; return 0; } @@ -92,8 +92,8 @@ irb_pred(uint32 boundary) inline static uint32 irb_succ(uint32 boundary) { - if (boundary >= IRANGE_BONDARY_MASK) - return IRANGE_BONDARY_MASK; + if (boundary >= IRANGE_BOUNDARY_MASK) + return IRANGE_BOUNDARY_MASK; return boundary + 1; } diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index ea06e648..98d8d4d5 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -64,13 +64,13 @@ test_irange_basic(void **state) /* test irb_succ() */ assert_int_equal(100, irb_succ(99)); - assert_int_equal(IRANGE_BONDARY_MASK, irb_succ(IRANGE_BONDARY_MASK)); - assert_int_equal(IRANGE_BONDARY_MASK, irb_succ(IRANGE_BONDARY_MASK + 1)); + assert_int_equal(IRANGE_BOUNDARY_MASK, 
irb_succ(IRANGE_BOUNDARY_MASK)); + assert_int_equal(IRANGE_BOUNDARY_MASK, irb_succ(IRANGE_BOUNDARY_MASK + 1)); /* test convenience macros */ - irange = make_irange(0, IRANGE_BONDARY_MASK, IR_LOSSY); + irange = make_irange(0, IRANGE_BOUNDARY_MASK, IR_LOSSY); assert_int_equal(irange_lower(irange), 0); - assert_int_equal(irange_upper(irange), IRANGE_BONDARY_MASK); + assert_int_equal(irange_upper(irange), IRANGE_BOUNDARY_MASK); assert_true(is_irange_lossy(irange)); assert_true(is_irange_valid(irange)); From 9fc267a5514ea3ed19ff1a2aa610389f6f135144 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Feb 2017 18:51:04 +0300 Subject: [PATCH 0237/1124] improve README.md (partition creation callback, json) --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 709ccf35..85d942d3 100644 --- a/README.md +++ b/README.md @@ -267,8 +267,10 @@ Set partition creation callback to be invoked for each attached or created parti /* RANGE-partitioned table abc (child abc_4) */ { "parent": "abc", + "parent_schema": "public", "parttype": "2", "partition": "abc_4", + "partition_schema": "public", "range_max": "401", "range_min": "301" } @@ -276,8 +278,10 @@ Set partition creation callback to be invoked for each attached or created parti /* HASH-partitioned table abc (child abc_0) */ { "parent": "abc", + "parent_schema": "public", "parttype": "1", "partition": "abc_0" + "partition_schema": "public" } ``` From eafacf0ad000b9861bd75e4c4b27937a9f2d4516 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Feb 2017 12:46:28 +0300 Subject: [PATCH 0238/1124] move headers to 'include' --- Makefile | 6 ++++-- src/{ => compat}/pg_compat.c | 2 +- src/hooks.c | 5 +++-- src/{ => include/compat}/pg_compat.h | 0 src/{ => include}/hooks.h | 0 src/{ => include}/init.h | 0 src/{ => include}/nodes_common.h | 0 src/{ => include}/partition_creation.h | 0 src/{ => include}/partition_filter.h | 0 src/{ => include}/pathman.h | 0 src/{ => 
include}/pathman_workers.h | 0 src/{ => include}/planner_tree_modification.h | 0 src/{ => include}/rangeset.h | 0 src/{ => include}/relation_info.h | 0 src/{ => include}/runtime_merge_append.h | 0 src/{ => include}/runtimeappend.h | 0 src/{ => include}/utility_stmt_hooking.h | 0 src/{ => include}/utils.h | 0 src/{ => include}/xact_handling.h | 0 src/pg_pathman.c | 2 +- src/runtime_merge_append.c | 2 +- 21 files changed, 10 insertions(+), 7 deletions(-) rename src/{ => compat}/pg_compat.c (99%) rename src/{ => include/compat}/pg_compat.h (100%) rename src/{ => include}/hooks.h (100%) rename src/{ => include}/init.h (100%) rename src/{ => include}/nodes_common.h (100%) rename src/{ => include}/partition_creation.h (100%) rename src/{ => include}/partition_filter.h (100%) rename src/{ => include}/pathman.h (100%) rename src/{ => include}/pathman_workers.h (100%) rename src/{ => include}/planner_tree_modification.h (100%) rename src/{ => include}/rangeset.h (100%) rename src/{ => include}/relation_info.h (100%) rename src/{ => include}/runtime_merge_append.h (100%) rename src/{ => include}/runtimeappend.h (100%) rename src/{ => include}/utility_stmt_hooking.h (100%) rename src/{ => include}/utils.h (100%) rename src/{ => include}/xact_handling.h (100%) diff --git a/Makefile b/Makefile index c151604d..21d0dbfc 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,10 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ - src/planner_tree_modification.o src/debug_print.o src/pg_compat.o \ - src/partition_creation.o $(WIN32RES) + src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ + src/compat/pg_compat.o $(WIN32RES) + +PG_CPPFLAGS = -I$(CURDIR)/src/include EXTENSION = pg_pathman diff --git 
a/src/pg_compat.c b/src/compat/pg_compat.c similarity index 99% rename from src/pg_compat.c rename to src/compat/pg_compat.c index d7b0c30e..315942ce 100644 --- a/src/pg_compat.c +++ b/src/compat/pg_compat.c @@ -11,7 +11,7 @@ * ------------------------------------------------------------------------ */ -#include "pg_compat.h" +#include "compat/pg_compat.h" #include "catalog/pg_proc.h" #include "foreign/fdwapi.h" diff --git a/src/hooks.c b/src/hooks.c index 72169921..d4b4ceb0 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -8,14 +8,15 @@ * ------------------------------------------------------------------------ */ -#include "utility_stmt_hooking.h" +#include "compat/pg_compat.h" + #include "hooks.h" #include "init.h" #include "partition_filter.h" -#include "pg_compat.h" #include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" +#include "utility_stmt_hooking.h" #include "utils.h" #include "xact_handling.h" diff --git a/src/pg_compat.h b/src/include/compat/pg_compat.h similarity index 100% rename from src/pg_compat.h rename to src/include/compat/pg_compat.h diff --git a/src/hooks.h b/src/include/hooks.h similarity index 100% rename from src/hooks.h rename to src/include/hooks.h diff --git a/src/init.h b/src/include/init.h similarity index 100% rename from src/init.h rename to src/include/init.h diff --git a/src/nodes_common.h b/src/include/nodes_common.h similarity index 100% rename from src/nodes_common.h rename to src/include/nodes_common.h diff --git a/src/partition_creation.h b/src/include/partition_creation.h similarity index 100% rename from src/partition_creation.h rename to src/include/partition_creation.h diff --git a/src/partition_filter.h b/src/include/partition_filter.h similarity index 100% rename from src/partition_filter.h rename to src/include/partition_filter.h diff --git a/src/pathman.h b/src/include/pathman.h similarity index 100% rename from src/pathman.h rename to src/include/pathman.h diff --git 
a/src/pathman_workers.h b/src/include/pathman_workers.h similarity index 100% rename from src/pathman_workers.h rename to src/include/pathman_workers.h diff --git a/src/planner_tree_modification.h b/src/include/planner_tree_modification.h similarity index 100% rename from src/planner_tree_modification.h rename to src/include/planner_tree_modification.h diff --git a/src/rangeset.h b/src/include/rangeset.h similarity index 100% rename from src/rangeset.h rename to src/include/rangeset.h diff --git a/src/relation_info.h b/src/include/relation_info.h similarity index 100% rename from src/relation_info.h rename to src/include/relation_info.h diff --git a/src/runtime_merge_append.h b/src/include/runtime_merge_append.h similarity index 100% rename from src/runtime_merge_append.h rename to src/include/runtime_merge_append.h diff --git a/src/runtimeappend.h b/src/include/runtimeappend.h similarity index 100% rename from src/runtimeappend.h rename to src/include/runtimeappend.h diff --git a/src/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h similarity index 100% rename from src/utility_stmt_hooking.h rename to src/include/utility_stmt_hooking.h diff --git a/src/utils.h b/src/include/utils.h similarity index 100% rename from src/utils.h rename to src/include/utils.h diff --git a/src/xact_handling.h b/src/include/xact_handling.h similarity index 100% rename from src/xact_handling.h rename to src/include/xact_handling.h diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2e690422..00ac0e79 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -9,7 +9,7 @@ * ------------------------------------------------------------------------ */ -#include "pg_compat.h" +#include "compat/pg_compat.h" #include "init.h" #include "hooks.h" diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index e021d3ce..ecdd29c0 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -10,7 +10,7 @@ * 
------------------------------------------------------------------------ */ -#include "pg_compat.h" +#include "compat/pg_compat.h" #include "runtime_merge_append.h" #include "pathman.h" From 0a0d2cf794b2374e6ab38562e9c7e8b15a75ffa4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Feb 2017 13:06:21 +0300 Subject: [PATCH 0239/1124] fix gcov calls (Travis CI) --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 9a544a15..78f275a3 100644 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -106,7 +106,7 @@ set -u #generate *.gcov files -gcov src/*.c src/*.h +gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h exit $status From ecec00603dbba530f622a4809e09e4825e8d3065 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 21 Feb 2017 17:57:07 +0300 Subject: [PATCH 0240/1124] auto trigger creation for partitions --- expected/pathman_basic.out | 4 +- init.sql | 23 ++++++- range.sql | 11 ++++ sql/pathman_basic.sql | 1 + src/partition_creation.c | 123 ++++++++++++++++++++++++++++++++++--- src/partition_creation.h | 6 ++ src/pl_funcs.c | 103 ++++++------------------------- 7 files changed, 175 insertions(+), 96 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 8baa94bd..e926ca20 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1321,6 +1321,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A (6 rows) SELECT pathman.detach_range_partition('test.range_rel_archive'); +NOTICE: trigger "range_rel_upd_trig" for relation "test.range_rel_archive" does not exist, skipping detach_range_partition ------------------------ test.range_rel_archive @@ -1734,6 +1735,7 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) +DROP TABLE test."TeSt" CASCADE; CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, @@ -2189,6 +2191,6 @@ ORDER BY partition; 
DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 49 other objects +NOTICE: drop cascades to 48 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/init.sql b/init.sql index 2e2ab5bd..8f6b3174 100644 --- a/init.sql +++ b/init.sql @@ -562,7 +562,17 @@ LANGUAGE C; */ CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers(parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'create_update_triggers' -LANGUAGE C; +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.is_update_trigger_enabled(parent_relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_update_trigger_enabled' +LANGUAGE C STRICT; /* * Drop triggers @@ -580,13 +590,22 @@ BEGIN /* Drop trigger for each partition if exists */ FOR rec IN (SELECT pg_catalog.pg_inherits.* FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger on inhrelid = tgrelid + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid WHERE inhparent = parent_relid AND tgname = triggername) LOOP EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', triggername, rec.inhrelid::REGCLASS::TEXT); END LOOP; + + /* Drop trigger on parent */ + IF EXISTS (SELECT * FROM pg_catalog.pg_trigger + WHERE tgname = triggername AND tgrelid = parent_relid) + THEN + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + parent_relid::TEXT); + END IF; END $$ LANGUAGE plpgsql STRICT; diff --git a/range.sql b/range.sql index 8e42bc9b..02936ae9 100644 --- a/range.sql +++ b/range.sql @@ -935,6 +935,12 @@ BEGIN ON params.partrel = parent_relid INTO v_init_callback; + /* If update trigger is enabled then create one for this partition */ + if @extschema@.is_update_trigger_enabled(parent_relid) THEN + PERFORM 
@extschema@.create_single_update_trigger(parent_relid, partition_relid); + END IF; + + /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, v_init_callback, @@ -984,6 +990,11 @@ BEGIN partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid, v_attname)); + /* Remove update trigger */ + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + @extschema@.build_update_trigger_name(parent_relid), + partition_relid::TEXT); + /* Invalidate cache */ PERFORM @extschema@.on_update_partitions(parent_relid); diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a69940db..18b61de2 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -471,6 +471,7 @@ SELECT * FROM test."TeSt" WHERE a = 1; EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; SELECT pathman.drop_partitions('test."TeSt"'); SELECT * FROM test."TeSt"; +DROP TABLE test."TeSt" CASCADE; CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, diff --git a/src/partition_creation.c b/src/partition_creation.c index 60b826cc..3e8a0aae 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -22,12 +22,14 @@ #include "catalog/heap.h" #include "catalog/pg_authid.h" #include "catalog/pg_proc.h" +#include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" #include "commands/tablespace.h" +#include "commands/trigger.h" #include "miscadmin.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" @@ -52,9 +54,11 @@ static Oid spawn_partitions_val(Oid parent_relid, Datum value, Oid value_type); -static void create_single_partition_common(Oid partition_relid, +static void create_single_partition_common(Oid parent_relid, + Oid partition_relid, Constraint *check_constraint, - init_callback_params *callback_params); + init_callback_params *callback_params, + 
const char *attname); static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, @@ -77,6 +81,8 @@ static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); +static bool update_trigger_exists(Oid relid, char *trigname); + /* * --------------------------------------- @@ -130,9 +136,11 @@ create_single_range_partition_internal(Oid parent_relid, *start_value, *end_value, value_type); /* Add constraint & execute init_callback */ - create_single_partition_common(partition_relid, + create_single_partition_common(parent_relid, + partition_relid, check_constr, - &callback_params); + &callback_params, + partitioned_column); /* Return the Oid */ return partition_relid; @@ -183,9 +191,11 @@ create_single_hash_partition_internal(Oid parent_relid, parent_relid, partition_relid); /* Add constraint & execute init_callback */ - create_single_partition_common(partition_relid, + create_single_partition_common(parent_relid, + partition_relid, check_constr, - &callback_params); + &callback_params, + partitioned_column); /* Return the Oid */ return partition_relid; @@ -193,11 +203,13 @@ create_single_hash_partition_internal(Oid parent_relid, /* Add constraint & execute init_callback */ void -create_single_partition_common(Oid partition_relid, +create_single_partition_common(Oid parent_relid, + Oid partition_relid, Constraint *check_constraint, - init_callback_params *callback_params) + init_callback_params *callback_params, + const char *attname) { - Relation child_relation; + Relation child_relation; /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); @@ -209,6 +221,20 @@ create_single_partition_common(Oid partition_relid, /* Make constraint visible */ CommandCounterIncrement(); + /* Create trigger */ + if (is_update_trigger_enabled_internal(parent_relid)) + { + char 
*trigname; + + trigname = build_update_trigger_name_internal(parent_relid); + create_single_update_trigger_internal(partition_relid, + trigname, + attname); + } + + /* Make trigger visible */ + CommandCounterIncrement(); + /* Finally invoke 'init_callback' */ invoke_part_callback(callback_params); @@ -1649,3 +1675,82 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } + +/* + * Create trigger for partition + */ +void +create_single_update_trigger_internal(Oid relid, + const char *trigname, + const char *attname) +{ + CreateTrigStmt *stmt; + List *func; + + func = list_make2(makeString(get_namespace_name(get_pathman_schema())), + makeString("update_trigger_func")); + + stmt = makeNode(CreateTrigStmt); + stmt->trigname = (char *) trigname; + stmt->relation = makeRangeVarFromRelid(relid); + stmt->funcname = func; + stmt->args = NIL; + stmt->row = true; + stmt->timing = TRIGGER_TYPE_BEFORE; + stmt->events = TRIGGER_TYPE_UPDATE; + stmt->columns = list_make1(makeString((char *) attname)); + stmt->whenClause = NULL; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = false; + stmt->constrrel = NULL; + + (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, + InvalidOid, InvalidOid, false); +} + +/* + * Check if update trigger is enabled. 
Basicly it returns true if update + * trigger exists for parent table + */ +bool +is_update_trigger_enabled_internal(Oid parent) +{ + char *trigname; + + trigname = build_update_trigger_name_internal(parent); + return update_trigger_exists(parent, trigname); +} + +static bool +update_trigger_exists(Oid relid, char *trigname) +{ + bool res = false; + Relation tgrel; + SysScanDesc tgscan; + ScanKeyData key; + HeapTuple tuple; + + tgrel = heap_open(TriggerRelationId, RowExclusiveLock); + + ScanKeyInit(&key, + Anum_pg_trigger_tgrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relid)); + tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, + NULL, 1, &key); + while (HeapTupleIsValid(tuple = systable_getnext(tgscan))) + { + Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); + + if (namestrcmp(&(pg_trigger->tgname), trigname) == 0) + { + res = true; + break; + } + } + systable_endscan(tgscan); + heap_close(tgrel, RowExclusiveLock); + + return res; +} diff --git a/src/partition_creation.h b/src/partition_creation.h index 0338fa4e..faa70f7c 100644 --- a/src/partition_creation.h +++ b/src/partition_creation.h @@ -77,6 +77,12 @@ Node * build_raw_hash_check_tree(char *attname, void drop_check_constraint(Oid relid, AttrNumber attnum); +/* Update triggers */ +void create_single_update_trigger_internal(Oid relid, + const char *trigname, + const char *attname); +bool is_update_trigger_enabled_internal(Oid parent); + /* Partitioning callback type */ typedef enum diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 99d75dfa..d909a453 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -37,10 +37,6 @@ static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); -static void create_single_update_trigger_internal(Oid relid, - const char *trigname, - const char *attname); -static bool update_trigger_exists(Oid relid, char *trigname); /* Function declarations */ @@ -83,6 +79,7 @@ PG_FUNCTION_INFO_V1( get_pathman_lib_version ); 
PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( update_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); +PG_FUNCTION_INFO_V1( is_update_trigger_enabled ); /* @@ -968,8 +965,8 @@ update_trigger_func(PG_FUNCTION_ARGS) /* Find parent relation and partitioning info */ parent = get_parent_of_partition(source_relid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(source_relid)); + parent = source_relid; + prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); @@ -1038,6 +1035,13 @@ get_partition_for_key(const PartRelationInfo *prel, Datum key) return parts[0]; } + +/* + * ------------------------ + * Trigger functions + * ------------------------ + */ + /* * Create UPDATE triggers for all partitions */ @@ -1058,54 +1062,16 @@ create_update_triggers(PG_FUNCTION_ARGS) children = PrelGetChildrenArray(prel); trigname = build_update_trigger_name_internal(parent); + /* Create triggers for parent */ + create_single_update_trigger_internal(parent, trigname, attname); + /* Create triggers for each partition */ for (i = 0; i < PrelChildrenCount(prel); i++) - { - if (update_trigger_exists(children[i], trigname)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("trigger \"%s\" for relation \"%s\" already exists", - trigname, get_rel_name_or_relid(children[i])))); - create_single_update_trigger_internal(children[i], trigname, attname); - } PG_RETURN_VOID(); } -static bool -update_trigger_exists(Oid relid, char *trigname) -{ - bool res = false; - Relation tgrel; - SysScanDesc tgscan; - ScanKeyData key; - HeapTuple tuple; - - tgrel = heap_open(TriggerRelationId, RowExclusiveLock); - - ScanKeyInit(&key, - Anum_pg_trigger_tgrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relid)); - tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, - NULL, 1, &key); - while 
(HeapTupleIsValid(tuple = systable_getnext(tgscan))) - { - Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); - - if (namestrcmp(&(pg_trigger->tgname), trigname) == 0) - { - res = true; - break; - } - } - systable_endscan(tgscan); - heap_close(tgrel, RowExclusiveLock); - - return res; -} - /* * Create an UPDATE trigger for partition */ @@ -1113,21 +1079,14 @@ Datum create_single_update_trigger(PG_FUNCTION_ARGS) { const PartRelationInfo *prel; - Oid partition = PG_GETARG_OID(0); - Oid parent; - PartParentSearch parent_search; + Oid parent = PG_GETARG_OID(0); + Oid partition = PG_GETARG_OID(1); char *trigname, *attname; - /* Get parent's Oid */ - parent = get_parent_of_partition(partition, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "\"%s\" is not a partition", - get_rel_name_or_relid(partition)); - /* Determine partitioning key name */ prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(partition, prel, PT_INDIFFERENT); + shout_if_prel_is_invalid(parent, prel, PT_INDIFFERENT); trigname = build_update_trigger_name_internal(parent); attname = get_attname(prel->key, prel->attnum); @@ -1137,32 +1096,8 @@ create_single_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -static void -create_single_update_trigger_internal(Oid relid, - const char *trigname, - const char *attname) +Datum +is_update_trigger_enabled(PG_FUNCTION_ARGS) { - CreateTrigStmt *stmt; - List *func; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString("update_trigger_func")); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = list_make1(makeString((char *) attname)); - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = 
false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, - InvalidOid, InvalidOid, false); + PG_RETURN_BOOL(is_update_trigger_enabled_internal(PG_GETARG_OID(0))); } From c08c648d07a7aadd57b8475546bb6f2cbf88d3aa Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Feb 2017 18:05:28 +0300 Subject: [PATCH 0241/1124] introduce subsystem compat/relation_tags --- Makefile | 2 +- src/compat/relation_tags.c | 212 ++++++++++++++++++++++++ src/hooks.c | 12 +- src/include/compat/relation_tags.h | 66 ++++++++ src/include/planner_tree_modification.h | 14 +- src/planner_tree_modification.c | 142 ++++------------ 6 files changed, 328 insertions(+), 120 deletions(-) create mode 100644 src/compat/relation_tags.c create mode 100644 src/include/compat/relation_tags.h diff --git a/Makefile b/Makefile index 21d0dbfc..801259e1 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o $(WIN32RES) + src/compat/pg_compat.o src/compat/relation_tags.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c new file mode 100644 index 00000000..f521b8d7 --- /dev/null +++ b/src/compat/relation_tags.c @@ -0,0 +1,212 @@ +/* ------------------------------------------------------------------------ + * + * relation_tags.c + * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "compat/relation_tags.h" +#include "planner_tree_modification.h" + +#include "nodes/nodes.h" + + +/* + * This table is used to ensure that 
partitioned relation + * cant't be used with both and without ONLY modifiers. + */ +static HTAB *per_table_relation_tags = NULL; +static int per_table_relation_tags_refcount = 0; + + +/* private struct stored by parenthood lists */ +typedef struct +{ + Oid relid; /* key (part #1) */ + uint32 queryId; /* key (part #2) */ + List *relation_tags; +} relation_tags_entry; + + +/* Look through RTE's relation tags */ +List * +rte_fetch_tag(const uint32 query_id, + const RangeTblEntry *rte, + const char *key) +{ + relation_tags_entry *htab_entry, + htab_key = { rte->relid, query_id, NIL /* unused */ }; + + AssertArg(rte); + AssertArg(key); + + /* Skip if table is not initialized */ + if (per_table_relation_tags) + { + /* Search by 'htab_key' */ + htab_entry = hash_search(per_table_relation_tags, + &htab_key, HASH_FIND, NULL); + + if (htab_entry) + return relation_tags_search(htab_entry->relation_tags, key); + } + + /* Not found, return stub value */ + return NIL; +} + +/* Attach new relation tag to RTE. Returns KVP with duplicate key. 
*/ +List * +rte_attach_tag(const uint32 query_id, + RangeTblEntry *rte, + List *key_value_pair) +{ + relation_tags_entry *htab_entry, + htab_key = { rte->relid, query_id, NIL /* unused */ }; + bool found; + MemoryContext old_mcxt; + + AssertArg(rte); + AssertArg(key_value_pair && list_length(key_value_pair) == 2); + + /* We prefer to initialize this table lazily */ + if (!per_table_relation_tags) + { + const long start_elems = 50; + HASHCTL hashctl; + + memset(&hashctl, 0, sizeof(HASHCTL)); + hashctl.entrysize = sizeof(relation_tags_entry); + hashctl.keysize = offsetof(relation_tags_entry, relation_tags); + hashctl.hcxt = TAG_MEMORY_CONTEXT; + + per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", + start_elems, &hashctl, + HASH_ELEM | HASH_BLOBS); + } + + /* Search by 'htab_key' */ + htab_entry = hash_search(per_table_relation_tags, + &htab_key, HASH_ENTER, &found); + + if (found) + { + const char *current_key; + + /* Extract key of this KVP */ + rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); + + /* Check if this KVP already exists */ + return relation_tags_search(htab_entry->relation_tags, current_key); + } + + /* Don't forget to initialize list! */ + else htab_entry->relation_tags = NIL; + + /* Add this KVP */ + old_mcxt = MemoryContextSwitchTo(TAG_MEMORY_CONTEXT); + htab_entry->relation_tags = lappend(htab_entry->relation_tags, + key_value_pair); + MemoryContextSwitchTo(old_mcxt); + + /* Success! 
*/ + return NIL; +} + + + +/* Extract key & value from 'key_value_pair' */ +void +rte_deconstruct_tag(const List *key_value_pair, + const char **key, /* ret value #1 */ + const Value **value) /* ret value #2 */ +{ + const char *r_key; + const Value *r_value; + + AssertArg(key_value_pair && list_length(key_value_pair) == 2); + + r_key = (const char *) strVal(linitial(key_value_pair)); + r_value = (const Value *) lsecond(key_value_pair); + + /* Check that 'key' is valid */ + Assert(IsA(linitial(key_value_pair), String)); + + /* Check that 'value' is valid or NULL */ + Assert(r_value == NULL || + IsA(r_value, Integer) || + IsA(r_value, Float) || + IsA(r_value, String)); + + /* Finally return key & value */ + if (key) *key = r_key; + if (value) *value = r_value; +} + +/* Search through list of 'relation_tags' */ +List * +relation_tags_search(List *relation_tags, const char *key) +{ + ListCell *lc; + + AssertArg(key); + + /* Scan KVP list */ + foreach (lc, relation_tags) + { + List *current_kvp = (List *) lfirst(lc); + const char *current_key; + + /* Extract key of this KVP */ + rte_deconstruct_tag(current_kvp, ¤t_key, NULL); + + /* Check if this is the KVP we're looking for */ + if (strcmp(key, current_key) == 0) + return current_kvp; + } + + /* Nothing! 
*/ + return NIL; +} + + + +/* Increate usage counter by 1 */ +void +incr_refcount_relation_tags(void) +{ + /* Increment reference counter */ + if (++per_table_relation_tags_refcount <= 0) + elog(WARNING, "imbalanced %s", + CppAsString(incr_refcount_relation_tags)); +} + +/* Return current value of usage counter */ +uint32 +get_refcount_relation_tags(void) +{ + /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ + return per_table_relation_tags_refcount; +} + +/* Reset all cached statuses if needed (query end) */ +void +decr_refcount_relation_tags(void) +{ + /* Decrement reference counter */ + if (--per_table_relation_tags_refcount < 0) + elog(WARNING, "imbalanced %s", + CppAsString(decr_refcount_relation_tags)); + + /* Free resources if no one is using them */ + if (per_table_relation_tags_refcount == 0) + { + reset_query_id_generator(); + + hash_destroy(per_table_relation_tags); + per_table_relation_tags = NULL; + } +} diff --git a/src/hooks.c b/src/hooks.c index d4b4ceb0..315c030c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -9,6 +9,7 @@ */ #include "compat/pg_compat.h" +#include "compat/relation_tags.h" #include "hooks.h" #include "init.h" @@ -214,8 +215,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, return; /* Skip if this table is not allowed to act as parent (see FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, - rte->relid)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) return; /* Proceed iff relation 'rel' is partitioned */ @@ -476,7 +476,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { /* Increment parenthood_statuses refcount */ - incr_refcount_parenthood_statuses(); + incr_refcount_relation_tags(); /* Modify query tree if needed */ pathman_transform_query(parse); @@ -497,7 +497,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) 
ExecuteForPlanTree(result, add_partition_filters); /* Decrement parenthood_statuses refcount */ - decr_refcount_parenthood_statuses(); + decr_refcount_relation_tags(); /* HACK: restore queryId set by pg_stat_statements */ result->queryId = query_id; @@ -509,7 +509,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { /* Caught an ERROR, decrease refcount */ - decr_refcount_parenthood_statuses(); + decr_refcount_relation_tags(); } /* Rethrow ERROR further */ @@ -552,7 +552,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) } /* Process inlined SQL functions (we've already entered planning stage) */ - if (IsPathmanReady() && get_refcount_parenthood_statuses() > 0) + if (IsPathmanReady() && get_refcount_relation_tags() > 0) { /* Check that pg_pathman is the last extension loaded */ if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h new file mode 100644 index 00000000..849e10d0 --- /dev/null +++ b/src/include/compat/relation_tags.h @@ -0,0 +1,66 @@ +/* ------------------------------------------------------------------------ + * + * relation_tags.h + * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef RELATION_TAGS_H +#define RELATION_TAGS_H + + +#include "pathman.h" + +#include "postgres.h" +#include "nodes/relation.h" +#include "nodes/value.h" +#include "utils/memutils.h" + + + +/* Memory context we're going to use for TAGs */ +#define TAG_MEMORY_CONTEXT TopTransactionContext + +/* Safe TAG constructor (Integer) */ +static inline List * +make_rte_tag_int(char *key, int value) +{ + List *kvp; + MemoryContext old_mcxt; + + /* Allocate TAG in a persistent memory context */ + old_mcxt = MemoryContextSwitchTo(TAG_MEMORY_CONTEXT); + kvp = 
list_make2(makeString(key), makeInteger(value)); + MemoryContextSwitchTo(old_mcxt); + + return kvp; +} + + + +List *rte_fetch_tag(const uint32 query_id, + const RangeTblEntry *rte, + const char *key); + +List *rte_attach_tag(const uint32 query_id, + RangeTblEntry *rte, + List *key_value_pair); + + +List *relation_tags_search(List *custom_tags, + const char *key); + +void rte_deconstruct_tag(const List *key_value_pair, + const char **key, + const Value **value); + + +void incr_refcount_relation_tags(void); +uint32 get_refcount_relation_tags(void); +void decr_refcount_relation_tags(void); + + +#endif /* RELATION_TAGS_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index ddf546ac..e69f0b1e 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -20,6 +20,10 @@ #include "nodes/nodeFuncs.h" +/* Query ID generator */ +void assign_query_id(Query *query); +void reset_query_id_generator(void); + /* Plan tree rewriting utility */ void plan_tree_walker(Plan *plan, void (*visitor) (Plan *plan, void *context), @@ -41,12 +45,12 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled (default) */ } rel_parenthood_status; -void assign_rel_parenthood_status(uint32 query_id, Oid relid, +#define PARENTHOOD_TAG CppAsString(PARENTHOOD) + +void assign_rel_parenthood_status(uint32 query_id, + RangeTblEntry *rte, rel_parenthood_status new_status); -rel_parenthood_status get_rel_parenthood_status(uint32 query_id, Oid relid); -void incr_refcount_parenthood_statuses(void); -uint32 get_refcount_parenthood_statuses(void); -void decr_refcount_parenthood_statuses(void); +rel_parenthood_status get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte); #endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 013b6370..844713fa 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -8,6 +8,8 @@ * 
------------------------------------------------------------------------ */ +#include "compat/relation_tags.h" + #include "nodes_common.h" #include "partition_filter.h" #include "planner_tree_modification.h" @@ -19,7 +21,6 @@ #include "optimizer/clauses.h" #include "storage/lmgr.h" #include "utils/builtins.h" -#include "utils/memutils.h" #include "utils/syscache.h" @@ -39,13 +40,8 @@ static void partition_filter_visitor(Plan *plan, void *context); static void lock_rows_visitor(Plan *plan, void *context); static List *get_tableoids_list(List *tlist); +static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -/* - * This table is used to ensure that partitioned relation - * cant't be used with both and without ONLY modifiers. - */ -static HTAB *per_table_parenthood_mapping = NULL; -static int per_table_parenthood_mapping_refcount = 0; /* * We have to mark each Query with a unique id in order @@ -54,7 +50,8 @@ static int per_table_parenthood_mapping_refcount = 0; #define QUERY_ID_INITIAL 0 static uint32 latest_query_id = QUERY_ID_INITIAL; -static inline void + +void assign_query_id(Query *query) { uint32 prev_id = latest_query_id++; @@ -65,7 +62,7 @@ assign_query_id(Query *query) query->queryId = latest_query_id; } -static inline void +void reset_query_id_generator(void) { latest_query_id = QUERY_ID_INITIAL; @@ -222,14 +219,12 @@ disable_standard_inheritance(Query *parse) rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ - assign_rel_parenthood_status(parse->queryId, - rte->relid, + assign_rel_parenthood_status(parse->queryId, rte, PARENTHOOD_ALLOWED); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ - else assign_rel_parenthood_status(parse->queryId, - rte->relid, + else assign_rel_parenthood_status(parse->queryId, rte, PARENTHOOD_DISALLOWED); } } @@ -557,124 +552,55 @@ lock_rows_visitor(Plan *plan, void *context) * ----------------------------------------------- */ -/* private struct stored by parenthood lists */ 
-typedef struct -{ - Oid relid; /* key (part #1) */ - uint32 queryId; /* key (part #2) */ - rel_parenthood_status parenthood_status; -} cached_parenthood_status; - - /* Set parenthood status (per query level) */ void assign_rel_parenthood_status(uint32 query_id, - Oid relid, + RangeTblEntry *rte, rel_parenthood_status new_status) { - cached_parenthood_status *status_entry, - key = { relid, query_id, PARENTHOOD_NOT_SET }; - bool found; - /* We prefer to init this table lazily */ - if (per_table_parenthood_mapping == NULL) - { - const long start_elems = 50; - HASHCTL hashctl; - - memset(&hashctl, 0, sizeof(HASHCTL)); - hashctl.entrysize = sizeof(cached_parenthood_status); - hashctl.keysize = offsetof(cached_parenthood_status, parenthood_status); - hashctl.hcxt = TopTransactionContext; - - per_table_parenthood_mapping = hash_create("Parenthood Storage", - start_elems, &hashctl, - HASH_ELEM | HASH_BLOBS); - } + List *old_relation_tag; - /* Search by 'key' */ - status_entry = hash_search(per_table_parenthood_mapping, - &key, HASH_ENTER, &found); + old_relation_tag = rte_attach_tag(query_id, rte, + make_rte_tag_int(PARENTHOOD_TAG, + new_status)); - if (found) + /* We already have a PARENTHOOD_TAG, examine it's value */ + if (old_relation_tag && + tag_extract_parenthood_status(old_relation_tag) != new_status) { - /* Saved status conflicts with 'new_status' */ - if (status_entry->parenthood_status != new_status) - { - elog(ERROR, "it is prohibited to apply ONLY modifier to partitioned " - "tables which have already been mentioned without ONLY"); - } - } - else - { - /* This should NEVER happen! 
*/ - Assert(new_status != PARENTHOOD_NOT_SET); - - status_entry->parenthood_status = new_status; + elog(ERROR, + "it is prohibited to apply ONLY modifier to partitioned " + "tables which have already been mentioned without ONLY"); } } /* Get parenthood status (per query level) */ rel_parenthood_status -get_rel_parenthood_status(uint32 query_id, Oid relid) +get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte) { - cached_parenthood_status *status_entry, - key = { relid, query_id, PARENTHOOD_NOT_SET }; - - /* Skip if table is not initialized */ - if (per_table_parenthood_mapping) - { - /* Search by 'key' */ - status_entry = hash_search(per_table_parenthood_mapping, - &key, HASH_FIND, NULL); - - if (status_entry) - { - /* This should NEVER happen! */ - Assert(status_entry->parenthood_status != PARENTHOOD_NOT_SET); + List *relation_tag; - /* Return cached parenthood status */ - return status_entry->parenthood_status; - } - } + relation_tag = rte_fetch_tag(query_id, rte, PARENTHOOD_TAG); + if (relation_tag) + return tag_extract_parenthood_status(relation_tag); /* Not found, return stub value */ return PARENTHOOD_NOT_SET; } -/* Increate usage counter by 1 */ -void -incr_refcount_parenthood_statuses(void) -{ - /* Increment reference counter */ - if (++per_table_parenthood_mapping_refcount <= 0) - elog(WARNING, "imbalanced %s", - CppAsString(incr_refcount_parenthood_statuses)); -} - -/* Return current value of usage counter */ -uint32 -get_refcount_parenthood_statuses(void) +static rel_parenthood_status +tag_extract_parenthood_status(List *relation_tag) { - /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ - return per_table_parenthood_mapping_refcount; -} + const Value *value; + rel_parenthood_status status; -/* Reset all cached statuses if needed (query end) */ -void -decr_refcount_parenthood_statuses(void) -{ - /* Decrement reference counter */ - if (--per_table_parenthood_mapping_refcount < 0) - elog(WARNING, "imbalanced %s", - 
CppAsString(decr_refcount_parenthood_statuses)); + rte_deconstruct_tag(relation_tag, NULL, &value); + Assert(value && IsA(value, Integer)); - /* Free resources if no one is using them */ - if (per_table_parenthood_mapping_refcount == 0) - { - reset_query_id_generator(); + status = (rel_parenthood_status) intVal(value); + Assert(status >= PARENTHOOD_NOT_SET && + status <= PARENTHOOD_ALLOWED); - hash_destroy(per_table_parenthood_mapping); - per_table_parenthood_mapping = NULL; - } + return status; } From 0c6d3b90e26d55ba9fdc9c9a91ccd319d139ae1c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Feb 2017 18:41:38 +0300 Subject: [PATCH 0242/1124] fix duplicate KVP search in function rte_attach_tag() --- src/compat/relation_tags.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index f521b8d7..15389655 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -95,12 +95,16 @@ rte_attach_tag(const uint32 query_id, if (found) { const char *current_key; + List *existing_kvp; /* Extract key of this KVP */ rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); /* Check if this KVP already exists */ - return relation_tags_search(htab_entry->relation_tags, current_key); + existing_kvp = relation_tags_search(htab_entry->relation_tags, + current_key); + if (existing_kvp) + return existing_kvp; /* return KVP with duplicate key */ } /* Don't forget to initialize list! 
*/ From 5c1b83d71b9a639e2a1ad54eecfeb5a9285a8272 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 22 Feb 2017 20:16:54 +0300 Subject: [PATCH 0243/1124] check that partitions tuple format is compatible with parent (instead of basic comparison of attributes of parent and partition) --- expected/pathman_basic.out | 6 +++--- hash.sql | 4 ++-- init.sql | 35 ++++++----------------------------- range.sql | 4 ++-- src/pl_funcs.c | 33 +++++++++++++++++++++++++++++++++ 5 files changed, 46 insertions(+), 36 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 3fcf346e..4de3a5e1 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1342,12 +1342,12 @@ CREATE TABLE test.range_rel_test1 ( txt TEXT, abc INTEGER); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); -ERROR: partition must have the exact same structure as parent +ERROR: partition must have a compatible tuple format CREATE TABLE test.range_rel_test2 ( id SERIAL PRIMARY KEY, dt TIMESTAMP); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); -ERROR: partition must have the exact same structure as parent +ERROR: column "dt" in child table must be marked NOT NULL /* Half open ranges */ SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); add_range_partition @@ -1499,7 +1499,7 @@ CREATE TABLE test.hash_rel_wrong( id INTEGER NOT NULL, value INTEGER); SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); -ERROR: partition must have the exact same structure as parent +ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; QUERY PLAN ----------------------------------- diff --git a/hash.sql b/hash.sql index 55cd70b7..a4828106 100644 --- a/hash.sql +++ b/hash.sql @@ -110,8 +110,8 @@ BEGIN 
END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; + IF NOT @extschema@.tuple_format_is_convertable(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; /* Get partitioning key */ diff --git a/init.sql b/init.sql index 8f6b3174..b9180e61 100644 --- a/init.sql +++ b/init.sql @@ -491,36 +491,13 @@ $$ LANGUAGE plpgsql STRICT; /* - * Check if two relations have equal structures. + * Check that tuple from first relation could be converted to fit the second one */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( - relation1 OID, relation2 OID) -RETURNS BOOLEAN AS -$$ -DECLARE - rec RECORD; - -BEGIN - FOR rec IN ( - WITH - a1 AS (select * from pg_catalog.pg_attribute - where attrelid = relation1 and attnum > 0), - a2 AS (select * from pg_catalog.pg_attribute - where attrelid = relation2 and attnum > 0) - SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 - FROM a1 - FULL JOIN a2 ON a1.attnum = a2.attnum - ) - LOOP - IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN - RETURN false; - END IF; - END LOOP; - - RETURN true; -END -$$ -LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.tuple_format_is_convertable( + relation1 OID, + relation2 OID) +RETURNS BOOL AS 'pg_pathman', 'tuple_format_is_convertable' +LANGUAGE C; /* * DDL trigger that removes entry from pathman_config table. 
diff --git a/range.sql b/range.sql index 02936ae9..775eda46 100644 --- a/range.sql +++ b/range.sql @@ -905,8 +905,8 @@ BEGIN /* check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN - RAISE EXCEPTION 'partition must have the exact same structure as parent'; + IF NOT @extschema@.tuple_format_is_convertable(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; /* Set inheritance */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 158a0423..cf54d8e9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -61,6 +61,7 @@ PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); +PG_FUNCTION_INFO_V1( tuple_format_is_convertable ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); @@ -509,6 +510,38 @@ is_attribute_nullable(PG_FUNCTION_ARGS) PG_RETURN_BOOL(result); /* keep compiler happy */ } +Datum +tuple_format_is_convertable(PG_FUNCTION_ARGS) +{ + Oid relid1 = PG_GETARG_OID(0), + relid2 = PG_GETARG_OID(1); + Relation rel1, + rel2; + bool res = true; + + /* Relations should be already locked */ + rel1 = heap_open(relid1, NoLock); + rel2 = heap_open(relid2, NoLock); + + PG_TRY(); + { + /* Try to build a conversion map */ + (void) convert_tuples_by_name_map(rel1->rd_att, + rel2->rd_att, + "doesn't matter"); + } + PG_CATCH(); + { + res = false; + } + PG_END_TRY(); + + heap_close(rel1, NoLock); + heap_close(rel2, NoLock); + + PG_RETURN_BOOL(res); +} + /* * ------------------------ From 5157e6c748def046f9c39369d1ec8aecf7baa438 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Feb 2017 15:25:08 +0300 Subject: [PATCH 0244/1124] improve 'relation_tags' subsystem (fixed behavior for PgPro), add a TODO, add 
a new regression test group 'pathman_only' --- Makefile | 1 + expected/pathman_basic.out | 203 +----------------------- expected/pathman_only.out | 231 +++++++++++++++++++++++++++ expected/pathman_only_1.out | 247 +++++++++++++++++++++++++++++ sql/pathman_basic.sql | 46 ------ sql/pathman_only.sql | 70 ++++++++ src/compat/relation_tags.c | 100 ++++++++---- src/include/compat/relation_tags.h | 12 +- 8 files changed, 625 insertions(+), 285 deletions(-) create mode 100644 expected/pathman_only.out create mode 100644 expected/pathman_only_1.out create mode 100644 sql/pathman_only.sql diff --git a/Makefile b/Makefile index 801259e1..6ef67a8b 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,7 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool" REGRESS = pathman_basic \ + pathman_only \ pathman_cte \ pathman_bgw \ pathman_inserts \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 9c4652d9..8877d99e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -365,207 +365,6 @@ SELECT max(val) FROM test.insert_date_test; /* check last date */ DROP TABLE test.insert_date_test CASCADE; NOTICE: drop cascades to 8 other objects -/* Test special case: ONLY statement with not-ONLY for partitioned table */ -CREATE TABLE test.from_only_test(val INT NOT NULL); -INSERT INTO test.from_only_test SELECT generate_series(1, 20); -SELECT pathman.create_range_partitions('test.from_only_test', 'val', 1, 2); -NOTICE: sequence "from_only_test_seq" does not exist, skipping - create_range_partitions -------------------------- - 10 -(1 row) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test.from_only_test -UNION SELECT * FROM test.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on 
from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test -UNION SELECT * FROM ONLY test.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Seq Scan on from_only_test -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test -UNION SELECT * FROM test.from_only_test -UNION SELECT * FROM ONLY test.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 
from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 - -> Seq Scan on from_only_test -(26 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test.from_only_test -UNION SELECT * FROM test.from_only_test -UNION SELECT * FROM test.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 -(26 rows) - -/* not ok, ONLY|non-ONLY in one query */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test a JOIN ONLY test.from_only_test b USING(val); -ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM test.from_only_test), - q2 AS (SELECT * FROM ONLY test.from_only_test) -SELECT * FROM q1 JOIN q2 USING(val); - QUERY PLAN ---------------------------------------------- - Hash Join - Hash Cond: (q1.val = q2.val) - CTE q1 - -> Append - -> Seq Scan on from_only_test_1 - 
-> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - CTE q2 - -> Seq Scan on from_only_test - -> CTE Scan on q1 - -> Hash - -> CTE Scan on q2 -(19 rows) - -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM ONLY test.from_only_test) -SELECT * FROM test.from_only_test JOIN q1 USING(val); - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (from_only_test_1.val = q1.val) - CTE q1 - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Hash - -> CTE Scan on q1 -(17 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel LIMIT 1); - QUERY PLAN --------------------------------------------------------- - Append - InitPlan 1 (returns $0) - -> Limit - -> Seq Scan on range_rel - -> Index Scan using range_rel_1_pkey on range_rel_1 - Index Cond: (id = $0) - -> Index Scan using range_rel_2_pkey on range_rel_2 - Index Cond: (id = $0) - -> Index Scan using range_rel_3_pkey on range_rel_3 - Index Cond: (id = $0) - -> Index Scan using range_rel_4_pkey on range_rel_4 - Index Cond: (id = $0) -(12 rows) - -DROP TABLE test.from_only_test CASCADE; -NOTICE: drop cascades to 10 other objects SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; @@ -2198,6 +1997,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test 
CASCADE; -NOTICE: drop cascades to 49 other objects +NOTICE: drop cascades to 48 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_only.out b/expected/pathman_only.out new file mode 100644 index 00000000..6870ca6a --- /dev/null +++ b/expected/pathman_only.out @@ -0,0 +1,231 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); +NOTICE: sequence "from_only_test_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq 
Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on 
from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); +ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------- + Hash Join + Hash Cond: (q1.val = q2.val) + CTE q1 + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + CTE q2 + -> Seq Scan on from_only_test + -> CTE Scan on q1 + -> Hash + -> CTE Scan on q2 +(19 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS 
(SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +---------------------------------------------------------- + Nested Loop + CTE q1 + -> Seq Scan on from_only_test from_only_test_1 + -> CTE Scan on q1 + -> Custom Scan (RuntimeAppend) + -> Seq Scan on from_only_test_1 from_only_test + -> Seq Scan on from_only_test_2 from_only_test + -> Seq Scan on from_only_test_3 from_only_test + -> Seq Scan on from_only_test_4 from_only_test + -> Seq Scan on from_only_test_5 from_only_test + -> Seq Scan on from_only_test_6 from_only_test + -> Seq Scan on from_only_test_7 from_only_test + -> Seq Scan on from_only_test_8 from_only_test + -> Seq Scan on from_only_test_9 from_only_test + -> Seq Scan on from_only_test_10 from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(26 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other 
objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out new file mode 100644 index 00000000..77fc0dc5 --- /dev/null +++ b/expected/pathman_only_1.out @@ -0,0 +1,247 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); +NOTICE: sequence "from_only_test_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on 
from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on 
from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + -> Seq Scan on from_only_test_1 a + -> Seq Scan on from_only_test_2 a + -> Seq Scan on from_only_test_3 a + -> Seq Scan on from_only_test_4 a + -> Seq Scan on from_only_test_5 a + -> Seq Scan on from_only_test_6 a + -> Seq Scan on from_only_test_7 a + -> Seq Scan on from_only_test_8 a + -> Seq Scan on from_only_test_9 a + -> Seq Scan on from_only_test_10 a +(13 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------- + Hash Join + Hash Cond: (q1.val = q2.val) + CTE q1 + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + 
-> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + CTE q2 + -> Seq Scan on from_only_test + -> CTE Scan on q1 + -> Hash + -> CTE Scan on q2 +(19 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +---------------------------------------------------------- + Nested Loop + CTE q1 + -> Seq Scan on from_only_test from_only_test_1 + -> CTE Scan on q1 + -> Custom Scan (RuntimeAppend) + -> Seq Scan on from_only_test_1 from_only_test + -> Seq Scan on from_only_test_2 from_only_test + -> Seq Scan on from_only_test_3 from_only_test + -> Seq Scan on from_only_test_4 from_only_test + -> Seq Scan on from_only_test_5 from_only_test + -> Seq Scan on from_only_test_6 from_only_test + -> Seq Scan on from_only_test_7 from_only_test + -> Seq Scan on from_only_test_8 from_only_test + -> Seq Scan on from_only_test_9 from_only_test + -> Seq Scan on from_only_test_10 from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = 
$0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(26 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6dc25deb..36dd7e8d 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -121,52 +121,6 @@ SELECT max(val) FROM test.insert_date_test; /* check last date */ DROP TABLE test.insert_date_test CASCADE; -/* Test special case: ONLY statement with not-ONLY for partitioned table */ -CREATE TABLE test.from_only_test(val INT NOT NULL); -INSERT INTO test.from_only_test SELECT generate_series(1, 20); -SELECT pathman.create_range_partitions('test.from_only_test', 'val', 1, 2); - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test.from_only_test -UNION SELECT * FROM test.from_only_test; - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test -UNION SELECT * FROM ONLY test.from_only_test; - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test -UNION SELECT * FROM test.from_only_test -UNION SELECT * FROM ONLY test.from_only_test; - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test.from_only_test -UNION SELECT * FROM test.from_only_test -UNION SELECT * FROM test.from_only_test; - -/* not ok, ONLY|non-ONLY in one query */ -EXPLAIN (COSTS OFF) -SELECT * FROM test.from_only_test a JOIN ONLY test.from_only_test b USING(val); - -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM test.from_only_test), - q2 AS (SELECT * FROM ONLY test.from_only_test) -SELECT * FROM q1 JOIN q2 USING(val); - -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM ONLY test.from_only_test) -SELECT * FROM test.from_only_test JOIN q1 USING(val); - -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel WHERE id = (SELECT id FROM ONLY test.range_rel 
LIMIT 1); - -DROP TABLE test.from_only_test CASCADE; - - SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql new file mode 100644 index 00000000..e2813ea6 --- /dev/null +++ b/sql/pathman_only.sql @@ -0,0 +1,70 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; + + + +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + +VACUUM ANALYZE; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT 
* FROM test_only.from_only_test JOIN q1 USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + + + +DROP SCHEMA test_only CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index 15389655..c6e8c47a 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -14,15 +14,18 @@ #include "nodes/nodes.h" +#ifndef NATIVE_RELATION_TAGS + /* * This table is used to ensure that partitioned relation - * cant't be used with both and without ONLY modifiers. + * cant't be referenced as ONLY and non-ONLY at the same time. */ -static HTAB *per_table_relation_tags = NULL; -static int per_table_relation_tags_refcount = 0; - +static HTAB *per_table_relation_tags = NULL; -/* private struct stored by parenthood lists */ +/* + * Single row of 'per_table_relation_tags'. + * NOTE: do not reorder these fields. + */ typedef struct { Oid relid; /* key (part #1) */ @@ -30,6 +33,12 @@ typedef struct List *relation_tags; } relation_tags_entry; +#endif + +/* Also used in get_refcount_relation_tags() etc... */ +static int per_table_relation_tags_refcount = 0; + + /* Look through RTE's relation tags */ List * @@ -37,12 +46,15 @@ rte_fetch_tag(const uint32 query_id, const RangeTblEntry *rte, const char *key) { +#ifdef NATIVE_RELATION_TAGS + + return relation_tags_search(rte->custom_tags, key); + +#else + relation_tags_entry *htab_entry, htab_key = { rte->relid, query_id, NIL /* unused */ }; - AssertArg(rte); - AssertArg(key); - /* Skip if table is not initialized */ if (per_table_relation_tags) { @@ -56,6 +68,8 @@ rte_fetch_tag(const uint32 query_id, /* Not found, return stub value */ return NIL; + +#endif } /* Attach new relation tag to RTE. Returns KVP with duplicate key. 
*/ @@ -64,13 +78,23 @@ rte_attach_tag(const uint32 query_id, RangeTblEntry *rte, List *key_value_pair) { + /* Common variables */ + MemoryContext old_mcxt; + const char *current_key; + List *existing_kvp, + *temp_tags; /* rte->custom_tags OR + htab_entry->relation_tags */ + +#ifdef NATIVE_RELATION_TAGS + + /* Load relation tags to 'temp_tags' */ + temp_tags = rte->custom_tags; + +#else + relation_tags_entry *htab_entry, htab_key = { rte->relid, query_id, NIL /* unused */ }; bool found; - MemoryContext old_mcxt; - - AssertArg(rte); - AssertArg(key_value_pair && list_length(key_value_pair) == 2); /* We prefer to initialize this table lazily */ if (!per_table_relation_tags) @@ -81,7 +105,7 @@ rte_attach_tag(const uint32 query_id, memset(&hashctl, 0, sizeof(HASHCTL)); hashctl.entrysize = sizeof(relation_tags_entry); hashctl.keysize = offsetof(relation_tags_entry, relation_tags); - hashctl.hcxt = TAG_MEMORY_CONTEXT; + hashctl.hcxt = RELATION_TAG_MCXT; per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", start_elems, &hashctl, @@ -92,30 +116,38 @@ rte_attach_tag(const uint32 query_id, htab_entry = hash_search(per_table_relation_tags, &htab_key, HASH_ENTER, &found); - if (found) - { - const char *current_key; - List *existing_kvp; + /* Don't forget to initialize list! */ + if (!found) + htab_entry->relation_tags = NIL; - /* Extract key of this KVP */ - rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); + /* Load relation tags to 'temp_tags' */ + temp_tags = htab_entry->relation_tags; - /* Check if this KVP already exists */ - existing_kvp = relation_tags_search(htab_entry->relation_tags, - current_key); - if (existing_kvp) - return existing_kvp; /* return KVP with duplicate key */ - } +#endif - /* Don't forget to initialize list! 
*/ - else htab_entry->relation_tags = NIL; + /* Check that 'key_value_pair' is valid */ + AssertArg(key_value_pair && list_length(key_value_pair) == 2); - /* Add this KVP */ - old_mcxt = MemoryContextSwitchTo(TAG_MEMORY_CONTEXT); - htab_entry->relation_tags = lappend(htab_entry->relation_tags, - key_value_pair); + /* Extract key of this KVP */ + rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); + + /* Check if KVP with such key already exists */ + existing_kvp = relation_tags_search(temp_tags, current_key); + if (existing_kvp) + return existing_kvp; /* return KVP with duplicate key */ + + /* Add this KVP to relation tags list */ + old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); + temp_tags = lappend(temp_tags, key_value_pair); MemoryContextSwitchTo(old_mcxt); +/* Finally store 'temp_tags' to relation tags list */ +#ifdef NATIVE_RELATION_TAGS + rte->custom_tags = temp_tags; +#else + htab_entry->relation_tags = temp_tags; +#endif + /* Success! */ return NIL; } @@ -128,8 +160,8 @@ rte_deconstruct_tag(const List *key_value_pair, const char **key, /* ret value #1 */ const Value **value) /* ret value #2 */ { - const char *r_key; - const Value *r_value; + const char *r_key; + const Value *r_value; AssertArg(key_value_pair && list_length(key_value_pair) == 2); @@ -210,7 +242,9 @@ decr_refcount_relation_tags(void) { reset_query_id_generator(); +#ifndef NATIVE_RELATION_TAGS hash_destroy(per_table_relation_tags); per_table_relation_tags = NULL; +#endif } } diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h index 849e10d0..1521d112 100644 --- a/src/include/compat/relation_tags.h +++ b/src/include/compat/relation_tags.h @@ -21,8 +21,13 @@ -/* Memory context we're going to use for TAGs */ -#define TAG_MEMORY_CONTEXT TopTransactionContext +/* Does RTE contain 'custom_tags' list? 
*/ +// TODO: fix this macro once PgPro contains 'relation_tags' patch +// #define NATIVE_RELATION_TAGS + +/* Memory context we're going to use for tags */ +#define RELATION_TAG_MCXT TopTransactionContext + /* Safe TAG constructor (Integer) */ static inline List * @@ -32,7 +37,7 @@ make_rte_tag_int(char *key, int value) MemoryContext old_mcxt; /* Allocate TAG in a persistent memory context */ - old_mcxt = MemoryContextSwitchTo(TAG_MEMORY_CONTEXT); + old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); kvp = list_make2(makeString(key), makeInteger(value)); MemoryContextSwitchTo(old_mcxt); @@ -40,7 +45,6 @@ make_rte_tag_int(char *key, int value) } - List *rte_fetch_tag(const uint32 query_id, const RangeTblEntry *rte, const char *key); From ad49922c0bd2fe21e40d26d71dc4c018041477c0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Feb 2017 17:15:19 +0300 Subject: [PATCH 0245/1124] move get_rel_persistence() to pg_compat, fix warnings (do_we_hold_the_lock()) --- src/compat/pg_compat.c | 135 ++++++++++++++++++++------------- src/include/compat/pg_compat.h | 21 ++++- src/include/utils.h | 3 - src/pl_funcs.c | 2 + src/utils.c | 26 ------- src/xact_handling.c | 3 + 6 files changed, 105 insertions(+), 85 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 315942ce..24915871 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -13,6 +13,7 @@ #include "compat/pg_compat.h" +#include "access/htup_details.h" #include "catalog/pg_proc.h" #include "foreign/fdwapi.h" #include "optimizer/clauses.h" @@ -21,60 +22,11 @@ #include "port.h" #include "utils.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" #include -/* Common code */ -void -set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) -{ - double parent_rows = 0; - double parent_size = 0; - ListCell *l; - - foreach(l, root->append_rel_list) - { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex, - parentRTindex = rti; - 
RelOptInfo *childrel; - - /* append_rel_list contains all append rels; ignore others */ - if (appinfo->parent_relid != parentRTindex) - continue; - - childRTindex = appinfo->child_relid; - - childrel = find_base_rel(root, childRTindex); - Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); - - /* - * Accumulate size information from each live child. - */ - Assert(childrel->rows > 0); - - parent_rows += childrel->rows; - -#if PG_VERSION_NUM >= 90600 - parent_size += childrel->reltarget->width * childrel->rows; -#else - parent_size += childrel->width * childrel->rows; -#endif - } - - /* Set 'rows' for append relation */ - rel->rows = parent_rows; - -#if PG_VERSION_NUM >= 90600 - rel->reltarget->width = rint(parent_size / parent_rows); -#else - rel->width = rint(parent_size / parent_rows); -#endif - - rel->tuples = parent_rows; -} - /* * ---------- @@ -84,7 +36,6 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) #if PG_VERSION_NUM >= 90600 - /* * make_result * Build a Result plan node @@ -320,7 +271,6 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #else /* PG_VERSION_NUM >= 90500 */ - /* * set_dummy_rel_pathlist * Build a dummy path for a relation that's been excluded by constraints @@ -350,4 +300,85 @@ set_dummy_rel_pathlist(RelOptInfo *rel) } +/* + * Returns the relpersistence associated with a given relation. 
+ * + * NOTE: this function is implemented in 9.6 + */ +char +get_rel_persistence(Oid relid) +{ + HeapTuple tp; + Form_pg_class reltup; + char result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + + reltup = (Form_pg_class) GETSTRUCT(tp); + result = reltup->relpersistence; + ReleaseSysCache(tp); + + return result; +} + + #endif /* PG_VERSION_NUM >= 90600 */ + + + +/* + * ------------- + * Common code + * ------------- + */ + +void +set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) +{ + double parent_rows = 0; + double parent_size = 0; + ListCell *l; + + foreach(l, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index childRTindex, + parentRTindex = rti; + RelOptInfo *childrel; + + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + childRTindex = appinfo->child_relid; + + childrel = find_base_rel(root, childRTindex); + Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); + + /* + * Accumulate size information from each live child. 
+ */ + Assert(childrel->rows > 0); + + parent_rows += childrel->rows; + +#if PG_VERSION_NUM >= 90600 + parent_size += childrel->reltarget->width * childrel->rows; +#else + parent_size += childrel->width * childrel->rows; +#endif + } + + /* Set 'rows' for append relation */ + rel->rows = parent_rows; + +#if PG_VERSION_NUM >= 90600 + rel->reltarget->width = rint(parent_size / parent_rows); +#else + rel->width = rint(parent_size / parent_rows); +#endif + + rel->tuples = parent_rows; +} diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index a54b73d3..4e1873c1 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -20,12 +20,14 @@ #include "optimizer/paths.h" -void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); - +/* + * ---------- + * Variants + * ---------- + */ #if PG_VERSION_NUM >= 90600 - /* adjust_appendrel_attrs() */ #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ @@ -91,7 +93,6 @@ extern void set_rel_consider_parallel(PlannerInfo *root, #else /* PG_VERSION_NUM >= 90500 */ - /* adjust_appendrel_attrs() */ #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ @@ -145,7 +146,19 @@ extern void set_rel_consider_parallel(PlannerInfo *root, void set_dummy_rel_pathlist(RelOptInfo *rel); +/* get_rel_persistence() */ +char get_rel_persistence(Oid relid); + #endif /* PG_VERSION_NUM */ +/* + * ------------- + * Common code + * ------------- + */ + +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); + + #endif /* PG_COMPAT_H */ diff --git a/src/include/utils.h b/src/include/utils.h index 24bad286..eaf863fe 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -40,9 +40,6 @@ List * list_reverse(List *l); Oid get_rel_owner(Oid relid); char * get_rel_name_or_relid(Oid relid); Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); -#if PG_VERSION_NUM < 90600 -char 
get_rel_persistence(Oid relid); -#endif /* * Operator-related stuff. diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 9267b177..b0ea3861 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -8,6 +8,8 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "init.h" #include "utils.h" #include "pathman.h" diff --git a/src/utils.c b/src/utils.c index 38960b3a..1f2ce440 100644 --- a/src/utils.c +++ b/src/utils.c @@ -18,7 +18,6 @@ #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "catalog/pg_operator.h" -#include "catalog/pg_inherits.h" #include "commands/extension.h" #include "miscadmin.h" #include "optimizer/var.h" @@ -230,31 +229,6 @@ get_attribute_type(Oid relid, const char *attname, bool missing_ok) return InvalidOid; } -#if PG_VERSION_NUM < 90600 -/* - * Returns the relpersistence associated with a given relation. - * - * NOTE: this function is implemented in 9.6 - */ -char -get_rel_persistence(Oid relid) -{ - HeapTuple tp; - Form_pg_class reltup; - char result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for relation %u", relid); - - reltup = (Form_pg_class) GETSTRUCT(tp); - result = reltup->relpersistence; - ReleaseSysCache(tp); - - return result; -} -#endif - /* diff --git a/src/xact_handling.c b/src/xact_handling.c index 7eae0f25..2c49067e 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -171,6 +171,9 @@ xact_object_is_visible(TransactionId obj_xmin) /* * Do we hold the specified lock? 
*/ +#ifdef __GNUC__ +__attribute__((unused)) +#endif static inline bool do_we_hold_the_lock(Oid relid, LOCKMODE lockmode) { From 3999694c6d7ac7d514fee78b87f69ac7c5e70ca8 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 27 Feb 2017 18:26:51 +0300 Subject: [PATCH 0246/1124] check if operator + and - are available for given type in append_range_partition() and prepend_range_partition() functions --- init.sql | 10 ++++++++++ range.sql | 8 ++++++++ src/pl_funcs.c | 16 ++++++++++++++++ src/pl_range_funcs.c | 2 +- 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/init.sql b/init.sql index 583080ad..1abf06e0 100644 --- a/init.sql +++ b/init.sql @@ -896,3 +896,13 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; + +/* + * + */ +CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( + type_oid OID, + opname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' +LANGUAGE C; + diff --git a/range.sql b/range.sql index 84c9fefa..2fb46fcc 100644 --- a/range.sql +++ b/range.sql @@ -573,6 +573,10 @@ BEGIN v_atttype := @extschema@.get_partition_key_type(parent_relid); + IF NOT @extschema@.is_operator_supported(v_atttype, '+') THEN + RAISE EXCEPTION 'Type % doesn''t support ''+'' operator', v_atttype::regtype; + END IF; + SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid @@ -678,6 +682,10 @@ BEGIN v_atttype := @extschema@.get_partition_key_type(parent_relid); + IF NOT @extschema@.is_operator_supported(v_atttype, '-') THEN + RAISE EXCEPTION 'Type % doesn''t support ''-'' operator', v_atttype::regtype; + END IF; + SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 9267b177..2a167655 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -69,6 +69,8 @@ PG_FUNCTION_INFO_V1( check_security_policy ); 
PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); +PG_FUNCTION_INFO_V1( is_operator_supported ); + /* * User context for function show_partition_list_internal(). @@ -900,6 +902,20 @@ check_security_policy(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +Datum +is_operator_supported(PG_FUNCTION_ARGS) +{ + Oid tp = PG_GETARG_OID(0); + char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + Oid opid; + + opid = compatible_oper_opid(list_make1(makeString(opname)), tp, tp, true); + if (!OidIsValid(opid)) + PG_RETURN_BOOL(false); + + PG_RETURN_BOOL(true); +} + /* * ------- diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 29cb64b8..ea8b51a1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -795,7 +795,7 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) /* * If operator result type isn't the same as original value then * convert it. We need this to make sure that specified interval would - * change the _origianal_ value somehow. For example, if we add one second + * change the _original_ value somehow. For example, if we add one second * to a date then we'll get a timestamp which is one second later than * original date (obviously). 
But when we convert it back to a date we will * get the same original value meaning that one second interval wouldn't From bf219ed15bfe2d68ed6ed3536f928815498edaf9 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 2 Mar 2017 16:13:50 +0300 Subject: [PATCH 0247/1124] create_range_partitions() for array of boundaries --- range.sql | 75 +++++++++++++++++++++++++++++++++++++++++--- src/pl_range_funcs.c | 73 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 4 deletions(-) diff --git a/range.sql b/range.sql index 2fb46fcc..a923ec9f 100644 --- a/range.sql +++ b/range.sql @@ -317,10 +317,6 @@ BEGIN attribute := lower(attribute); PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - IF p_interval <= 0 THEN - RAISE EXCEPTION 'interval must be positive'; - END IF; - /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, attribute, @@ -435,6 +431,77 @@ BEGIN END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions2( + parent_relid REGCLASS, + attribute TEXT, + bounds ANYARRAY) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER; +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + bounds[0], + bounds[array_length(bounds, 1) - 1]); + + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, 
range_interval) + VALUES (parent_relid, attribute, 2, NULL); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, bounds); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN 0; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( + parent_relid REGCLASS, + value ANYARRAY) +RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' +LANGUAGE C; + + /* * Split RANGE partition */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index ea8b51a1..090dd723 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -69,6 +69,8 @@ PG_FUNCTION_INFO_V1( merge_range_partitions ); PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( create_range_partitions_internal ); + /* * ----------------------------- @@ -1007,3 +1009,74 @@ drop_table_by_oid(Oid relid) RemoveRelations(n); } + + +Datum +create_range_partitions_internal(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + // char *attname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + int16 typlen; + bool typbyval; + char typalign; + FmgrInfo cmp_func; + + /* bounds */ + ArrayType *arr = PG_GETARG_ARRAYTYPE_P(1); + Oid elemtype = ARR_ELEMTYPE(arr); + Datum *datums; + bool *nulls; + int ndatums; + int i; + + /* Extract bounds */ + get_typlenbyvalalign(elemtype, &typlen, &typbyval, &typalign); + deconstruct_array(arr, elemtype, + typlen, 
typbyval, typalign, + &datums, &nulls, &ndatums); + + /* Check if bounds array is ascending */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(elemtype), + getBaseType(elemtype)); + for (i = 0; i < ndatums-1; i++) + { + /* + * Only first bound can be NULL + * + * XXX Probably the last one too... + */ + if (nulls[i]) + { + if (i == 0) + continue; + else + elog(ERROR, + "Only first bound can be NULL"); + } + + if (DatumGetInt32(FunctionCall2(&cmp_func, datums[i], datums[i+1])) >= 0) + elog(ERROR, + "Bounds array must be ascending"); + } + + /* Create partitions */ + for (i = 0; i < ndatums-1; i++) + { + Bound start = nulls[i] ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datums[i]); + Bound end = nulls[i+1] ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(datums[i+1]); + + (void) create_single_range_partition_internal(relid, + &start, + &end, + elemtype, + NULL, + NULL); + } + + PG_RETURN_VOID(); +} From 27cac1d2b7f90e45a99513e368315950ad58fcbd Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Mar 2017 19:30:06 +0300 Subject: [PATCH 0248/1124] refactoring, clean code & comments, introduce debug_compat_features.h, new subsystems: 'rowmarks_fix' & 'expand_rte_hook', changed behavior of pathman_rel_pathlist_hook() --- Makefile | 3 +- sql/pathman_rowmarks.sql | 1 - src/compat/expand_rte_hook.c | 59 +++++++ src/compat/pg_compat.c | 2 +- src/compat/relation_tags.c | 1 + src/compat/rowmarks_fix.c | 178 ++++++++++++++++++++ src/hooks.c | 24 +-- src/include/compat/debug_compat_features.h | 20 +++ src/include/compat/expand_rte_hook.h | 37 +++++ src/include/compat/pg_compat.h | 4 +- src/include/compat/relation_tags.h | 16 +- src/include/compat/rowmarks_fix.h | 43 +++++ src/include/planner_tree_modification.h | 7 +- src/partition_creation.c | 2 +- src/partition_filter.c | 2 +- src/pg_pathman.c | 6 +- src/planner_tree_modification.c | 184 ++------------------- src/relation_info.c | 2 +- src/utility_stmt_hooking.c | 10 +- 19 files changed, 395 insertions(+), 206 
deletions(-) create mode 100644 src/compat/expand_rte_hook.c create mode 100644 src/compat/rowmarks_fix.c create mode 100644 src/include/compat/debug_compat_features.h create mode 100644 src/include/compat/expand_rte_hook.h create mode 100644 src/include/compat/rowmarks_fix.h diff --git a/Makefile b/Makefile index 6ef67a8b..80a74d0b 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,8 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o src/compat/relation_tags.o $(WIN32RES) + src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ + src/compat/rowmarks_fix.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 8397b7fc..72e40b8e 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -1,7 +1,6 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; - CREATE TABLE rowmarks.first(id int NOT NULL); CREATE TABLE rowmarks.second(id int NOT NULL); diff --git a/src/compat/expand_rte_hook.c b/src/compat/expand_rte_hook.c new file mode 100644 index 00000000..94c866b3 --- /dev/null +++ b/src/compat/expand_rte_hook.c @@ -0,0 +1,59 @@ +/* ------------------------------------------------------------------------ + * + * expand_rte_hook.c + * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' + * NOTE: this hook exists in PostgresPro + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "compat/expand_rte_hook.h" +#include "relation_info.h" +#include "init.h" + +#include "postgres.h" +#include "optimizer/prep.h" + + +#ifdef NATIVE_EXPAND_RTE_HOOK + +static expand_inherited_rtentry_hook_type 
expand_inherited_rtentry_hook_next = NULL; + +static void pathman_expand_inherited_rtentry_hook(PlannerInfo *root, + RangeTblEntry *rte, + Index rti); + + +/* Initialize 'expand_inherited_rtentry_hook' */ +void +init_expand_rte_hook(void) +{ + expand_inherited_rtentry_hook_next = expand_inherited_rtentry_hook; + expand_inherited_rtentry_hook = pathman_expand_inherited_rtentry_hook; +} + + +/* Fix parent's RowMark (makes 'rowmarks_fix' pointless) */ +static void +pathman_expand_inherited_rtentry_hook(PlannerInfo *root, + RangeTblEntry *rte, + Index rti) +{ + PlanRowMark *oldrc; + + if (!IsPathmanReady()) + return; + + /* Check that table is partitioned by pg_pathman */ + if (!get_pathman_relation_info(rte->relid)) + return; + + /* HACK: fix rowmark for parent (for preprocess_targetlist() etc) */ + oldrc = get_plan_rowmark(root->rowMarks, rti); + if (oldrc) + oldrc->isParent = true; +} + +#endif /* NATIVE_EXPAND_RTE_HOOK */ diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 24915871..766cfc74 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * pg_compat.c - * Compatibility tools + * Compatibility tools for PostgreSQL API * * Copyright (c) 2016, Postgres Professional * diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index c6e8c47a..b7d2260b 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -2,6 +2,7 @@ * * relation_tags.c * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry + * NOTE: implementations for vanilla and PostgresPro differ * * Copyright (c) 2017, Postgres Professional * diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c new file mode 100644 index 00000000..21259e66 --- /dev/null +++ b/src/compat/rowmarks_fix.c @@ -0,0 +1,178 @@ +/* ------------------------------------------------------------------------ + * + * rowmarks_fix.h + * Hack incorrect RowMark 
generation due to unset 'RTE->inh' flag + * NOTE: this code is only useful for vanilla + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "compat/rowmarks_fix.h" +#include "planner_tree_modification.h" + +#include "access/sysattr.h" +#include "catalog/pg_type.h" +#include "nodes/relation.h" +#include "nodes/nodeFuncs.h" +#include "utils/builtins.h" +#include "utils/rel.h" + + +#ifndef NATIVE_PARTITIONING_ROWMARKS + +/* Special column name for rowmarks */ +#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) +#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) + + +static void lock_rows_visitor(Plan *plan, void *context); +static List *get_tableoids_list(List *tlist); + + +/* Final rowmark processing for partitioned tables */ +void +postprocess_lock_rows(List *rtable, Plan *plan) +{ + plan_tree_walker(plan, lock_rows_visitor, rtable); +} + +/* + * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions + * + * This is necessary since preprocess_targetlist() heavily + * depends on the 'inh' flag which we have to unset. + * + * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' + * relnames into 'tableoid:rowmarkId'. 
+ */ +void +rowmark_add_tableoids(Query *parse) +{ + ListCell *lc; + + /* Generate 'tableoid' for partitioned table rowmark */ + foreach (lc, parse->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(lc); + Oid parent = getrelid(rc->rti, parse->rtable); + Var *var; + TargetEntry *tle; + char resname[64]; + + /* Check that table is partitioned */ + if (!get_pathman_relation_info(parent)) + continue; + + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); + + /* Use parent's Oid as TABLEOID_STR's key (%u) */ + snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); + + tle = makeTargetEntry((Expr *) var, + list_length(parse->targetList) + 1, + pstrdup(resname), + true); + + /* There's no problem here since new attribute is junk */ + parse->targetList = lappend(parse->targetList, tle); + } +} + +/* + * Extract target entries with resnames beginning with TABLEOID_STR + * and var->varoattno == TableOidAttributeNumber + */ +static List * +get_tableoids_list(List *tlist) +{ + List *result = NIL; + ListCell *lc; + + foreach (lc, tlist) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *var = (Var *) te->expr; + + if (!IsA(var, Var)) + continue; + + /* Check that column name begins with TABLEOID_STR & it's tableoid */ + if (var->varoattno == TableOidAttributeNumber && + (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && + 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) + { + result = lappend(result, te); + } + } + + return result; +} + +/* + * Find 'TABLEOID_STR%u' attributes that were manually + * created for partitioned tables and replace Oids + * (used for '%u') with expected rc->rowmarkIds + */ +static void +lock_rows_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + LockRows *lock_rows = (LockRows *) plan; + Plan *lock_child = outerPlan(plan); + List *tableoids; + ListCell *lc; + + if (!IsA(lock_rows, LockRows)) + return; + + Assert(rtable && 
IsA(rtable, List) && lock_child); + + /* Select tableoid attributes that must be renamed */ + tableoids = get_tableoids_list(lock_child->targetlist); + if (!tableoids) + return; /* this LockRows has nothing to do with partitioned table */ + + foreach (lc, lock_rows->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + Oid parent_oid = getrelid(rc->rti, rtable); + ListCell *mark_lc; + List *finished_tes = NIL; /* postprocessed target entries */ + + foreach (mark_lc, tableoids) + { + TargetEntry *te = (TargetEntry *) lfirst(mark_lc); + const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); + Datum cur_oid_datum; + + cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); + + if (DatumGetObjectId(cur_oid_datum) == parent_oid) + { + char resname[64]; + + /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); + te->resname = pstrdup(resname); + + finished_tes = lappend(finished_tes, te); + } + } + + /* Remove target entries that have been processed in this step */ + foreach (mark_lc, finished_tes) + tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); + + if (list_length(tableoids) == 0) + break; /* nothing to do */ + } +} + +#endif /* NATIVE_PARTITIONING_ROWMARKS */ diff --git a/src/hooks.c b/src/hooks.c index 315c030c..a9c4796c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -10,6 +10,7 @@ #include "compat/pg_compat.h" #include "compat/relation_tags.h" +#include "compat/rowmarks_fix.h" #include "hooks.h" #include "init.h" @@ -207,14 +208,16 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; - /* This works only for SELECTs or INSERTs on simple relations */ + /* + * Skip if it's a result relation (UPDATE | DELETE | INSERT), + * or not a (partitioned) physical relation at all. 
+ */ if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION || - (root->parse->commandType != CMD_SELECT && - root->parse->commandType != CMD_INSERT)) /* INSERT INTO ... SELECT ... */ + root->parse->resultRelation == rti) return; - /* Skip if this table is not allowed to act as parent (see FROM ONLY) */ + /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) return; @@ -245,7 +248,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, int32 type_mod; TypeCacheEntry *tce; - /* Make Var from patition column */ + /* Make Var from partition column */ get_rte_attribute_type(rte, prel->attnum, &vartypeid, &type_mod, &varcollid); var = makeVar(rti, prel->attnum, vartypeid, type_mod, varcollid, 0); @@ -255,17 +258,18 @@ pathman_rel_pathlist_hook(PlannerInfo *root, tce = lookup_type_cache(var->vartype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *)var, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) var, NULL, tce->lt_opr, NULL, false); if (pathkeys) pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *)var, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) var, NULL, tce->gt_opr, NULL, false); if (pathkeys) pathkeyDesc = (PathKey *) linitial(pathkeys); } - rte->inh = true; /* we must restore 'inh' flag! */ + /* HACK: we must restore 'inh' flag! 
*/ + rte->inh = true; children = PrelGetChildrenArray(prel); ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); @@ -475,7 +479,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Increment parenthood_statuses refcount */ + /* Increment relation tags refcount */ incr_refcount_relation_tags(); /* Modify query tree if needed */ @@ -496,7 +500,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Decrement parenthood_statuses refcount */ + /* Decrement relation tags refcount */ decr_refcount_relation_tags(); /* HACK: restore queryId set by pg_stat_statements */ diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h new file mode 100644 index 00000000..0eb90db3 --- /dev/null +++ b/src/include/compat/debug_compat_features.h @@ -0,0 +1,20 @@ +/* ------------------------------------------------------------------------ + * + * debug_custom_features.h + * Macros to control PgPro-related features etc + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* Main toggle */ +#define ENABLE_PGPRO_PATCHES + +/* PgPro exclusive features */ +#define ENABLE_EXPAND_RTE_HOOK +#define ENABLE_RELATION_TAGS +#define ENABLE_PATHMAN_AWARE_COPY_WIN32 + +/* Hacks for vanilla */ +#define ENABLE_ROWMARKS_FIX diff --git a/src/include/compat/expand_rte_hook.h b/src/include/compat/expand_rte_hook.h new file mode 100644 index 00000000..51b57dd3 --- /dev/null +++ b/src/include/compat/expand_rte_hook.h @@ -0,0 +1,37 @@ +/* ------------------------------------------------------------------------ + * + * expand_rte_hook.h + * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' + * NOTE: this hook exists in PostgresPro + * + * Copyright (c) 
2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef EXPAND_RTE_HOOK_H +#define EXPAND_RTE_HOOK_H + +#include "compat/debug_compat_features.h" + + +/* Does PostgreSQL have 'expand_inherited_rtentry_hook'? */ +/* TODO: fix this definition once PgPro contains 'expand_rte_hook' patch */ +#if defined(ENABLE_PGPRO_PATCHES) && \ + defined(ENABLE_EXPAND_RTE_HOOK) /* && ... */ +#define NATIVE_EXPAND_RTE_HOOK +#endif + + +#ifdef NATIVE_EXPAND_RTE_HOOK + +void init_expand_rte_hook(void); + +#else + +#define init_expand_rte_hook() ( (void) true ) + +#endif /* NATIVE_EXPAND_RTE_HOOK */ + + +#endif /* EXPAND_RTE_HOOK_H */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 4e1873c1..0928f28d 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * pg_compat.h - * Compatibility tools + * Compatibility tools for PostgreSQL API * * Copyright (c) 2016, Postgres Professional * @@ -11,9 +11,9 @@ #ifndef PG_COMPAT_H #define PG_COMPAT_H +#include "compat/debug_compat_features.h" #include "postgres.h" - #include "nodes/relation.h" #include "nodes/pg_list.h" #include "optimizer/cost.h" diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h index 1521d112..cbd80b82 100644 --- a/src/include/compat/relation_tags.h +++ b/src/include/compat/relation_tags.h @@ -2,6 +2,7 @@ * * relation_tags.h * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry + * NOTE: implementations for vanilla and PostgresPro differ * * Copyright (c) 2017, Postgres Professional * @@ -11,8 +12,7 @@ #ifndef RELATION_TAGS_H #define RELATION_TAGS_H - -#include "pathman.h" +#include "compat/debug_compat_features.h" #include "postgres.h" #include "nodes/relation.h" @@ -20,10 +20,12 @@ #include "utils/memutils.h" - /* Does RTE contain 'custom_tags' list? 
*/ -// TODO: fix this macro once PgPro contains 'relation_tags' patch -// #define NATIVE_RELATION_TAGS +/* TODO: fix this definition once PgPro contains 'relation_tags' patch */ +#if defined(ENABLE_PGPRO_PATCHES) && \ + defined(ENABLE_RELATION_TAGS) /* && ... */ +#define NATIVE_RELATION_TAGS +#endif /* Memory context we're going to use for tags */ #define RELATION_TAG_MCXT TopTransactionContext @@ -50,8 +52,8 @@ List *rte_fetch_tag(const uint32 query_id, const char *key); List *rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair); + RangeTblEntry *rte, + List *key_value_pair); List *relation_tags_search(List *custom_tags, diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h new file mode 100644 index 00000000..4e441388 --- /dev/null +++ b/src/include/compat/rowmarks_fix.h @@ -0,0 +1,43 @@ +/* ------------------------------------------------------------------------ + * + * rowmarks_fix.h + * Hack incorrect RowMark generation due to unset 'RTE->inh' flag + * NOTE: this code is only useful for vanilla + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef ROWMARKS_FIX_H +#define ROWMARKS_FIX_H + +#include "compat/debug_compat_features.h" +#include "compat/expand_rte_hook.h" + +#include "postgres.h" +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" + + +/* + * If PostgreSQL supports 'expand_inherited_rtentry_hook', + * our hacks are completely unnecessary. 
+ */ +#if defined(ENABLE_PGPRO_PATCHES) && \ + defined(ENABLE_ROWMARKS_FIX) && \ + defined(NATIVE_EXPAND_RTE_HOOK /* dependency */ ) +#define NATIVE_PARTITIONING_ROWMARKS +#endif + + +#ifdef NATIVE_PARTITIONING_ROWMARKS +#define postprocess_lock_rows(rtable, plan) ( (void) true ) +#define rowmark_add_tableoids(parse) ( (void) true ) +#else +void postprocess_lock_rows(List *rtable, Plan *plan); +void rowmark_add_tableoids(Query *parse); +#endif /* NATIVE_PARTITIONING_ROWMARKS */ + + +#endif /* ROWMARKS_FIX_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index e69f0b1e..17e17fb4 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,7 +34,6 @@ void pathman_transform_query(Query *parse); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); -void postprocess_lock_rows(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ @@ -45,12 +44,12 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled (default) */ } rel_parenthood_status; -#define PARENTHOOD_TAG CppAsString(PARENTHOOD) - void assign_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte, rel_parenthood_status new_status); -rel_parenthood_status get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte); + +rel_parenthood_status get_rel_parenthood_status(uint32 query_id, + RangeTblEntry *rte); #endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 61650ae7..a84b3480 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -316,7 +316,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL)) { - Oid base_bound_type; /* base type of prel->atttype */ + Oid base_bound_type; /* base type of prel->atttype 
*/ Oid base_value_type; /* base type of value_type */ /* Fetch PartRelationInfo by 'relid' */ diff --git a/src/partition_filter.c b/src/partition_filter.c index d46bc937..4e8b171a 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -30,7 +30,7 @@ /* - * NOTE: 'estate->es_query_cxt' as data storage + * HACK: 'estate->es_query_cxt' as data storage * * We use this struct as an argument for fake * MemoryContextCallback pf_memcxt_callback() diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 00ac0e79..1a79c587 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -9,6 +9,7 @@ * ------------------------------------------------------------------------ */ +#include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" #include "init.h" @@ -156,6 +157,9 @@ _PG_init(void) process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + /* Initialize PgPro-specific subsystems */ + init_expand_rte_hook(); + /* Initialize static data for all subsystems */ init_main_pathman_toggles(); init_runtimeappend_static_data(); @@ -313,7 +317,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->inh = false; + child_rte->inh = false; /* relation has no children */ child_rte->requiredPerms = 0; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 844713fa..fb0c8551 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -9,46 +9,38 @@ */ #include "compat/relation_tags.h" +#include "compat/rowmarks_fix.h" #include "nodes_common.h" #include "partition_filter.h" #include "planner_tree_modification.h" -#include "rangeset.h" -#include "access/sysattr.h" -#include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" #include "storage/lmgr.h" 
-#include "utils/builtins.h" #include "utils/syscache.h" -/* Special column name for rowmarks */ -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) +/* for assign_rel_parenthood_status() */ +#define PARENTHOOD_TAG CppAsString(PARENTHOOD) static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); -static void rowmark_add_tableoids(Query *parse); static void handle_modification_query(Query *parse); static void partition_filter_visitor(Plan *plan, void *context); -static void lock_rows_visitor(Plan *plan, void *context); -static List *get_tableoids_list(List *tlist); - static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); /* - * We have to mark each Query with a unique id in order - * to recognize them properly. + * HACK: We have to mark each Query with a unique + * id in order to recognize them properly. */ #define QUERY_ID_INITIAL 0 -static uint32 latest_query_id = QUERY_ID_INITIAL; +static uint32 latest_query_id = QUERY_ID_INITIAL; void @@ -184,11 +176,7 @@ pathman_transform_query_walker(Node *node, void *context) * ---------------------- */ -/* - * Disable standard inheritance if table is partitioned by pg_pathman. - * - * This function sets RangeTblEntry::inh flag to false. - */ +/* Disable standard inheritance if table is partitioned by pg_pathman */ static void disable_standard_inheritance(Query *parse) { @@ -215,7 +203,10 @@ disable_standard_inheritance(Query *parse) /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - /* We'll set this flag later */ + /* + * HACK: unset the 'inh' flag to disable standard + * planning. We'll set it again later. 
+ */ rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ @@ -229,53 +220,6 @@ disable_standard_inheritance(Query *parse) } } -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. - */ -static void -rowmark_add_tableoids(Query *parse) -{ - ListCell *lc; - - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } -} - /* Checks if query affects only one partition */ static void handle_modification_query(Query *parse) @@ -373,7 +317,7 @@ handle_modification_query(Query *parse) /* Update RTE's relid */ rte->relid = child; - /* Finally disable standard planning */ + /* HACK: unset the 'inh' flag (no children) */ rte->inh = false; } } @@ -442,110 +386,6 @@ partition_filter_visitor(Plan *plan, void *context) } -/* - * ----------------------- - * Rowmark-related stuff - * ----------------------- - */ - -/* Final rowmark processing for partitioned tables */ -void -postprocess_lock_rows(List *rtable, Plan *plan) -{ - plan_tree_walker(plan, lock_rows_visitor, rtable); -} - -/* - * Extract target entries 
with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) -{ - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if (!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); - - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; - - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), 
"tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); - - finished_tes = lappend(finished_tes, te); - } - } - - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); - - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } -} - - /* * ----------------------------------------------- * Parenthood safety checks (SELECT * FROM ONLY) diff --git a/src/relation_info.c b/src/relation_info.c index fb67f845..bde960c7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -214,7 +214,7 @@ refresh_pathman_relation_info(Oid relid, if (prel_children) pfree(prel_children); - /* Read additional parameters ('enable_parent' and 'auto' at the moment) */ + /* Read additional parameters ('enable_parent' at the moment) */ if (read_pathman_params(relid, param_values, param_isnull)) { prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index bce69ce1..1a5079c8 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -11,6 +11,7 @@ * ------------------------------------------------------------------------ */ +#include "compat/debug_compat_features.h" #include "init.h" #include "utility_stmt_hooking.h" #include "partition_filter.h" @@ -33,10 +34,11 @@ #include "libpq/libpq.h" -/* - * Determine whether we should enable COPY or not (PostgresPro has a fix). 
- */ -#if defined(WIN32) && !defined(PGPRO_PATHMAN_AWARE_COPY) +/* Determine whether we should enable COPY or not (PostgresPro has a fix) */ +#if defined(WIN32) && \ + (!defined(ENABLE_PGPRO_PATCHES) || \ + !defined(ENABLE_PATHMAN_AWARE_COPY_WIN32) || \ + !defined(PGPRO_PATHMAN_AWARE_COPY)) #define DISABLE_PATHMAN_COPY #endif From ffe595e9e958736f0abfdaf3b56ff15a9c62d5ed Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Mar 2017 19:31:24 +0300 Subject: [PATCH 0249/1124] temporarily disabled EXPAND_RTE_HOOK & RELATION_TAGS --- src/include/compat/debug_compat_features.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index 0eb90db3..c668d4ce 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,8 +12,8 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -#define ENABLE_EXPAND_RTE_HOOK -#define ENABLE_RELATION_TAGS +//#define ENABLE_EXPAND_RTE_HOOK +//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 /* Hacks for vanilla */ From 01ff60bac967c863973dea9b980a476e70aa58ff Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 3 Mar 2017 14:45:30 +0300 Subject: [PATCH 0250/1124] added relnames and tablespaces arguments to create_range_partitions_internal() function --- range.sql | 105 ++++++++++++++++++------------------------- src/include/utils.h | 3 +- src/pl_hash_funcs.c | 76 +------------------------------ src/pl_range_funcs.c | 37 +++++++++++++-- src/utils.c | 80 +++++++++++++++++++++++++++++++++ 5 files changed, 160 insertions(+), 141 deletions(-) diff --git a/range.sql b/range.sql index a923ec9f..bfbc867c 100644 --- a/range.sql +++ b/range.sql @@ -60,6 +60,29 @@ BEGIN END $$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + attribute TEXT, + partition_data BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM 
@extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); +END +$$ LANGUAGE plpgsql; + /* * Creates RANGE partitions for specified relation based on datetime attribute */ @@ -81,18 +104,8 @@ DECLARE i INTEGER; BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; @@ -196,18 +209,8 @@ DECLARE i INTEGER; BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; @@ -304,18 +307,8 @@ DECLARE part_count INTEGER := 0; BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM 
@extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -374,18 +367,8 @@ DECLARE part_count INTEGER := 0; BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -435,7 +418,10 @@ $$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions2( parent_relid REGCLASS, attribute TEXT, - bounds ANYARRAY) + bounds ANYARRAY, + relnames TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE @@ -449,18 +435,8 @@ BEGIN RAISE EXCEPTION 'Bounds array must have at least two values'; END IF; - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); /* Check 
boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -476,7 +452,10 @@ BEGIN FROM @extschema@.get_plain_schema_and_relname(parent_relid); /* Create partitions */ - part_count := @extschema@.create_range_partitions_internal(parent_relid, bounds); + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + relnames, + tablespaces); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -489,7 +468,7 @@ BEGIN PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - RETURN 0; + RETURN part_count; END $$ LANGUAGE plpgsql; @@ -497,7 +476,9 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, - value ANYARRAY) + bounds ANYARRAY, + relnames TEXT[], + tablespaces TEXT[]) RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' LANGUAGE C; @@ -640,7 +621,8 @@ BEGIN v_atttype := @extschema@.get_partition_key_type(parent_relid); - IF NOT @extschema@.is_operator_supported(v_atttype, '+') THEN + IF NOT @extschema@.is_date_type(v_atttype) AND + NOT @extschema@.is_operator_supported(v_atttype, '+') THEN RAISE EXCEPTION 'Type % doesn''t support ''+'' operator', v_atttype::regtype; END IF; @@ -749,7 +731,8 @@ BEGIN v_atttype := @extschema@.get_partition_key_type(parent_relid); - IF NOT @extschema@.is_operator_supported(v_atttype, '-') THEN + IF NOT @extschema@.is_date_type(v_atttype) AND + NOT @extschema@.is_operator_supported(v_atttype, '-') THEN RAISE EXCEPTION 'Type % doesn''t support ''-'' operator', v_atttype::regtype; END IF; diff --git a/src/include/utils.h b/src/include/utils.h index 24bad286..09747e81 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -63,7 +63,8 @@ Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); - +char **deconstruct_text_array(Datum array, int 
*array_size); +RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); #endif /* PATHMAN_UTILS_H */ diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 55540196..a5390c6f 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -13,7 +13,6 @@ #include "relation_info.h" #include "utils.h" -#include "catalog/namespace.h" #include "catalog/pg_type.h" #include "utils/builtins.h" #include "utils/typcache.h" @@ -22,9 +21,6 @@ #include "utils/array.h" -static char **deconstruct_text_array(Datum array, int *array_size); - - /* Function declarations */ PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); @@ -89,16 +85,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) elog(ERROR, "size of 'tablespaces' must be equal to 'partitions_count'"); /* Convert partition names into RangeVars */ - if (partition_names) - { - rangevars = palloc(sizeof(RangeVar) * partition_names_size); - for (i = 0; i < partition_names_size; i++) - { - List *nl = stringToQualifiedNameList(partition_names[i]); - - rangevars[i] = makeRangeVarFromNameList(nl); - } - } + rangevars = qualified_relnames_to_rangevars(partition_names, partitions_count); /* Finally create HASH partitions */ for (i = 0; i < partitions_count; i++) @@ -181,64 +168,3 @@ build_hash_condition(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(result)); } - - -/* - * ------------------ - * Helper functions - * ------------------ - */ - -/* Convert Datum into CSTRING array */ -static char ** -deconstruct_text_array(Datum array, int *array_size) -{ - ArrayType *array_ptr = DatumGetArrayTypeP(array); - int16 elemlen; - bool elembyval; - char elemalign; - - Datum *elem_values; - bool *elem_nulls; - - int arr_size = 0; - - /* Check type invariant */ - Assert(ARR_ELEMTYPE(array_ptr) == TEXTOID); - - /* Check number of dimensions */ - if (ARR_NDIM(array_ptr) > 1) - elog(ERROR, "'partition_names' and 'tablespaces' may contain only 1 dimension"); - - get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), - 
&elemlen, &elembyval, &elemalign); - - deconstruct_array(array_ptr, - ARR_ELEMTYPE(array_ptr), - elemlen, elembyval, elemalign, - &elem_values, &elem_nulls, &arr_size); - - /* If there are actual values, convert them into CSTRINGs */ - if (arr_size > 0) - { - char **strings = palloc(arr_size * sizeof(char *)); - int i; - - for (i = 0; i < arr_size; i++) - { - if (elem_nulls[i]) - elog(ERROR, "'partition_names' and 'tablespaces' may not contain NULLs"); - - strings[i] = TextDatumGetCString(elem_values[i]); - } - - /* Return an array and it's size */ - *array_size = arr_size; - return strings; - } - /* Else emit ERROR */ - else elog(ERROR, "'partition_names' and 'tablespaces' may not be empty"); - - /* Keep compiler happy */ - return NULL; -} diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 090dd723..52c36105 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -1015,12 +1015,18 @@ Datum create_range_partitions_internal(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - // char *attname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); int16 typlen; bool typbyval; char typalign; FmgrInfo cmp_func; + /* partition names and tablespaces */ + char **partnames = NULL; + RangeVar **rangevars = NULL; + char **tablespaces = NULL; + int npartnames = 0; + int ntablespaces = 0; + /* bounds */ ArrayType *arr = PG_GETARG_ARRAYTYPE_P(1); Oid elemtype = ARR_ELEMTYPE(arr); @@ -1029,12 +1035,33 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) int ndatums; int i; + /* Extract partition names */ + if (!PG_ARGISNULL(2)) + { + partnames = deconstruct_text_array(PG_GETARG_DATUM(2), &npartnames); + rangevars = qualified_relnames_to_rangevars(partnames, npartnames); + } + + /* Extract partition tablespaces */ + if (!PG_ARGISNULL(3)) + tablespaces = deconstruct_text_array(PG_GETARG_DATUM(3), &ntablespaces); + /* Extract bounds */ get_typlenbyvalalign(elemtype, &typlen, &typbyval, &typalign); deconstruct_array(arr, elemtype, typlen, typbyval, typalign, &datums, 
&nulls, &ndatums); + if (partnames && npartnames != ndatums-1) + ereport(ERROR, (errmsg("wrong length of relnames array"), + errdetail("relnames number must be less than " + "bounds array length by one"))); + + if (tablespaces && ntablespaces != ndatums-1) + ereport(ERROR, (errmsg("wrong length of tablespaces array"), + errdetail("tablespaces number must be less than " + "bounds array length by one"))); + /* Check if bounds array is ascending */ fill_type_cmp_fmgr_info(&cmp_func, getBaseType(elemtype), @@ -1069,14 +1096,16 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) Bound end = nulls[i+1] ? MakeBoundInf(PLUS_INFINITY) : MakeBound(datums[i+1]); + RangeVar *rv = npartnames > 0 ? rangevars[i] : NULL; + char *tablespace = ntablespaces > 0 ? tablespaces[i] : NULL; (void) create_single_range_partition_internal(relid, &start, &end, elemtype, - NULL, - NULL); + rv, + tablespace); } - PG_RETURN_VOID(); + PG_RETURN_INT32(ndatums-1); } diff --git a/src/utils.c b/src/utils.c index 38960b3a..f0b88461 100644 --- a/src/utils.c +++ b/src/utils.c @@ -15,6 +15,7 @@ #include "access/sysattr.h" #include "access/xact.h" #include "catalog/heap.h" +#include "catalog/namespace.h" #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "catalog/pg_operator.h" @@ -492,3 +493,82 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ return interval_binary; } + + +/* Convert Datum into CSTRING array */ +char ** +deconstruct_text_array(Datum array, int *array_size) +{ + ArrayType *array_ptr = DatumGetArrayTypeP(array); + int16 elemlen; + bool elembyval; + char elemalign; + + Datum *elem_values; + bool *elem_nulls; + + int arr_size = 0; + + /* Check type invariant */ + Assert(ARR_ELEMTYPE(array_ptr) == TEXTOID); + + /* Check number of dimensions */ + if (ARR_NDIM(array_ptr) > 1) + elog(ERROR, "'partition_names' and 'tablespaces' may contain only 1 dimension"); + + get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), + &elemlen, &elembyval, 
&elemalign); + + deconstruct_array(array_ptr, + ARR_ELEMTYPE(array_ptr), + elemlen, elembyval, elemalign, + &elem_values, &elem_nulls, &arr_size); + + /* If there are actual values, convert them into CSTRINGs */ + if (arr_size > 0) + { + char **strings = palloc(arr_size * sizeof(char *)); + int i; + + for (i = 0; i < arr_size; i++) + { + if (elem_nulls[i]) + elog(ERROR, "'partition_names' and 'tablespaces' may not contain NULLs"); + + strings[i] = TextDatumGetCString(elem_values[i]); + } + + /* Return an array and it's size */ + *array_size = arr_size; + return strings; + } + /* Else emit ERROR */ + else elog(ERROR, "'partition_names' and 'tablespaces' may not be empty"); + + /* Keep compiler happy */ + return NULL; +} + +/* + * Convert schema qualified relation names array to RangeVars array + */ +RangeVar ** +qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) +{ + RangeVar **rangevars = NULL; + int i; + + /* Convert partition names into RangeVars */ + if (relnames) + { + rangevars = palloc(sizeof(RangeVar) * nrelnames); + for (i = 0; i < nrelnames; i++) + { + List *nl = stringToQualifiedNameList(relnames[i]); + + rangevars[i] = makeRangeVarFromNameList(nl); + } + } + + return rangevars; +} From 1c1ffaf285cf904aeb451fe28d2b62d7f9676d68 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Mar 2017 16:24:21 +0300 Subject: [PATCH 0251/1124] improved array reallocation in pathman_rel_pathlist_hook() --- src/hooks.c | 45 +++++++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index a9c4796c..13909eda 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -196,9 +196,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, RangeTblEntry *rte) { const PartRelationInfo *prel; - RangeTblEntry **new_rte_array; - RelOptInfo **new_rel_array; - int len; + int irange_len; /* Invoke original hook if needed */ if (set_rel_pathlist_hook_next != NULL) @@ -290,35 +288,34 @@ 
pathman_rel_pathlist_hook(PlannerInfo *root, } /* Get number of selected partitions */ - len = irange_list_length(ranges); + irange_len = irange_list_length(ranges); if (prel->enable_parent) - len++; /* add parent too */ + irange_len++; /* also add parent */ /* Expand simple_rte_array and simple_rel_array */ - if (len > 0) + if (irange_len > 0) { - /* Expand simple_rel_array and simple_rte_array */ - new_rel_array = (RelOptInfo **) - palloc0((root->simple_rel_array_size + len) * sizeof(RelOptInfo *)); + int current_len = root->simple_rel_array_size, + new_len = current_len + irange_len; - /* simple_rte_array is an array equivalent of the rtable list */ - new_rte_array = (RangeTblEntry **) - palloc0((root->simple_rel_array_size + len) * sizeof(RangeTblEntry *)); + /* Expand simple_rel_array */ + root->simple_rel_array = (RelOptInfo **) + repalloc(root->simple_rel_array, + new_len * sizeof(RelOptInfo *)); - /* Copy relations to the new arrays */ - for (i = 0; i < root->simple_rel_array_size; i++) - { - new_rel_array[i] = root->simple_rel_array[i]; - new_rte_array[i] = root->simple_rte_array[i]; - } + memset((void *) &root->simple_rel_array[current_len], 0, + irange_len * sizeof(RelOptInfo *)); + + /* Expand simple_rte_array */ + root->simple_rte_array = (RangeTblEntry **) + repalloc(root->simple_rte_array, + new_len * sizeof(RangeTblEntry *)); - /* Free old arrays */ - pfree(root->simple_rel_array); - pfree(root->simple_rte_array); + memset((void *) &root->simple_rte_array[current_len], 0, + irange_len * sizeof(RangeTblEntry *)); - root->simple_rel_array_size += len; - root->simple_rel_array = new_rel_array; - root->simple_rte_array = new_rte_array; + /* Don't forget to update array size! 
*/ + root->simple_rel_array_size = new_len; } /* Parent has already been locked by rewriter */ From 27299868655982b73c6a2683296dd9267d707290 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Mar 2017 17:39:02 +0300 Subject: [PATCH 0252/1124] refactoring & code cleansing, reformat pg_pathman.c --- src/hooks.c | 3 +- src/include/init.h | 11 +- src/include/pathman.h | 74 +- src/include/utils.h | 1 - src/init.c | 34 +- src/pg_pathman.c | 1640 +++++++++++++++++++++-------------------- 6 files changed, 866 insertions(+), 897 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 13909eda..93ba5441 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -15,6 +15,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" +#include "pathman_workers.h" #include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" @@ -591,7 +592,7 @@ pathman_shmem_startup_hook(void) /* Allocate shared memory objects */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); - init_shmem_config(); + init_concurrent_part_task_slots(); LWLockRelease(AddinShmemInitLock); } diff --git a/src/include/init.h b/src/include/init.h index 6b342ed2..7b5459b0 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -94,11 +94,15 @@ extern PathmanInitState pg_pathman_init_state; #define DEFAULT_INIT_CALLBACK InvalidOid #define DEFAULT_SPAWN_USING_BGW false +/* Other default values (for GUCs etc) */ +#define DEFAULT_PATHMAN_ENABLE true +#define DEFAULT_OVERRIDE_COPY true + /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ #define LOWEST_COMPATIBLE_FRONT 0x010300 -/* Current version on native C library (0xAA_BB_CC) */ +/* Current version of native C library (0xAA_BB_CC) */ #define CURRENT_LIB_VERSION 0x010300 @@ -118,9 +122,10 @@ void restore_pathman_init_state(const PathmanInitState *temp_init_state); */ void init_main_pathman_toggles(void); +/* + * Shared & local config. 
+ */ Size estimate_pathman_shmem_size(void); -void init_shmem_config(void); - bool load_config(void); void unload_config(void); diff --git a/src/include/pathman.h b/src/include/pathman.h index bf910219..2b1208de 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -88,49 +88,15 @@ extern Oid pathman_config_params_relid; Oid get_pathman_config_relid(bool invalid_is_ok); Oid get_pathman_config_params_relid(bool invalid_is_ok); -/* - * pg_pathman's global state structure. - */ -typedef struct PathmanState -{ - LWLock *dsm_init_lock; /* unused */ -} PathmanState; - - -/* - * Result of search_range_partition_eq(). - */ -typedef enum -{ - SEARCH_RANGEREL_OUT_OF_RANGE = 0, - SEARCH_RANGEREL_GAP, - SEARCH_RANGEREL_FOUND -} search_rangerel_result; - - -/* - * pg_pathman's global state. - */ -extern PathmanState *pmstate; - - -int append_child_relation(PlannerInfo *root, Relation parent_relation, - Index parent_rti, int ir_index, Oid child_oid, - List *wrappers); - -search_rangerel_result search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re); -uint32 hash_to_part_index(uint32 value, uint32 partitions); - -/* copied from allpaths.h */ -void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte); void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, PathKey *pathkeyAsc, PathKey *pathkeyDesc); +Index append_child_relation(PlannerInfo *root, Relation parent_relation, + Index parent_rti, int ir_index, Oid child_oid, + List *wrappers); + + typedef struct { const Node *orig; /* examined expression */ @@ -148,9 +114,7 @@ typedef struct bool for_insert; /* are we in PartitionFilter now? */ } WalkerContext; -/* - * Usual initialization procedure for WalkerContext. 
- */ +/* Usual initialization procedure for WalkerContext */ #define InitWalkerContext(context, prel_vno, prel_info, ecxt, for_ins) \ do { \ (context)->prel_varno = (prel_vno); \ @@ -162,6 +126,10 @@ typedef struct /* Check that WalkerContext contains ExprContext (plan execution stage) */ #define WcxtHasExprContext(wcxt) ( (wcxt)->econtext ) +/* Examine expression in order to select partitions */ +WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); + + void select_range_partitions(const Datum value, FmgrInfo *cmp_func, const RangeEntry *ranges, @@ -169,8 +137,26 @@ void select_range_partitions(const Datum value, const int strategy, WrapperNode *result); -/* Examine expression in order to select partitions. */ -WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); +/* Result of search_range_partition_eq() */ +typedef enum +{ + SEARCH_RANGEREL_OUT_OF_RANGE = 0, + SEARCH_RANGEREL_GAP, + SEARCH_RANGEREL_FOUND +} search_rangerel_result; + +search_rangerel_result search_range_partition_eq(const Datum value, + FmgrInfo *cmp_func, + const PartRelationInfo *prel, + RangeEntry *out_re); + + +/* Convert hash value to the partition index */ +static inline uint32 +hash_to_part_index(uint32 value, uint32 partitions) +{ + return value % partitions; +} /* diff --git a/src/include/utils.h b/src/include/utils.h index eaf863fe..752e6e6d 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -51,7 +51,6 @@ void extract_op_func_and_ret_type(char *opname, Oid *op_func, Oid *op_ret_type); - /* * Print values and cast types. 
*/ diff --git a/src/init.c b/src/init.c index 783816da..9d89730a 100644 --- a/src/init.c +++ b/src/init.c @@ -163,7 +163,7 @@ init_main_pathman_toggles(void) "Enables pg_pathman's optimizations during the planner stage", NULL, &pg_pathman_init_state.pg_pathman_enable, - true, + DEFAULT_PATHMAN_ENABLE, PGC_SUSET, 0, NULL, @@ -187,7 +187,7 @@ init_main_pathman_toggles(void) "Override COPY statement handling", NULL, &pg_pathman_init_state.override_copy, - true, + DEFAULT_OVERRIDE_COPY, PGC_SUSET, 0, NULL, @@ -259,8 +259,7 @@ unload_config(void) Size estimate_pathman_shmem_size(void) { - return estimate_concurrent_part_task_slots_size() + - MAXALIGN(sizeof(PathmanState)); + return estimate_concurrent_part_task_slots_size(); } /* @@ -358,33 +357,6 @@ fini_local_cache(void) parent_cache = NULL; } -/* - * Initializes pg_pathman's global state (PathmanState) & locks. - */ -void -init_shmem_config(void) -{ - bool found; - - /* Check if module was initialized in postmaster */ - pmstate = ShmemInitStruct("pg_pathman's global state", - sizeof(PathmanState), &found); - if (!found) - { - /* - * Initialize locks in postmaster - */ - if (!IsUnderPostmaster) - { - /* NOTE: dsm_array is redundant, hence the commented code */ - /* pmstate->dsm_init_lock = LWLockAssign(); */ - } - } - - /* Allocate some space for concurrent part slots */ - init_concurrent_part_task_slots(); -} - /* * Fill PartRelationInfo with partition-related info. 
*/ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1a79c587..d0f80724 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -38,9 +38,8 @@ PG_MODULE_MAGIC; -PathmanState *pmstate; -Oid pathman_config_relid = InvalidOid; -Oid pathman_config_params_relid = InvalidOid; +Oid pathman_config_relid = InvalidOid, + pathman_config_params_relid = InvalidOid; /* pg module functions */ @@ -49,8 +48,10 @@ void _PG_init(void); /* Expression tree handlers */ static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); - static WrapperNode *handle_const(const Const *c, WalkerContext *context); +static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); +static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); +static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); static void handle_binary_opexpr(WalkerContext *context, WrapperNode *result, @@ -61,25 +62,23 @@ static void handle_binary_opexpr_param(const PartRelationInfo *prel, WrapperNode *result, const Node *varnode); -static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); -static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); -static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); - -static double estimate_paramsel_using_prel(const PartRelationInfo *prel, - int strategy); - static bool pull_var_param(const WalkerContext *ctx, const OpExpr *expr, Node **var_ptr, Node **param_ptr); +static Const *extract_const(WalkerContext *wcxt, Param *param); + +static double estimate_paramsel_using_prel(const PartRelationInfo *prel, + int strategy); -/* Misc */ + +/* Copied from PostgreSQL (prepunion.c) */ static void make_inh_translation_list(Relation oldrelation, Relation newrelation, Index newvarno, List **translated_vars); -/* Copied from allpaths.h */ +/* Copied from PostgreSQL (allpaths.c) */ static void set_plain_rel_size(PlannerInfo 
*root, RelOptInfo *rel, RangeTblEntry *rte); @@ -114,13 +113,18 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, ) + /* - * Set initial values for all Postmaster's forks. + * ------------------- + * General functions + * ------------------- */ + +/* Set initial values for all Postmaster's forks */ void _PG_init(void) { - PathmanInitState temp_init_state; + PathmanInitState temp_init_state; if (!process_shared_preload_libraries_in_progress) { @@ -132,13 +136,11 @@ _PG_init(void) /* Request additional shared resources */ RequestAddinShmemSpace(estimate_pathman_shmem_size()); - /* NOTE: we don't need LWLocks now. RequestAddinLWLocks(1); */ - /* Assign pg_pathman's initial state */ - temp_init_state.pg_pathman_enable = true; - temp_init_state.auto_partition = true; - temp_init_state.override_copy = true; - temp_init_state.initialization_needed = true; + temp_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; + temp_init_state.auto_partition = DEFAULT_AUTO; + temp_init_state.override_copy = DEFAULT_OVERRIDE_COPY; + temp_init_state.initialization_needed = true; /* ofc it's needed! */ /* Apply initial state */ restore_pathman_init_state(&temp_init_state); @@ -167,128 +169,51 @@ _PG_init(void) init_partition_filter_static_data(); } -/* - * make_inh_translation_list - * Build the list of translations from parent Vars to child Vars for - * an inheritance child. - * - * For paranoia's sake, we match type/collation as well as attribute name. 
- * - * NOTE: borrowed from prepunion.c - */ -static void -make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars) +/* Get cached PATHMAN_CONFIG relation Oid */ +Oid +get_pathman_config_relid(bool invalid_is_ok) { - List *vars = NIL; - TupleDesc old_tupdesc = RelationGetDescr(oldrelation); - TupleDesc new_tupdesc = RelationGetDescr(newrelation); - int oldnatts = old_tupdesc->natts; - int newnatts = new_tupdesc->natts; - int old_attno; - - for (old_attno = 0; old_attno < oldnatts; old_attno++) - { - Form_pg_attribute att; - char *attname; - Oid atttypid; - int32 atttypmod; - Oid attcollation; - int new_attno; - - att = old_tupdesc->attrs[old_attno]; - if (att->attisdropped) - { - /* Just put NULL into this list entry */ - vars = lappend(vars, NULL); - continue; - } - attname = NameStr(att->attname); - atttypid = att->atttypid; - atttypmod = att->atttypmod; - attcollation = att->attcollation; - - /* - * When we are generating the "translation list" for the parent table - * of an inheritance set, no need to search for matches. - */ - if (oldrelation == newrelation) - { - vars = lappend(vars, makeVar(newvarno, - (AttrNumber) (old_attno + 1), - atttypid, - atttypmod, - attcollation, - 0)); - continue; - } + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, + (!IsPathmanInitialized() ? + "pg_pathman is not initialized yet" : + "unexpected error in function " + CppAsString(get_pathman_config_relid))); - /* - * Otherwise we have to search for the matching column by name. - * There's no guarantee it'll have the same column position, because - * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. - * However, in simple cases it will be the same column number, so try - * that before we go groveling through all the columns. - * - * Note: the test for (att = ...) 
!= NULL cannot fail, it's just a - * notational device to include the assignment into the if-clause. - */ - if (old_attno < newnatts && - (att = new_tupdesc->attrs[old_attno]) != NULL && - !att->attisdropped && att->attinhcount != 0 && - strcmp(attname, NameStr(att->attname)) == 0) - new_attno = old_attno; - else - { - for (new_attno = 0; new_attno < newnatts; new_attno++) - { - att = new_tupdesc->attrs[new_attno]; + return pathman_config_relid; +} - /* - * Make clang analyzer happy: - * - * Access to field 'attisdropped' results - * in a dereference of a null pointer - */ - if (!att) - elog(ERROR, "error in function " - CppAsString(make_inh_translation_list)); +/* Get cached PATHMAN_CONFIG_PARAMS relation Oid */ +Oid +get_pathman_config_params_relid(bool invalid_is_ok) +{ + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, + (!IsPathmanInitialized() ? + "pg_pathman is not initialized yet" : + "unexpected error in function " + CppAsString(get_pathman_config_params_relid))); - if (!att->attisdropped && att->attinhcount != 0 && - strcmp(attname, NameStr(att->attname)) == 0) - break; - } - if (new_attno >= newnatts) - elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", - attname, RelationGetRelationName(newrelation)); - } + return pathman_config_params_relid; +} - /* Found it, check type and collation match */ - if (atttypid != att->atttypid || atttypmod != att->atttypmod) - elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type", - attname, RelationGetRelationName(newrelation)); - if (attcollation != att->attcollation) - elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation", - attname, RelationGetRelationName(newrelation)); - vars = lappend(vars, makeVar(newvarno, - (AttrNumber) (new_attno + 1), - atttypid, - atttypmod, - attcollation, - 0)); - } - *translated_vars = vars; -} +/* + * ---------------------------------------- + * RTE 
expansion (add RTE for partitions) + * ---------------------------------------- + */ /* * Creates child relation and adds it to root. * Returns child index in simple_rel_array. * - * NOTE: This code is partially based on the expand_inherited_rtentry() function. + * NOTE: partially based on the expand_inherited_rtentry() function. */ -int +Index append_child_relation(PlannerInfo *root, Relation parent_relation, Index parent_rti, int ir_index, Oid child_oid, List *wrappers) @@ -320,6 +245,8 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte->inh = false; /* relation has no children */ child_rte->requiredPerms = 0; + /* FIXME: call translate_col_privs() on this RTE's column bitmapsets */ + /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); childRTindex = list_length(root->parse->rtable); @@ -447,17 +374,22 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, return childRTindex; } + + /* - * Given RangeEntry array and 'value', return selected - * RANGE partitions inside the WrapperNode. 
+ * -------------------------- + * RANGE partition prunning + * -------------------------- */ + +/* Given 'value' and 'ranges', return selected partitions list */ void select_range_partitions(const Datum value, FmgrInfo *cmp_func, const RangeEntry *ranges, const int nranges, const int strategy, - WrapperNode *result) + WrapperNode *result) /* returned partitions */ { bool lossy = false, is_less, @@ -633,6 +565,98 @@ select_range_partitions(const Datum value, } } +/* Fetch RangeEntry of RANGE partition which suits 'value' */ +search_rangerel_result +search_range_partition_eq(const Datum value, + FmgrInfo *cmp_func, + const PartRelationInfo *prel, + RangeEntry *out_re) /* returned RangeEntry */ +{ + RangeEntry *ranges; + int nranges; + WrapperNode result; + + ranges = PrelGetRangesArray(prel); + nranges = PrelChildrenCount(prel); + + select_range_partitions(value, + cmp_func, + ranges, + nranges, + BTEqualStrategyNumber, + &result); /* output */ + + if (result.found_gap) + { + return SEARCH_RANGEREL_GAP; + } + else if (result.rangeset == NIL) + { + return SEARCH_RANGEREL_OUT_OF_RANGE; + } + else + { + IndexRange irange = linitial_irange(result.rangeset); + + Assert(list_length(result.rangeset) == 1); + Assert(irange_lower(irange) == irange_upper(irange)); + Assert(is_irange_valid(irange)); + + /* Write result to the 'out_rentry' if necessary */ + if (out_re) + memcpy((void *) out_re, + (const void *) &ranges[irange_lower(irange)], + sizeof(RangeEntry)); + + return SEARCH_RANGEREL_FOUND; + } +} + + + +/* + * --------------------------------- + * walk_expr_tree() implementation + * --------------------------------- + */ + +/* Examine expression in order to select partitions */ +WrapperNode * +walk_expr_tree(Expr *expr, WalkerContext *context) +{ + WrapperNode *result; + + switch (nodeTag(expr)) + { + /* Useful for INSERT optimization */ + case T_Const: + return handle_const((Const *) expr, context); + + /* AND, OR, NOT expressions */ + case T_BoolExpr: + return 
handle_boolexpr((BoolExpr *) expr, context); + + /* =, !=, <, > etc. */ + case T_OpExpr: + return handle_opexpr((OpExpr *) expr, context); + + /* IN expression */ + case T_ScalarArrayOpExpr: + return handle_arrexpr((ScalarArrayOpExpr *) expr, context); + + default: + result = (WrapperNode *) palloc(sizeof(WrapperNode)); + result->orig = (const Node *) expr; + result->args = NIL; + result->paramsel = 1.0; + + result->rangeset = list_make1_irange( + make_irange(0, PrelLastChild(context->prel), IR_LOSSY)); + + return result; + } +} + /* Convert wrapper into expression for given index */ static Node * wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) @@ -710,245 +734,15 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) return copyObject(wrap->orig); } -/* - * Recursive function to walk through conditions tree - */ -WrapperNode * -walk_expr_tree(Expr *expr, WalkerContext *context) +/* Const handler */ +static WrapperNode * +handle_const(const Const *c, WalkerContext *context) { - WrapperNode *result; + const PartRelationInfo *prel = context->prel; + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + int strategy = BTEqualStrategyNumber; - switch (nodeTag(expr)) - { - /* Useful for INSERT optimization */ - case T_Const: - return handle_const((Const *) expr, context); - - /* AND, OR, NOT expressions */ - case T_BoolExpr: - return handle_boolexpr((BoolExpr *) expr, context); - - /* =, !=, <, > etc. 
*/ - case T_OpExpr: - return handle_opexpr((OpExpr *) expr, context); - - /* IN expression */ - case T_ScalarArrayOpExpr: - return handle_arrexpr((ScalarArrayOpExpr *) expr, context); - - default: - result = (WrapperNode *) palloc(sizeof(WrapperNode)); - result->orig = (const Node *) expr; - result->args = NIL; - result->paramsel = 1.0; - - result->rangeset = list_make1_irange( - make_irange(0, PrelLastChild(context->prel), IR_LOSSY)); - - return result; - } -} - -/* - * This function determines which partitions should appear in query plan. - */ -static void -handle_binary_opexpr(WalkerContext *context, WrapperNode *result, - const Node *varnode, const Const *c) -{ - int strategy; - TypeCacheEntry *tce; - Oid vartype; - const OpExpr *expr = (const OpExpr *) result->orig; - const PartRelationInfo *prel = context->prel; - - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - - vartype = !IsA(varnode, RelabelType) ? - ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; - - /* Exit if Constant is NULL */ - if (c->constisnull) - { - result->rangeset = NIL; - result->paramsel = 1.0; - return; - } - - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - - /* There's no strategy for this operator, go to end */ - if (strategy == 0) - goto binary_opexpr_return; - - switch (prel->parttype) - { - case PT_HASH: - /* If strategy is "=", select one partiton */ - if (strategy == BTEqualStrategyNumber) - { - Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); - uint32 idx = hash_to_part_index(DatumGetInt32(value), - PrelChildrenCount(prel)); - - result->paramsel = estimate_paramsel_using_prel(prel, strategy); - result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); - - return; /* exit on equal */ - } - /* Else go to end */ - else goto binary_opexpr_return; - - case PT_RANGE: - { - FmgrInfo cmp_func; - - fill_type_cmp_fmgr_info(&cmp_func, - 
getBaseType(c->consttype), - getBaseType(prel->atttype)); - - select_range_partitions(c->constvalue, - &cmp_func, - PrelGetRangesArray(context->prel), - PrelChildrenCount(context->prel), - strategy, - result); /* output */ - - result->paramsel = estimate_paramsel_using_prel(prel, strategy); - - return; /* done, now exit */ - } - - default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); - } - -binary_opexpr_return: - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); - result->paramsel = 1.0; -} - -/* - * Estimate selectivity of parametrized quals. - */ -static void -handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result, const Node *varnode) -{ - const OpExpr *expr = (const OpExpr *) result->orig; - TypeCacheEntry *tce; - int strategy; - Oid vartype; - - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - - vartype = !IsA(varnode, RelabelType) ? - ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; - - /* Determine operator type */ - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); -} - -/* - * Extracted common 'paramsel' estimator. - */ -static double -estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) -{ - /* If it's "=", divide by partitions number */ - if (strategy == BTEqualStrategyNumber) - return 1.0 / (double) PrelChildrenCount(prel); - - /* Default selectivity estimate for inequalities */ - else if (prel->parttype == PT_RANGE && strategy > 0) - return DEFAULT_INEQ_SEL; - - /* Else there's not much to do */ - else return 1.0; -} - -/* - * Convert hash value to the partition index. 
- */ -uint32 -hash_to_part_index(uint32 value, uint32 partitions) -{ - return value % partitions; -} - -search_rangerel_result -search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re) /* returned RangeEntry */ -{ - RangeEntry *ranges; - int nranges; - WrapperNode result; - - ranges = PrelGetRangesArray(prel); - nranges = PrelChildrenCount(prel); - - select_range_partitions(value, - cmp_func, - ranges, - nranges, - BTEqualStrategyNumber, - &result); /* output */ - - if (result.found_gap) - { - return SEARCH_RANGEREL_GAP; - } - else if (result.rangeset == NIL) - { - return SEARCH_RANGEREL_OUT_OF_RANGE; - } - else - { - IndexRange irange = linitial_irange(result.rangeset); - - Assert(list_length(result.rangeset) == 1); - Assert(irange_lower(irange) == irange_upper(irange)); - Assert(is_irange_valid(irange)); - - /* Write result to the 'out_rentry' if necessary */ - if (out_re) - memcpy((void *) out_re, - (const void *) &ranges[irange_lower(irange)], - sizeof(RangeEntry)); - - return SEARCH_RANGEREL_FOUND; - } -} - -static Const * -extract_const(WalkerContext *wcxt, Param *param) -{ - ExprState *estate = ExecInitExpr((Expr *) param, NULL); - bool isnull; - Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull, NULL); - - return makeConst(param->paramtype, param->paramtypmod, - param->paramcollid, get_typlen(param->paramtype), - value, isnull, get_typbyval(param->paramtype)); -} - -static WrapperNode * -handle_const(const Const *c, WalkerContext *context) -{ - const PartRelationInfo *prel = context->prel; - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - int strategy = BTEqualStrategyNumber; - - result->orig = (const Node *) c; + result->orig = (const Node *) c; /* * Had to add this check for queries like: @@ -1023,120 +817,28 @@ handle_const(const Const *c, WalkerContext *context) return result; } -/* - * Operator expression handler - */ +/* Boolean expression handler */ static 
WrapperNode * -handle_opexpr(const OpExpr *expr, WalkerContext *context) +handle_boolexpr(const BoolExpr *expr, WalkerContext *context) { WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - Node *var, *param; + ListCell *lc; const PartRelationInfo *prel = context->prel; - result->orig = (const Node *) expr; + result->orig = (const Node *)expr; result->args = NIL; + result->paramsel = 1.0; - if (list_length(expr->args) == 2) + if (expr->boolop == AND_EXPR) + result->rangeset = list_make1_irange(make_irange(0, + PrelLastChild(prel), + IR_COMPLETE)); + else + result->rangeset = NIL; + + foreach (lc, expr->args) { - if (pull_var_param(context, expr, &var, ¶m)) - { - if (IsConstValue(context, param)) - { - handle_binary_opexpr(context, result, var, ExtractConst(context, param)); - return result; - } - else if (IsA(param, Param) || IsA(param, Var)) - { - handle_binary_opexpr_param(prel, result, var); - return result; - } - } - } - - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); - result->paramsel = 1.0; - return result; -} - -/* - * Checks if expression is a KEY OP PARAM or PARAM OP KEY, - * where KEY is partition key (it could be Var or RelableType) and PARAM is - * whatever. Function returns variable (or RelableType) and param via var_ptr - * and param_ptr pointers. If partition key isn't in expression then function - * returns false. - */ -static bool -pull_var_param(const WalkerContext *ctx, - const OpExpr *expr, - Node **var_ptr, - Node **param_ptr) -{ - Node *left = linitial(expr->args), - *right = lsecond(expr->args); - Var *v = NULL; - - /* Check the case when variable is on the left side */ - if (IsA(left, Var) || IsA(left, RelabelType)) - { - v = !IsA(left, RelabelType) ? 
- (Var *) left : - (Var *) ((RelabelType *) left)->arg; - - /* Check if 'v' is partitioned column of 'prel' */ - if (v->varoattno == ctx->prel->attnum && - v->varno == ctx->prel_varno) - { - *var_ptr = left; - *param_ptr = right; - return true; - } - } - - /* ... variable is on the right side */ - if (IsA(right, Var) || IsA(right, RelabelType)) - { - v = !IsA(right, RelabelType) ? - (Var *) right : - (Var *) ((RelabelType *) right)->arg; - - /* Check if 'v' is partitioned column of 'prel' */ - if (v->varoattno == ctx->prel->attnum && - v->varno == ctx->prel_varno) - { - *var_ptr = right; - *param_ptr = left; - return true; - } - } - - /* Variable isn't a partitionig key */ - return false; -} - -/* - * Boolean expression handler - */ -static WrapperNode * -handle_boolexpr(const BoolExpr *expr, WalkerContext *context) -{ - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - ListCell *lc; - const PartRelationInfo *prel = context->prel; - - result->orig = (const Node *)expr; - result->args = NIL; - result->paramsel = 1.0; - - if (expr->boolop == AND_EXPR) - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - IR_COMPLETE)); - else - result->rangeset = NIL; - - foreach (lc, expr->args) - { - WrapperNode *arg; + WrapperNode *arg; arg = walk_expr_tree((Expr *) lfirst(lc), context); result->args = lappend(result->args, arg); @@ -1179,9 +881,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) return result; } -/* - * Scalar array expression - */ +/* Scalar array expression handler */ static WrapperNode * handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) { @@ -1334,378 +1034,318 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) return result; } -/* - * These functions below are copied from allpaths.c with (or without) some - * modifications. Couldn't use original because of 'static' modifier. 
- */ - -/* - * set_plain_rel_size - * Set size estimates for a plain relation (no subquery, no inheritance) - */ -static void -set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +/* Operator expression handler */ +static WrapperNode * +handle_opexpr(const OpExpr *expr, WalkerContext *context) { - /* - * Test any partial indexes of rel for applicability. We must do this - * first since partial unique indexes can affect size estimates. - */ - check_index_predicates_compat(root, rel); + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + Node *var, *param; + const PartRelationInfo *prel = context->prel; - /* Mark rel with estimated output rows, width, etc */ - set_baserel_size_estimates(root, rel); + result->orig = (const Node *) expr; + result->args = NIL; + + if (list_length(expr->args) == 2) + { + if (pull_var_param(context, expr, &var, ¶m)) + { + if (IsConstValue(context, param)) + { + handle_binary_opexpr(context, result, var, ExtractConst(context, param)); + return result; + } + else if (IsA(param, Param) || IsA(param, Var)) + { + handle_binary_opexpr_param(prel, result, var); + return result; + } + } + } + + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->paramsel = 1.0; + return result; } -/* - * set_plain_rel_pathlist - * Build access paths for a plain relation (no subquery, no inheritance) - */ +/* Binary operator handler */ static void -set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +handle_binary_opexpr(WalkerContext *context, WrapperNode *result, + const Node *varnode, const Const *c) { - Relids required_outer; - Path *path; + int strategy; + TypeCacheEntry *tce; + Oid vartype; + const OpExpr *expr = (const OpExpr *) result->orig; + const PartRelationInfo *prel = context->prel; - /* - * We don't support pushing join clauses into the quals of a seqscan, but - * it could still have required parameterization due to LATERAL refs in - * its 
tlist. - */ - required_outer = rel->lateral_relids; + Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - /* Consider sequential scan */ -#if PG_VERSION_NUM >= 90600 - path = create_seqscan_path(root, rel, required_outer, 0); -#else - path = create_seqscan_path(root, rel, required_outer); -#endif - add_path(rel, path); + vartype = !IsA(varnode, RelabelType) ? + ((Var *) varnode)->vartype : + ((RelabelType *) varnode)->resulttype; -#if PG_VERSION_NUM >= 90600 - /* If appropriate, consider parallel sequential scan */ - if (rel->consider_parallel && required_outer == NULL) - create_plain_partial_paths_compat(root, rel); -#endif + /* Exit if Constant is NULL */ + if (c->constisnull) + { + result->rangeset = NIL; + result->paramsel = 1.0; + return; + } - /* Consider index scans */ - create_index_paths(root, rel); + tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - /* Consider TID scans */ - create_tidscan_paths(root, rel); -} + /* There's no strategy for this operator, go to end */ + if (strategy == 0) + goto binary_opexpr_return; -/* - * set_foreign_size - * Set size estimates for a foreign table RTE - */ -static void -set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) -{ - /* Mark rel with estimated output rows, width, etc */ - set_foreign_size_estimates(root, rel); + switch (prel->parttype) + { + case PT_HASH: + /* If strategy is "=", select one partiton */ + if (strategy == BTEqualStrategyNumber) + { + Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); + uint32 idx = hash_to_part_index(DatumGetInt32(value), + PrelChildrenCount(prel)); - /* Let FDW adjust the size estimates, if it can */ - rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); + result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); - /* ... 
but do not let it set the rows estimate to zero */ - rel->rows = clamp_row_est(rel->rows); + return; /* exit on equal */ + } + /* Else go to end */ + else goto binary_opexpr_return; + + case PT_RANGE: + { + FmgrInfo cmp_func; + + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(c->consttype), + getBaseType(prel->atttype)); + + select_range_partitions(c->constvalue, + &cmp_func, + PrelGetRangesArray(context->prel), + PrelChildrenCount(context->prel), + strategy, + result); /* output */ + + result->paramsel = estimate_paramsel_using_prel(prel, strategy); + + return; /* done, now exit */ + } + + default: + elog(ERROR, "Unknown partitioning type %u", prel->parttype); + } + +binary_opexpr_return: + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->paramsel = 1.0; } -/* - * set_foreign_pathlist - * Build access paths for a foreign table RTE - */ +/* Estimate selectivity of parametrized quals */ static void -set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +handle_binary_opexpr_param(const PartRelationInfo *prel, + WrapperNode *result, const Node *varnode) { - /* Call the FDW's GetForeignPaths function to generate path(s) */ - rel->fdwroutine->GetForeignPaths(root, rel, rte->relid); + const OpExpr *expr = (const OpExpr *) result->orig; + TypeCacheEntry *tce; + int strategy; + Oid vartype; + + Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); + + vartype = !IsA(varnode, RelabelType) ? 
+ ((Var *) varnode)->vartype : + ((RelabelType *) varnode)->resulttype; + + /* Determine operator type */ + tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + + result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); } /* - * set_append_rel_pathlist - * Build access paths for an "append relation" + * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where KEY is + * partition key (it could be Var or RelableType) and PARAM is whatever. + * + * NOTE: returns false if partition key is not in expression. */ -void -set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, - PathKey *pathkeyAsc, PathKey *pathkeyDesc) +static bool +pull_var_param(const WalkerContext *ctx, + const OpExpr *expr, + Node **var_ptr, + Node **param_ptr) { - Index parentRTindex = rti; - List *live_childrels = NIL; - List *subpaths = NIL; - bool subpaths_valid = true; -#if PG_VERSION_NUM >= 90600 - List *partial_subpaths = NIL; - bool partial_subpaths_valid = true; -#endif - List *all_child_pathkeys = NIL; - List *all_child_outers = NIL; - ListCell *l; + Node *left = linitial(expr->args), + *right = lsecond(expr->args); + Var *v = NULL; - /* - * Generate access paths for each member relation, and remember the - * cheapest path for each one. Also, identify all pathkeys (orderings) - * and parameterizations (required_outer sets) available for the member - * relations. 
- */ - foreach(l, root->append_rel_list) + /* Check the case when variable is on the left side */ + if (IsA(left, Var) || IsA(left, RelabelType)) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex; - RangeTblEntry *childRTE; - RelOptInfo *childrel; - ListCell *lcp; - - /* append_rel_list contains all append rels; ignore others */ - if (appinfo->parent_relid != parentRTindex) - continue; - - /* Re-locate the child RTE and RelOptInfo */ - childRTindex = appinfo->child_relid; - childRTE = root->simple_rte_array[childRTindex]; - childrel = root->simple_rel_array[childRTindex]; - -#if PG_VERSION_NUM >= 90600 - /* - * If parallelism is allowable for this query in general and for parent - * appendrel, see whether it's allowable for this childrel in - * particular. - * - * For consistency, do this before calling set_rel_size() for the child. - */ - if (root->glob->parallelModeOK && rel->consider_parallel) - set_rel_consider_parallel_compat(root, childrel, childRTE); -#endif + v = !IsA(left, RelabelType) ? + (Var *) left : + (Var *) ((RelabelType *) left)->arg; - /* Compute child's access paths & sizes */ - if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + /* Check if 'v' is partitioned column of 'prel' */ + if (v->varoattno == ctx->prel->attnum && + v->varno == ctx->prel_varno) { - /* childrel->rows should be >= 1 */ - set_foreign_size(root, childrel, childRTE); - - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; - - set_foreign_pathlist(root, childrel, childRTE); + *var_ptr = left; + *param_ptr = right; + return true; } - else - { - /* childrel->rows should be >= 1 */ - set_plain_rel_size(root, childrel, childRTE); + } - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* ... variable is on the right side */ + if (IsA(right, Var) || IsA(right, RelabelType)) + { + v = !IsA(right, RelabelType) ? 
+ (Var *) right : + (Var *) ((RelabelType *) right)->arg; - set_plain_rel_pathlist(root, childrel, childRTE); + /* Check if 'v' is partitioned column of 'prel' */ + if (v->varoattno == ctx->prel->attnum && + v->varno == ctx->prel_varno) + { + *var_ptr = right; + *param_ptr = left; + return true; } + } - /* Set cheapest path for child */ - set_cheapest(childrel); - - /* If child BECAME dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* Variable isn't a partitionig key */ + return false; +} - /* - * Child is live, so add it to the live_childrels list for use below. - */ - live_childrels = lappend(live_childrels, childrel); +/* Extract (evaluate) Const from Param node */ +static Const * +extract_const(WalkerContext *wcxt, Param *param) +{ + ExprState *estate = ExecInitExpr((Expr *) param, NULL); + bool isnull; + Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull, NULL); -#if PG_VERSION_NUM >= 90600 - /* - * If any live child is not parallel-safe, treat the whole appendrel - * as not parallel-safe. In future we might be able to generate plans - * in which some children are farmed out to workers while others are - * not; but we don't have that today, so it's a waste to consider - * partial paths anywhere in the appendrel unless it's all safe. - */ - if (!childrel->consider_parallel) - rel->consider_parallel = false; -#endif + return makeConst(param->paramtype, param->paramtypmod, + param->paramcollid, get_typlen(param->paramtype), + value, isnull, get_typbyval(param->paramtype)); +} - /* - * If child has an unparameterized cheapest-total path, add that to - * the unparameterized Append path we are constructing for the parent. - * If not, there's no workable unparameterized path. 
- */ - if (childrel->cheapest_total_path->param_info == NULL) - subpaths = accumulate_append_subpath(subpaths, - childrel->cheapest_total_path); - else - subpaths_valid = false; +/* Selectivity estimator for common 'paramsel' */ +static double +estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) +{ + /* If it's "=", divide by partitions number */ + if (strategy == BTEqualStrategyNumber) + return 1.0 / (double) PrelChildrenCount(prel); -#if PG_VERSION_NUM >= 90600 - /* Same idea, but for a partial plan. */ - if (childrel->partial_pathlist != NIL) - partial_subpaths = accumulate_append_subpath(partial_subpaths, - linitial(childrel->partial_pathlist)); - else - partial_subpaths_valid = false; -#endif + /* Default selectivity estimate for inequalities */ + else if (prel->parttype == PT_RANGE && strategy > 0) + return DEFAULT_INEQ_SEL; - /* - * Collect lists of all the available path orderings and - * parameterizations for all the children. We use these as a - * heuristic to indicate which sort orderings and parameterizations we - * should build Append and MergeAppend paths for. - */ - foreach(lcp, childrel->pathlist) - { - Path *childpath = (Path *) lfirst(lcp); - List *childkeys = childpath->pathkeys; - Relids childouter = PATH_REQ_OUTER(childpath); + /* Else there's not much to do */ + else return 1.0; +} - /* Unsorted paths don't contribute to pathkey list */ - if (childkeys != NIL) - { - ListCell *lpk; - bool found = false; - /* Have we already seen this ordering? 
*/ - foreach(lpk, all_child_pathkeys) - { - List *existing_pathkeys = (List *) lfirst(lpk); - if (compare_pathkeys(existing_pathkeys, - childkeys) == PATHKEYS_EQUAL) - { - found = true; - break; - } - } - if (!found) - { - /* No, so add it to all_child_pathkeys */ - all_child_pathkeys = lappend(all_child_pathkeys, - childkeys); - } - } +/* + * ---------------------------------------------------------------------------------- + * NOTE: The following functions below are copied from PostgreSQL with (or without) + * some modifications. Couldn't use original because of 'static' modifier. + * ---------------------------------------------------------------------------------- + */ - /* Unparameterized paths don't contribute to param-set list */ - if (childouter) - { - ListCell *lco; - bool found = false; +/* + * set_plain_rel_size + * Set size estimates for a plain relation (no subquery, no inheritance) + */ +static void +set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + /* + * Test any partial indexes of rel for applicability. We must do this + * first since partial unique indexes can affect size estimates. + */ + check_index_predicates_compat(root, rel); - /* Have we already seen this param set? 
*/ - foreach(lco, all_child_outers) - { - Relids existing_outers = (Relids) lfirst(lco); + /* Mark rel with estimated output rows, width, etc */ + set_baserel_size_estimates(root, rel); +} - if (bms_equal(existing_outers, childouter)) - { - found = true; - break; - } - } - if (!found) - { - /* No, so add it to all_child_outers */ - all_child_outers = lappend(all_child_outers, - childouter); - } - } - } - } +/* + * set_plain_rel_pathlist + * Build access paths for a plain relation (no subquery, no inheritance) + */ +static void +set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + Relids required_outer; + Path *path; /* - * If we found unparameterized paths for all children, build an unordered, - * unparameterized Append path for the rel. (Note: this is correct even - * if we have zero or one live subpath due to constraint exclusion.) + * We don't support pushing join clauses into the quals of a seqscan, but + * it could still have required parameterization due to LATERAL refs in + * its tlist. */ - if (subpaths_valid) - add_path(rel, - (Path *) create_append_path_compat(rel, subpaths, NULL, 0)); + required_outer = rel->lateral_relids; + /* Consider sequential scan */ #if PG_VERSION_NUM >= 90600 - /* - * Consider an append of partial unordered, unparameterized partial paths. - */ - if (partial_subpaths_valid) - { - AppendPath *appendpath; - ListCell *lc; - int parallel_workers = 0; - - /* - * Decide on the number of workers to request for this append path. - * For now, we just use the maximum value from among the members. It - * might be useful to use a higher number if the Append node were - * smart enough to spread out the workers, but it currently isn't. 
- */ - foreach(lc, partial_subpaths) - { - Path *path = lfirst(lc); - - parallel_workers = Max(parallel_workers, path->parallel_workers); - } - - if (parallel_workers > 0) - { + path = create_seqscan_path(root, rel, required_outer, 0); +#else + path = create_seqscan_path(root, rel, required_outer); +#endif + add_path(rel, path); - /* Generate a partial append path. */ - appendpath = create_append_path_compat(rel, partial_subpaths, NULL, - parallel_workers); - add_partial_path(rel, (Path *) appendpath); - } - } +#if PG_VERSION_NUM >= 90600 + /* If appropriate, consider parallel sequential scan */ + if (rel->consider_parallel && required_outer == NULL) + create_plain_partial_paths_compat(root, rel); #endif - /* - * Also build unparameterized MergeAppend paths based on the collected - * list of child pathkeys. - */ - if (subpaths_valid) - generate_mergeappend_paths(root, rel, live_childrels, - all_child_pathkeys, pathkeyAsc, - pathkeyDesc); + /* Consider index scans */ + create_index_paths(root, rel); - /* - * Build Append paths for each parameterization seen among the child rels. - * (This may look pretty expensive, but in most cases of practical - * interest, the child rels will expose mostly the same parameterizations, - * so that not that many cases actually get considered here.) - * - * The Append node itself cannot enforce quals, so all qual checking must - * be done in the child paths. This means that to have a parameterized - * Append path, we must have the exact same parameterization for each - * child path; otherwise some children might be failing to check the - * moved-down quals. To make them match up, we can try to increase the - * parameterization of lesser-parameterized paths. 
- */ - foreach(l, all_child_outers) - { - Relids required_outer = (Relids) lfirst(l); - ListCell *lcr; + /* Consider TID scans */ + create_tidscan_paths(root, rel); +} - /* Select the child paths for an Append with this parameterization */ - subpaths = NIL; - subpaths_valid = true; - foreach(lcr, live_childrels) - { - RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); - Path *subpath; +/* + * set_foreign_size + * Set size estimates for a foreign table RTE + */ +static void +set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + /* Mark rel with estimated output rows, width, etc */ + set_foreign_size_estimates(root, rel); - subpath = get_cheapest_parameterized_child_path(root, - childrel, - required_outer); - if (subpath == NULL) - { - /* failed to make a suitable path for this child */ - subpaths_valid = false; - break; - } - subpaths = accumulate_append_subpath(subpaths, subpath); - } + /* Let FDW adjust the size estimates, if it can */ + rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid); - if (subpaths_valid) - add_path(rel, (Path *) - create_append_path_compat(rel, subpaths, required_outer, 0)); - } + /* ... but do not let it set the rows estimate to zero */ + rel->rows = clamp_row_est(rel->rows); +} + +/* + * set_foreign_pathlist + * Build access paths for a foreign table RTE + */ +static void +set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + /* Call the FDW's GetForeignPaths function to generate path(s) */ + rel->fdwroutine->GetForeignPaths(root, rel, rte->relid); } + static List * accumulate_append_subpath(List *subpaths, Path *path) { @@ -1933,35 +1573,401 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * Get cached PATHMAN_CONFIG relation Oid. + * make_inh_translation_list + * Build the list of translations from parent Vars to child Vars for + * an inheritance child. + * + * For paranoia's sake, we match type/collation as well as attribute name. 
*/ -Oid -get_pathman_config_relid(bool invalid_is_ok) +static void +make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars) { - /* Raise ERROR if Oid is invalid */ - if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_relid))); + List *vars = NIL; + TupleDesc old_tupdesc = RelationGetDescr(oldrelation); + TupleDesc new_tupdesc = RelationGetDescr(newrelation); + int oldnatts = old_tupdesc->natts; + int newnatts = new_tupdesc->natts; + int old_attno; - return pathman_config_relid; + for (old_attno = 0; old_attno < oldnatts; old_attno++) + { + Form_pg_attribute att; + char *attname; + Oid atttypid; + int32 atttypmod; + Oid attcollation; + int new_attno; + + att = old_tupdesc->attrs[old_attno]; + if (att->attisdropped) + { + /* Just put NULL into this list entry */ + vars = lappend(vars, NULL); + continue; + } + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + attcollation = att->attcollation; + + /* + * When we are generating the "translation list" for the parent table + * of an inheritance set, no need to search for matches. + */ + if (oldrelation == newrelation) + { + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (old_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); + continue; + } + + /* + * Otherwise we have to search for the matching column by name. + * There's no guarantee it'll have the same column position, because + * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. + * However, in simple cases it will be the same column number, so try + * that before we go groveling through all the columns. + * + * Note: the test for (att = ...) != NULL cannot fail, it's just a + * notational device to include the assignment into the if-clause. 
+ */ + if (old_attno < newnatts && + (att = new_tupdesc->attrs[old_attno]) != NULL && + !att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + new_attno = old_attno; + else + { + for (new_attno = 0; new_attno < newnatts; new_attno++) + { + att = new_tupdesc->attrs[new_attno]; + + /* + * Make clang analyzer happy: + * + * Access to field 'attisdropped' results + * in a dereference of a null pointer + */ + if (!att) + elog(ERROR, "error in function " + CppAsString(make_inh_translation_list)); + + if (!att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + break; + } + if (new_attno >= newnatts) + elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", + attname, RelationGetRelationName(newrelation)); + } + + /* Found it, check type and collation match */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type", + attname, RelationGetRelationName(newrelation)); + if (attcollation != att->attcollation) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation", + attname, RelationGetRelationName(newrelation)); + + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (new_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); + } + + *translated_vars = vars; } + /* - * Get cached PATHMAN_CONFIG_PARAMS relation Oid. + * set_append_rel_pathlist + * Build access paths for an "append relation" + * + * NOTE: this function is 'public' (used in hooks.c) */ -Oid -get_pathman_config_params_relid(bool invalid_is_ok) +void +set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, + PathKey *pathkeyAsc, PathKey *pathkeyDesc) { - /* Raise ERROR if Oid is invalid */ - if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? 
- "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_params_relid))); - - return pathman_config_params_relid; + Index parentRTindex = rti; + List *live_childrels = NIL; + List *subpaths = NIL; + bool subpaths_valid = true; +#if PG_VERSION_NUM >= 90600 + List *partial_subpaths = NIL; + bool partial_subpaths_valid = true; +#endif + List *all_child_pathkeys = NIL; + List *all_child_outers = NIL; + ListCell *l; + + /* + * Generate access paths for each member relation, and remember the + * cheapest path for each one. Also, identify all pathkeys (orderings) + * and parameterizations (required_outer sets) available for the member + * relations. + */ + foreach(l, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index childRTindex; + RangeTblEntry *childRTE; + RelOptInfo *childrel; + ListCell *lcp; + + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + /* Re-locate the child RTE and RelOptInfo */ + childRTindex = appinfo->child_relid; + childRTE = root->simple_rte_array[childRTindex]; + childrel = root->simple_rel_array[childRTindex]; + +#if PG_VERSION_NUM >= 90600 + /* + * If parallelism is allowable for this query in general and for parent + * appendrel, see whether it's allowable for this childrel in + * particular. + * + * For consistency, do this before calling set_rel_size() for the child. 
+ */ + if (root->glob->parallelModeOK && rel->consider_parallel) + set_rel_consider_parallel_compat(root, childrel, childRTE); +#endif + + /* Compute child's access paths & sizes */ + if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + { + /* childrel->rows should be >= 1 */ + set_foreign_size(root, childrel, childRTE); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; + + set_foreign_pathlist(root, childrel, childRTE); + } + else + { + /* childrel->rows should be >= 1 */ + set_plain_rel_size(root, childrel, childRTE); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; + + set_plain_rel_pathlist(root, childrel, childRTE); + } + + /* Set cheapest path for child */ + set_cheapest(childrel); + + /* If child BECAME dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; + + /* + * Child is live, so add it to the live_childrels list for use below. + */ + live_childrels = lappend(live_childrels, childrel); + +#if PG_VERSION_NUM >= 90600 + /* + * If any live child is not parallel-safe, treat the whole appendrel + * as not parallel-safe. In future we might be able to generate plans + * in which some children are farmed out to workers while others are + * not; but we don't have that today, so it's a waste to consider + * partial paths anywhere in the appendrel unless it's all safe. + */ + if (!childrel->consider_parallel) + rel->consider_parallel = false; +#endif + + /* + * If child has an unparameterized cheapest-total path, add that to + * the unparameterized Append path we are constructing for the parent. + * If not, there's no workable unparameterized path. + */ + if (childrel->cheapest_total_path->param_info == NULL) + subpaths = accumulate_append_subpath(subpaths, + childrel->cheapest_total_path); + else + subpaths_valid = false; + +#if PG_VERSION_NUM >= 90600 + /* Same idea, but for a partial plan. 
*/ + if (childrel->partial_pathlist != NIL) + partial_subpaths = accumulate_append_subpath(partial_subpaths, + linitial(childrel->partial_pathlist)); + else + partial_subpaths_valid = false; +#endif + + /* + * Collect lists of all the available path orderings and + * parameterizations for all the children. We use these as a + * heuristic to indicate which sort orderings and parameterizations we + * should build Append and MergeAppend paths for. + */ + foreach(lcp, childrel->pathlist) + { + Path *childpath = (Path *) lfirst(lcp); + List *childkeys = childpath->pathkeys; + Relids childouter = PATH_REQ_OUTER(childpath); + + /* Unsorted paths don't contribute to pathkey list */ + if (childkeys != NIL) + { + ListCell *lpk; + bool found = false; + + /* Have we already seen this ordering? */ + foreach(lpk, all_child_pathkeys) + { + List *existing_pathkeys = (List *) lfirst(lpk); + + if (compare_pathkeys(existing_pathkeys, + childkeys) == PATHKEYS_EQUAL) + { + found = true; + break; + } + } + if (!found) + { + /* No, so add it to all_child_pathkeys */ + all_child_pathkeys = lappend(all_child_pathkeys, + childkeys); + } + } + + /* Unparameterized paths don't contribute to param-set list */ + if (childouter) + { + ListCell *lco; + bool found = false; + + /* Have we already seen this param set? */ + foreach(lco, all_child_outers) + { + Relids existing_outers = (Relids) lfirst(lco); + + if (bms_equal(existing_outers, childouter)) + { + found = true; + break; + } + } + if (!found) + { + /* No, so add it to all_child_outers */ + all_child_outers = lappend(all_child_outers, + childouter); + } + } + } + } + + /* + * If we found unparameterized paths for all children, build an unordered, + * unparameterized Append path for the rel. (Note: this is correct even + * if we have zero or one live subpath due to constraint exclusion.) 
+ */ + if (subpaths_valid) + add_path(rel, + (Path *) create_append_path_compat(rel, subpaths, NULL, 0)); + +#if PG_VERSION_NUM >= 90600 + /* + * Consider an append of partial unordered, unparameterized partial paths. + */ + if (partial_subpaths_valid) + { + AppendPath *appendpath; + ListCell *lc; + int parallel_workers = 0; + + /* + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. It + * might be useful to use a higher number if the Append node were + * smart enough to spread out the workers, but it currently isn't. + */ + foreach(lc, partial_subpaths) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + + if (parallel_workers > 0) + { + + /* Generate a partial append path. */ + appendpath = create_append_path_compat(rel, partial_subpaths, NULL, + parallel_workers); + add_partial_path(rel, (Path *) appendpath); + } + } +#endif + + /* + * Also build unparameterized MergeAppend paths based on the collected + * list of child pathkeys. + */ + if (subpaths_valid) + generate_mergeappend_paths(root, rel, live_childrels, + all_child_pathkeys, pathkeyAsc, + pathkeyDesc); + + /* + * Build Append paths for each parameterization seen among the child rels. + * (This may look pretty expensive, but in most cases of practical + * interest, the child rels will expose mostly the same parameterizations, + * so that not that many cases actually get considered here.) + * + * The Append node itself cannot enforce quals, so all qual checking must + * be done in the child paths. This means that to have a parameterized + * Append path, we must have the exact same parameterization for each + * child path; otherwise some children might be failing to check the + * moved-down quals. To make them match up, we can try to increase the + * parameterization of lesser-parameterized paths. 
+ */ + foreach(l, all_child_outers) + { + Relids required_outer = (Relids) lfirst(l); + ListCell *lcr; + + /* Select the child paths for an Append with this parameterization */ + subpaths = NIL; + subpaths_valid = true; + foreach(lcr, live_childrels) + { + RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); + Path *subpath; + + subpath = get_cheapest_parameterized_child_path(root, + childrel, + required_outer); + if (subpath == NULL) + { + /* failed to make a suitable path for this child */ + subpaths_valid = false; + break; + } + subpaths = accumulate_append_subpath(subpaths, subpath); + } + + if (subpaths_valid) + add_path(rel, (Path *) + create_append_path_compat(rel, subpaths, required_outer, 0)); + } } From c8851e78ac4e6e8acaf624d41f8c8417b8d0182b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Mar 2017 18:05:50 +0300 Subject: [PATCH 0253/1124] improve function replace_tlist_varnos() --- src/nodes_common.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index abf37f09..494cd9ad 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -15,6 +15,7 @@ #include "access/sysattr.h" #include "optimizer/restrictinfo.h" #include "optimizer/var.h" +#include "rewrite/rewriteManip.h" #include "utils/memutils.h" @@ -112,31 +113,19 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) return result; } -/* Replace Vars' varnos with the value provided by 'parent' */ +/* Replace 'varno' of child's Vars with the 'append_rel_rti' */ static List * -replace_tlist_varnos(List *child_tlist, RelOptInfo *parent) +replace_tlist_varnos(List *tlist, Index old_varno, Index new_varno) { - ListCell *lc; - List *result = NIL; - int i = 1; /* resnos begin with 1 */ - - foreach (lc, child_tlist) - { - Var *var = (Var *) ((TargetEntry *) lfirst(lc))->expr; - Var *newvar = (Var *) palloc(sizeof(Var)); + List *temp_tlist; - Assert(IsA(var, Var)); + 
AssertArg(old_varno != 0); + AssertArg(new_varno != 0); - *newvar = *var; - newvar->varno = parent->relid; - newvar->varnoold = parent->relid; + temp_tlist = copyObject(tlist); + ChangeVarNodes((Node *) temp_tlist, old_varno, new_varno, 0); - result = lappend(result, makeTargetEntry((Expr *) newvar, - i++, /* item's index */ - NULL, false)); - } - - return result; + return temp_tlist; } /* Append partition attribute in case it's not present in target list */ @@ -421,7 +410,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Replace rel's tlist with a matching one */ if (!cscan->scan.plan.targetlist) - tlist = replace_tlist_varnos(child_plan->targetlist, rel); + tlist = replace_tlist_varnos(child_plan->targetlist, + child_rel->relid, + rel->relid); /* Add partition attribute if necessary (for ExecQual()) */ child_plan->targetlist = append_part_attr_to_tlist(child_plan->targetlist, @@ -431,7 +422,8 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Now make custom_scan_tlist match child plans' targetlists */ if (!cscan->custom_scan_tlist) cscan->custom_scan_tlist = replace_tlist_varnos(child_plan->targetlist, - rel); + child_rel->relid, + rel->relid); } } From 1a97b11985dd0eadd5dac16f56aeb3bbbfdfc5d6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Mar 2017 19:10:13 +0300 Subject: [PATCH 0254/1124] fix tlist rebuild logic in function create_append_plan_common() --- src/nodes_common.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 494cd9ad..ad6bfa8c 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -396,24 +396,31 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, cscan = makeNode(CustomScan); cscan->custom_scan_tlist = NIL; /* initial value (empty list) */ - cscan->scan.plan.targetlist = NIL; if (custom_plans) { ListCell *lc1, *lc2; + bool processed_rel_tlist = false; + + Assert(list_length(rpath->cpath.custom_paths) == 
list_length(custom_plans)); forboth (lc1, rpath->cpath.custom_paths, lc2, custom_plans) { Plan *child_plan = (Plan *) lfirst(lc2); RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; - /* Replace rel's tlist with a matching one */ - if (!cscan->scan.plan.targetlist) + /* Replace rel's tlist with a matching one (for ExecQual()) */ + if (!processed_rel_tlist) + { tlist = replace_tlist_varnos(child_plan->targetlist, child_rel->relid, rel->relid); + /* Done, new target list has been built */ + processed_rel_tlist = true; + } + /* Add partition attribute if necessary (for ExecQual()) */ child_plan->targetlist = append_part_attr_to_tlist(child_plan->targetlist, child_rel->relid, From 75f2c0ff480bd19acac336204f5182f2cfecfcdd Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 3 Mar 2017 19:18:55 +0300 Subject: [PATCH 0255/1124] rename test pathman_utility_stmt_hooking -> pathman_utility_stmt --- Makefile | 2 +- ...athman_utility_stmt_hooking.out => pathman_utility_stmt.out} | 0 ...athman_utility_stmt_hooking.sql => pathman_utility_stmt.sql} | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename expected/{pathman_utility_stmt_hooking.out => pathman_utility_stmt.out} (100%) rename sql/{pathman_utility_stmt_hooking.sql => pathman_utility_stmt.sql} (100%) diff --git a/Makefile b/Makefile index 80a74d0b..f13ceeb4 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ REGRESS = pathman_basic \ pathman_permissions \ pathman_rowmarks \ pathman_runtime_nodes \ - pathman_utility_stmt_hooking \ + pathman_utility_stmt \ pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_utility_stmt_hooking.out b/expected/pathman_utility_stmt.out similarity index 100% rename from expected/pathman_utility_stmt_hooking.out rename to expected/pathman_utility_stmt.out diff --git a/sql/pathman_utility_stmt_hooking.sql b/sql/pathman_utility_stmt.sql similarity index 100% rename from sql/pathman_utility_stmt_hooking.sql rename to 
sql/pathman_utility_stmt.sql From ab15beed4ccc22525a80e9e06b6f0965efdbe5da Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 3 Mar 2017 19:34:14 +0300 Subject: [PATCH 0256/1124] rewrited create_range_partitions() funcs using internal C function --- range.sql | 78 +++++++++++++++++++++++--------------------- src/pl_range_funcs.c | 62 +++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 37 deletions(-) diff --git a/range.sql b/range.sql index bfbc867c..c5f4f6bc 100644 --- a/range.sql +++ b/range.sql @@ -96,12 +96,14 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; + v_rows_count BIGINT; + v_atttype REGTYPE; + v_tablespace TEXT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER; + i INTEGER; BEGIN attribute := lower(attribute); @@ -159,20 +161,11 @@ BEGIN PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Create first partition */ - FOR i IN 1..p_count - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', - v_atttype::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_tablespace(parent_relid); - - start_value := start_value + p_interval; - END LOOP; + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_bounds(start_value, p_interval, p_count), + NULL, + NULL); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -185,7 +178,7 @@ BEGIN PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - RETURN p_count; + RETURN part_count; END $$ LANGUAGE plpgsql; @@ -202,11 +195,12 @@ CREATE OR REPLACE FUNCTION 
@extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; - end_value start_value%TYPE; - i INTEGER; + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER; + i INTEGER; BEGIN attribute := lower(attribute); @@ -264,17 +258,11 @@ BEGIN PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* create first partition */ - FOR i IN 1..p_count - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); - - start_value := start_value + p_interval; - END LOOP; + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_bounds(start_value, p_interval, p_count), + NULL, + NULL); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -483,6 +471,22 @@ RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' LANGUAGE C; +CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' +LANGUAGE C; + + /* * Split RANGE partition */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 52c36105..86cbd1aa 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -70,6 +70,7 @@ PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( validate_interval_value ); PG_FUNCTION_INFO_V1( create_range_partitions_internal ); +PG_FUNCTION_INFO_V1( generate_bounds ); /* @@ -1109,3 +1110,64 @@ 
create_range_partitions_internal(PG_FUNCTION_ARGS) PG_RETURN_INT32(ndatums-1); } + + +Datum +generate_bounds(PG_FUNCTION_ARGS) +{ + /* input params */ + Datum value = PG_GETARG_DATUM(0); + Oid v_type = get_fn_expr_argtype(fcinfo->flinfo, 0); + Datum interval = PG_GETARG_DATUM(1); + Oid i_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + int count = PG_GETARG_INT32(2); + int i; + + /* operator */ + Oid plus_op_func; + Datum plus_op_result; + Oid plus_op_result_type; + + /* array */ + ArrayType *arr; + int16 elemlen; + bool elembyval; + char elemalign; + Datum *datums; + + if (count < 1) + elog(ERROR, "Partitions count must be greater than zero"); + + /* Find suitable addition operator for given value and interval */ + extract_op_func_and_ret_type("+", v_type, i_type, + &plus_op_func, + &plus_op_result_type); + + get_typlenbyvalalign(v_type, &elemlen, &elembyval, &elemalign); + + datums = palloc(sizeof(Datum) * (count + 1)); + // datums[0] = datumCopy(value, elembyval, elemlen); + datums[0] = value; + + /* calculate bounds */ + for (i = 1; i <= count; i++) + { + /* Invoke addition operator and get a result */ + plus_op_result = OidFunctionCall2(plus_op_func, value, interval); + + if (plus_op_result_type != v_type) + plus_op_result = perform_type_cast(plus_op_result, + plus_op_result_type, + v_type, NULL); + + value = datums[i] = plus_op_result; + } + + /* build an array based on calculated datums */ + arr = construct_array(datums, count + 1, v_type, + elemlen, elembyval, elemalign); + + pfree(datums); + + PG_RETURN_ARRAYTYPE_P(arr); +} From 1d45b48b5bca3964d4534d702251e4d15e8ddd86 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 6 Mar 2017 12:25:41 +0300 Subject: [PATCH 0257/1124] broken tests fixed --- range.sql | 28 ++++++++++++++++------------ src/pl_range_funcs.c | 1 - 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/range.sql b/range.sql index c5f4f6bc..6b104d82 100644 --- a/range.sql +++ b/range.sql @@ -102,7 +102,7 @@ DECLARE v_max 
start_value%TYPE; v_cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - part_count INTEGER; + part_count INTEGER := 0; i INTEGER; BEGIN @@ -161,11 +161,13 @@ BEGIN PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - part_count := @extschema@.create_range_partitions_internal( - parent_relid, - @extschema@.generate_bounds(start_value, p_interval, p_count), - NULL, - NULL); + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_bounds(start_value, p_interval, p_count), + NULL, + NULL); + END IF; /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -199,7 +201,7 @@ DECLARE v_max start_value%TYPE; v_cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - part_count INTEGER; + part_count INTEGER := 0; i INTEGER; BEGIN @@ -258,11 +260,13 @@ BEGIN PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - part_count := @extschema@.create_range_partitions_internal( - parent_relid, - @extschema@.generate_bounds(start_value, p_interval, p_count), - NULL, - NULL); + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_bounds(start_value, p_interval, p_count), + NULL, + NULL); + END IF; /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 86cbd1aa..d0851f9f 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -1146,7 +1146,6 @@ generate_bounds(PG_FUNCTION_ARGS) get_typlenbyvalalign(v_type, &elemlen, &elembyval, &elemalign); datums = palloc(sizeof(Datum) * (count + 1)); - // datums[0] = datumCopy(value, elembyval, elemlen); datums[0] = value; /* calculate bounds */ From e961ef28ad5bf3ff7c73b715bf27ab6340d37223 Mon Sep 17 
00:00:00 2001 From: Ildar Musin Date: Mon, 6 Mar 2017 17:50:19 +0300 Subject: [PATCH 0258/1124] added add_range_partitions() function --- range.sql | 61 +++++++++++++++++++++++++++++++++++----- src/pl_range_funcs.c | 66 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 7 deletions(-) diff --git a/range.sql b/range.sql index 6b104d82..00eba7b8 100644 --- a/range.sql +++ b/range.sql @@ -476,20 +476,33 @@ LANGUAGE C; CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( - p_start ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER) + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' LANGUAGE C; - CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( - p_start ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER) + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' LANGUAGE C; +-- CREATE OR REPLACE FUNCTION @extschema@.generate_bounds_by_range( +-- p_start ANYELEMENT, +-- p_end ANYELEMENT, +-- p_interval INTERVAL) +-- RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds_by_range' +-- LANGUAGE C; + +-- CREATE OR REPLACE FUNCTION @extschema@.generate_bounds_by_range( +-- p_start ANYELEMENT, +-- p_end ANYELEMENT, +-- p_interval ANYELEMENT) +-- RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds_by_range' +-- LANGUAGE C; + /* * Split RANGE partition @@ -871,6 +884,40 @@ END $$ LANGUAGE plpgsql; + +/* + * Add multiple partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partitions( + parent_relid REGCLASS, + bounds ANYARRAY, + relnames TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER; +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, 
+ bounds, + relnames, + tablespaces); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + + /* * Drop range partition */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index d0851f9f..3ce53573 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -71,6 +71,7 @@ PG_FUNCTION_INFO_V1( validate_interval_value ); PG_FUNCTION_INFO_V1( create_range_partitions_internal ); PG_FUNCTION_INFO_V1( generate_bounds ); +// PG_FUNCTION_INFO_V1( generate_bounds_by_range ); /* @@ -1170,3 +1171,68 @@ generate_bounds(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(arr); } + + +// Datum +// generate_bounds_by_range(PG_FUNCTION_ARGS) +// { +// /* input params */ +// Datum start = PG_GETARG_DATUM(0); +// Datum end = PG_GETARG_DATUM(1); +// Oid v_type = get_fn_expr_argtype(fcinfo->flinfo, 0); +// Datum interval = PG_GETARG_DATUM(2); +// Oid i_type = get_fn_expr_argtype(fcinfo->flinfo, 2); +// int i; + +// /* operators */ +// Oid plus_op_func; +// Datum plus_op_result; +// Oid plus_op_result_type; + +// FmgrInfo cmp_func; + +// /* array */ +// ArrayType *arr; +// int16 elemlen; +// bool elembyval; +// char elemalign; +// Datum *datums; +// List *datum_list = NIL; + +// /* Find suitable addition operator for given value and interval */ +// extract_op_func_and_ret_type("+", v_type, i_type, +// &plus_op_func, +// &plus_op_result_type); + +// /* Find comparison operator */ +// fill_type_cmp_fmgr_info(&cmp_func, +// getBaseType(v_type), +// getBaseType(v_type)); + +// while (DatumGetInt32(FunctionCall2(cmp_func, start, end)) < 0) +// { +// /* Invoke addition operator and get a result */ +// plus_op_result = OidFunctionCall2(plus_op_func, start, interval); + +// if (plus_op_result_type != v_type) +// plus_op_result = perform_type_cast(plus_op_result, +// plus_op_result_type, +// v_type, NULL); +// start = plus_op_result; +// datum_list = lappend(datum_list, start); +// } + +// datums = 
palloc(sizeof(Datum) * list_length(datum_list)); +// foreach(lc, datum_list) +// datums[i++] = (Datum) lfirst(lc); + +// /* build an array based on calculated datums */ +// get_typlenbyvalalign(v_type, &elemlen, &elembyval, &elemalign); +// arr = construct_array(datums, count + 1, v_type, +// elemlen, elembyval, elemalign); + +// pfree(datums); +// list_free(datum_list); + +// PG_RETURN_ARRAYTYPE_P(arr); +// } From c0854eb859aecd218d8a5466a125e0ae194f968b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 6 Mar 2017 22:27:48 +0300 Subject: [PATCH 0259/1124] make use of function translate_col_privs() --- src/include/pathman.h | 23 +++++++- src/partition_creation.c | 50 ++++++++++++----- src/partition_filter.c | 11 +++- src/pg_pathman.c | 115 +++++++++++++++++++++++++++++---------- 4 files changed, 151 insertions(+), 48 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index 2b1208de..6c8c17e5 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -89,14 +89,31 @@ Oid get_pathman_config_relid(bool invalid_is_ok); Oid get_pathman_config_params_relid(bool invalid_is_ok); -void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, - PathKey *pathkeyAsc, PathKey *pathkeyDesc); - +/* + * Create RelOptInfo & RTE for a selected partition. 
+ */ Index append_child_relation(PlannerInfo *root, Relation parent_relation, Index parent_rti, int ir_index, Oid child_oid, List *wrappers); +/* + * Copied from PostgreSQL (prepunion.c) + */ +void make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars); + +Bitmapset *translate_col_privs(const Bitmapset *parent_privs, + List *translated_vars); + + +/* + * Copied from PostgreSQL (allpaths.c) + */ +void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, + PathKey *pathkeyAsc, PathKey *pathkeyDesc); + + typedef struct { const Node *orig; /* examined expression */ diff --git a/src/partition_creation.c b/src/partition_creation.c index a84b3480..3baf067b 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -857,12 +857,16 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { - Relation pg_class_rel, + Relation parent_rel, + partition_rel, + pg_class_rel, pg_attribute_rel; TupleDesc pg_class_desc, pg_attribute_desc; + List *translated_vars; + HeapTuple htup; ScanKeyData skey[2]; SysScanDesc scan; @@ -872,6 +876,16 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) Snapshot snapshot; + /* Both parent & partition have already been locked */ + parent_rel = heap_open(parent_relid, NoLock); + partition_rel = heap_open(partition_relid, NoLock); + + make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); + + heap_close(parent_rel, NoLock); + heap_close(partition_rel, NoLock); + + /* Open catalog's relations */ pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); pg_attribute_rel = heap_open(AttributeRelationId, RowExclusiveLock); @@ -914,9 +928,9 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) if (HeapTupleIsValid(htup = systable_getnext(scan))) { ItemPointerData iptr; - Datum values[Natts_pg_class] = { 
(Datum) 0 }; - bool nulls[Natts_pg_class] = { false }; - bool replaces[Natts_pg_class] = { false }; + Datum values[Natts_pg_class] = { (Datum) 0 }; + bool nulls[Natts_pg_class] = { false }; + bool replaces[Natts_pg_class] = { false }; /* Copy ItemPointer of this tuple */ iptr = htup->t_self; @@ -950,9 +964,8 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) BTEqualStrategyNumber, F_INT2GT, Int16GetDatum(InvalidAttrNumber)); - scan = systable_beginscan(pg_attribute_rel, - AttributeRelidNumIndexId, - true, snapshot, 2, skey); + scan = systable_beginscan(pg_attribute_rel, AttributeRelidNumIndexId, + true, snapshot, lengthof(skey), skey); /* Go through the list of parent's columns */ while (HeapTupleIsValid(htup = systable_getnext(scan))) @@ -963,6 +976,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) AttrNumber cur_attnum; bool cur_attnum_null; + Var *cur_var; /* Get parent column's ACL */ acl_datum = heap_getattr(htup, Anum_pg_attribute_attacl, @@ -980,11 +994,18 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) acl_column->attlen); } - /* Fetch number of current column */ + /* Fetch number of current column (parent) */ cur_attnum = DatumGetInt16(heap_getattr(htup, Anum_pg_attribute_attnum, pg_attribute_desc, &cur_attnum_null)); Assert(cur_attnum_null == false); /* must not be NULL! 
*/ + /* Fetch Var of partition's corresponding column */ + cur_var = (Var *) list_nth(translated_vars, cur_attnum - 1); + if (!cur_var) + continue; /* column is dropped */ + + Assert(cur_var->varattno != InvalidAttrNumber); + /* Search for 'partition_relid' */ ScanKeyInit(&subskey[0], Anum_pg_attribute_attrelid, @@ -995,19 +1016,18 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) ScanKeyInit(&subskey[1], Anum_pg_attribute_attnum, BTEqualStrategyNumber, F_INT2EQ, - Int16GetDatum(cur_attnum)); + Int16GetDatum(cur_var->varattno)); /* partition's column */ - subscan = systable_beginscan(pg_attribute_rel, - AttributeRelidNumIndexId, - true, snapshot, 2, subskey); + subscan = systable_beginscan(pg_attribute_rel, AttributeRelidNumIndexId, + true, snapshot, lengthof(subskey), subskey); /* There should be exactly one tuple (our child's column) */ if (HeapTupleIsValid(subhtup = systable_getnext(subscan))) { ItemPointerData iptr; - Datum values[Natts_pg_attribute] = { (Datum) 0 }; - bool nulls[Natts_pg_attribute] = { false }; - bool replaces[Natts_pg_attribute] = { false }; + Datum values[Natts_pg_attribute] = { (Datum) 0 }; + bool nulls[Natts_pg_attribute] = { false }; + bool replaces[Natts_pg_attribute] = { false }; /* Copy ItemPointer of this tuple */ iptr = subhtup->t_self; diff --git a/src/partition_filter.c b/src/partition_filter.c index 4e8b171a..a5e80e6f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -10,6 +10,7 @@ #include "init.h" #include "nodes_common.h" +#include "pathman.h" #include "partition_creation.h" #include "partition_filter.h" #include "planner_tree_modification.h" @@ -248,6 +249,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) *parent_rte; Index child_rte_idx; ResultRelInfo *child_result_rel_info; + List *translated_vars; /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); @@ -264,16 +266,21 @@ scan_result_parts_storage(Oid 
partid, ResultPartsStorage *parts_storage) child_rel = heap_open(partid, NoLock); CheckValidResultRel(child_rel, parts_storage->command_type); + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); - child_rte->rtekind = RTE_RELATION; child_rte->relid = partid; child_rte->relkind = child_rel->rd_rel->relkind; child_rte->eref = parent_rte->eref; child_rte->requiredPerms = parent_rte->requiredPerms; child_rte->checkAsUser = parent_rte->checkAsUser; - child_rte->insertedCols = parent_rte->insertedCols; + child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, + translated_vars); + child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, + translated_vars); /* Check permissions for partition */ ExecCheckRTPerms(list_make1(child_rte), true); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d0f80724..bf493fb2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -21,6 +21,7 @@ #include "runtime_merge_append.h" #include "postgres.h" +#include "access/sysattr.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -73,11 +74,6 @@ static double estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy); -/* Copied from PostgreSQL (prepunion.c) */ -static void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars); - - /* Copied from PostgreSQL (allpaths.c) */ static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, @@ -240,12 +236,10 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Create RangeTblEntry for child relation */ child_rte = copyObject(parent_rte); - child_rte->relid = child_oid; - child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->inh = false; /* relation has no children */ - child_rte->requiredPerms = 0; - - /* FIXME: call 
translate_col_privs() on this RTE's column bitmapsets */ + child_rte->relid = child_oid; + child_rte->relkind = child_relation->rd_rel->relkind; + child_rte->inh = false; /* relation has no children */ + child_rte->requiredPerms = 0; /* perform all checks on parent */ /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); @@ -261,9 +255,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Build an AppendRelInfo for this child */ appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = parent_rti; - appinfo->child_relid = childRTindex; - appinfo->parent_reloid = parent_rte->relid; + appinfo->parent_relid = parent_rti; + appinfo->child_relid = childRTindex; + appinfo->parent_reloid = parent_rte->relid; make_inh_translation_list(parent_relation, child_relation, childRTindex, &appinfo->translated_vars); @@ -271,6 +265,18 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); + + if (parent_rte->relid != child_oid) + { + child_rte->selectedCols = translate_col_privs(parent_rte->selectedCols, + appinfo->translated_vars); + child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, + appinfo->translated_vars); + child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, + appinfo->translated_vars); + } + + /* Adjust target list for this child */ adjust_rel_targetlist_compat(root, child_rel, parent_rel, appinfo); @@ -352,22 +358,21 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, { child_rowmark = makeNode(PlanRowMark); - child_rowmark->rti = childRTindex; - child_rowmark->prti = parent_rti; - child_rowmark->rowmarkId = parent_rowmark->rowmarkId; + child_rowmark->rti = childRTindex; + child_rowmark->prti = parent_rti; + child_rowmark->rowmarkId = parent_rowmark->rowmarkId; /* Reselect rowmark type, 
because relkind might not match parent */ - child_rowmark->markType = select_rowmark_type(child_rte, - parent_rowmark->strength); - child_rowmark->allMarkTypes = (1 << child_rowmark->markType); - child_rowmark->strength = parent_rowmark->strength; - child_rowmark->waitPolicy = parent_rowmark->waitPolicy; - child_rowmark->isParent = false; - - /* Include child's rowmark type in parent's allMarkTypes */ - parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; + child_rowmark->markType = select_rowmark_type(child_rte, + parent_rowmark->strength); + child_rowmark->allMarkTypes = (1 << child_rowmark->markType); + child_rowmark->strength = parent_rowmark->strength; + child_rowmark->waitPolicy = parent_rowmark->waitPolicy; + child_rowmark->isParent = false; root->rowMarks = lappend(root->rowMarks, child_rowmark); + /* Include child's rowmark type in parent's allMarkTypes */ + parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; parent_rowmark->isParent = true; } @@ -1572,6 +1577,61 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, } } + +/* + * translate_col_privs + * Translate a bitmapset representing per-column privileges from the + * parent rel's attribute numbering to the child's. + * + * The only surprise here is that we don't translate a parent whole-row + * reference into a child whole-row reference. That would mean requiring + * permissions on all child columns, which is overly strict, since the + * query is really only going to reference the inherited columns. Instead + * we set the per-column bits for all inherited columns. 
+ */ +Bitmapset * +translate_col_privs(const Bitmapset *parent_privs, + List *translated_vars) +{ + Bitmapset *child_privs = NULL; + bool whole_row; + int attno; + ListCell *lc; + + /* System attributes have the same numbers in all tables */ + for (attno = FirstLowInvalidHeapAttributeNumber + 1; attno < 0; attno++) + { + if (bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, + parent_privs)) + child_privs = bms_add_member(child_privs, + attno - FirstLowInvalidHeapAttributeNumber); + } + + /* Check if parent has whole-row reference */ + whole_row = bms_is_member(InvalidAttrNumber - FirstLowInvalidHeapAttributeNumber, + parent_privs); + + /* And now translate the regular user attributes, using the vars list */ + attno = InvalidAttrNumber; + foreach(lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + attno++; + if (var == NULL) /* ignore dropped columns */ + continue; + Assert(IsA(var, Var)); + if (whole_row || + bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, + parent_privs)) + child_privs = bms_add_member(child_privs, + var->varattno - FirstLowInvalidHeapAttributeNumber); + } + + return child_privs; +} + + /* * make_inh_translation_list * Build the list of translations from parent Vars to child Vars for @@ -1579,7 +1639,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, * * For paranoia's sake, we match type/collation as well as attribute name. 
*/ -static void +void make_inh_translation_list(Relation oldrelation, Relation newrelation, Index newvarno, List **translated_vars) { @@ -1685,7 +1745,6 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, *translated_vars = vars; } - /* * set_append_rel_pathlist * Build access paths for an "append relation" From d7d77f934e2da8ce81f4b3dc05416e79e8e256b4 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 7 Mar 2017 15:43:26 +0300 Subject: [PATCH 0260/1124] remove trash --- range.sql | 14 ---------- src/pl_range_funcs.c | 66 -------------------------------------------- 2 files changed, 80 deletions(-) diff --git a/range.sql b/range.sql index 00eba7b8..8bc559c2 100644 --- a/range.sql +++ b/range.sql @@ -489,20 +489,6 @@ CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' LANGUAGE C; --- CREATE OR REPLACE FUNCTION @extschema@.generate_bounds_by_range( --- p_start ANYELEMENT, --- p_end ANYELEMENT, --- p_interval INTERVAL) --- RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds_by_range' --- LANGUAGE C; - --- CREATE OR REPLACE FUNCTION @extschema@.generate_bounds_by_range( --- p_start ANYELEMENT, --- p_end ANYELEMENT, --- p_interval ANYELEMENT) --- RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds_by_range' --- LANGUAGE C; - /* * Split RANGE partition diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 3ce53573..d0851f9f 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -71,7 +71,6 @@ PG_FUNCTION_INFO_V1( validate_interval_value ); PG_FUNCTION_INFO_V1( create_range_partitions_internal ); PG_FUNCTION_INFO_V1( generate_bounds ); -// PG_FUNCTION_INFO_V1( generate_bounds_by_range ); /* @@ -1171,68 +1170,3 @@ generate_bounds(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(arr); } - - -// Datum -// generate_bounds_by_range(PG_FUNCTION_ARGS) -// { -// /* input params */ -// Datum start = PG_GETARG_DATUM(0); -// Datum end = PG_GETARG_DATUM(1); -// Oid v_type = 
get_fn_expr_argtype(fcinfo->flinfo, 0); -// Datum interval = PG_GETARG_DATUM(2); -// Oid i_type = get_fn_expr_argtype(fcinfo->flinfo, 2); -// int i; - -// /* operators */ -// Oid plus_op_func; -// Datum plus_op_result; -// Oid plus_op_result_type; - -// FmgrInfo cmp_func; - -// /* array */ -// ArrayType *arr; -// int16 elemlen; -// bool elembyval; -// char elemalign; -// Datum *datums; -// List *datum_list = NIL; - -// /* Find suitable addition operator for given value and interval */ -// extract_op_func_and_ret_type("+", v_type, i_type, -// &plus_op_func, -// &plus_op_result_type); - -// /* Find comparison operator */ -// fill_type_cmp_fmgr_info(&cmp_func, -// getBaseType(v_type), -// getBaseType(v_type)); - -// while (DatumGetInt32(FunctionCall2(cmp_func, start, end)) < 0) -// { -// /* Invoke addition operator and get a result */ -// plus_op_result = OidFunctionCall2(plus_op_func, start, interval); - -// if (plus_op_result_type != v_type) -// plus_op_result = perform_type_cast(plus_op_result, -// plus_op_result_type, -// v_type, NULL); -// start = plus_op_result; -// datum_list = lappend(datum_list, start); -// } - -// datums = palloc(sizeof(Datum) * list_length(datum_list)); -// foreach(lc, datum_list) -// datums[i++] = (Datum) lfirst(lc); - -// /* build an array based on calculated datums */ -// get_typlenbyvalalign(v_type, &elemlen, &elembyval, &elemalign); -// arr = construct_array(datums, count + 1, v_type, -// elemlen, elembyval, elemalign); - -// pfree(datums); -// list_free(datum_list); - -// PG_RETURN_ARRAYTYPE_P(arr); -// } From 6c6e05c06f58c1292f094d2994fe565fbe841256 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Mar 2017 15:59:42 +0300 Subject: [PATCH 0261/1124] pathman_inserts: test a gap case --- expected/pathman_inserts.out | 17 ++++++++++++++++- sql/pathman_inserts.sql | 8 ++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 3b713b0b..eec46463 100644 
--- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -856,6 +856,21 @@ NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: 256 | 128 | test_inserts.storage_14 (27 rows) +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); +NOTICE: sequence "test_gap_seq" does not exist, skipping + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 2 other objects DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +NOTICE: drop cascades to 20 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 53d47abf..3737baa6 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -165,6 +165,14 @@ FROM generate_series(-2, 130, 5) i RETURNING e * 2, b, tableoid::regclass; +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +DROP TABLE test_inserts.test_gap CASCADE; + DROP SCHEMA test_inserts CASCADE; DROP EXTENSION pg_pathman CASCADE; From 44e0ebe6dae9a25141e1bd8372c3d42ad40759f6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Mar 2017 17:45:45 +0300 Subject: [PATCH 0262/1124] tests for function postprocess_child_table_and_atts() --- expected/pathman_permissions.out | 69 +++++++++++++++++++++++++++++++- 
sql/pathman_permissions.sql | 38 +++++++++++++++++- 2 files changed, 105 insertions(+), 2 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index eef1c993..55727aad 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -77,7 +77,7 @@ SELECT prepend_range_partition('permissions.user1_table'); permissions.user1_table_4 (1 row) -SELECT attname, attacl from pg_attribute +SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ @@ -166,6 +166,73 @@ NOTICE: 10 rows copied from permissions.user2_table_2 3 (1 row) +/* Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); +NOTICE: sequence "dropped_column_seq" does not exist, skipping + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) 
+ +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val | {user2=ar/user1} +(4 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val | {user2=ar/user1} + permissions.dropped_column_5 | val | {user2=ar/user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 5 other objects /* Finally reset user */ RESET ROLE; DROP OWNED BY user1; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index ac3483c8..43bf6ca6 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -62,7 +62,7 @@ GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ /* Should be able to prepend a partition */ SET 
ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); -SELECT attname, attacl from pg_attribute +SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ @@ -104,6 +104,42 @@ INSERT INTO permissions.user2_table SELECT generate_series(1, 30); SELECT drop_partitions('permissions.user2_table'); +/* Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; + +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; + +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + +DROP TABLE permissions.dropped_column 
CASCADE; + + /* Finally reset user */ RESET ROLE; From da061b4d2c1c55795365d11f23636c95e810fc8b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Mar 2017 18:52:38 +0300 Subject: [PATCH 0263/1124] restore previous behaviour for vanilla postgres (pathman_rel_pathlist_hook()) --- src/hooks.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 93ba5441..5864e2e5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -8,6 +8,7 @@ * ------------------------------------------------------------------------ */ +#include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" #include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" @@ -216,6 +217,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, root->parse->resultRelation == rti) return; +/* It's better to exit, since RowMarks might be broken (hook aims to fix them) */ +#ifndef NATIVE_EXPAND_RTE_HOOK + if (root->parse->commandType != CMD_SELECT && + root->parse->commandType != CMD_INSERT) + return; +#endif + /* Skip if this table is not allowed to act as parent (e.g. 
FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) return; From bcf8e7bdc77bd5434afa11a1bf55109ba1ea203d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Mar 2017 18:22:33 +0300 Subject: [PATCH 0264/1124] take NATIVE_EXPAND_RTE_HOOK into consideration in pathman_transform_query_walker() --- src/planner_tree_modification.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index fb0c8551..ff18611d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -8,6 +8,7 @@ * ------------------------------------------------------------------------ */ +#include "compat/expand_rte_hook.h" #include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" @@ -180,19 +181,31 @@ pathman_transform_query_walker(Node *node, void *context) static void disable_standard_inheritance(Query *parse) { - ListCell *lc; + ListCell *lc; + Index current_rti; /* current range table entry index */ - /* Exit if it's not a SELECT query */ +/* + * We can't handle non-SELECT queries unless + * there's a pathman_expand_inherited_rtentry_hook() + */ +#ifndef NATIVE_EXPAND_RTE_HOOK if (parse->commandType != CMD_SELECT) return; +#endif /* Walk through RangeTblEntries list */ + current_rti = 0; foreach (lc, parse->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - /* Operate only on simple (non-join etc) relations */ - if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION) + current_rti++; /* increment RTE index */ + Assert(current_rti != 0); + + /* Process only non-result base relations */ + if (rte->rtekind != RTE_RELATION || + rte->relkind != RELKIND_RELATION || + parse->resultRelation == current_rti) /* is it a result relation? 
*/ continue; /* Table may be partitioned */ From ab92ef49ebf42ed298840d11d21a7e44a067a7f1 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 14 Mar 2017 18:16:20 +0300 Subject: [PATCH 0265/1124] fix conislocal for inherited constraints --- src/partition_creation.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 61650ae7..b02a2e6e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -703,7 +703,6 @@ create_single_partition_internal(Oid parent_relid, NodeSetTag(&like_clause, T_TableLikeClause); like_clause.relation = copyObject(parent_rv); like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | - CREATE_TABLE_LIKE_CONSTRAINTS | CREATE_TABLE_LIKE_INDEXES | CREATE_TABLE_LIKE_STORAGE; From a3ef3ed902fd6c44422843d82f958235a483436c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Mar 2017 17:30:03 +0300 Subject: [PATCH 0266/1124] load PATHMAN_CONFIG lazily --- src/init.c | 12 +++--------- src/pg_pathman.c | 6 +++--- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/init.c b/src/init.c index 9d89730a..1011a198 100644 --- a/src/init.c +++ b/src/init.c @@ -830,9 +830,7 @@ read_pathman_config(void) { Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; - Oid relid; /* partitioned table */ - PartType parttype; /* partitioning type */ - text *attname; /* partitioned column name */ + Oid relid; /* partitioned table */ /* Extract Datums from tuple 'htup' */ heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); @@ -844,8 +842,6 @@ read_pathman_config(void) /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = DatumGetTextP(values[Anum_pathman_config_attname - 1]); /* Check that relation 'relid' exists */ if (get_rel_type_id(relid) == InvalidOid) @@ -857,10 +853,8 @@ read_pathman_config(void) errhint(INIT_ERROR_HINT))); } - 
/* Create or update PartRelationInfo for this partitioned table */ - refresh_pathman_relation_info(relid, parttype, - text_to_cstring(attname), - true); /* allow lazy prel loading */ + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(relid, NULL); } /* Clean resources */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index bf493fb2..4fa3f4bc 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -225,7 +225,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, *child_rowmark; Node *childqual; List *childquals; - ListCell *lc, + ListCell *lc1, *lc2; parent_rel = root->simple_rel_array[parent_rti]; @@ -288,9 +288,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, { childquals = NIL; - forboth(lc, wrappers, lc2, parent_rel->baserestrictinfo) + forboth(lc1, wrappers, lc2, parent_rel->baserestrictinfo) { - WrapperNode *wrap = (WrapperNode *) lfirst(lc); + WrapperNode *wrap = (WrapperNode *) lfirst(lc1); Node *new_clause; bool always_true; From 36a338bb11750dacd8951a9513e21c7afec6647d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Mar 2017 17:37:00 +0300 Subject: [PATCH 0267/1124] fix isolation tests --- expected/insert_nodes.out | 16 ++++++++-------- specs/insert_nodes.spec | 1 + 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index a6791621..9e22f1bd 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -1,9 +1,9 @@ Parsed test spec with 2 sessions starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions -create_range_partitions +set_spawn_using_bgw -1 + step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; @@ -22,9 +22,9 @@ consrc ((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions 
-create_range_partitions +set_spawn_using_bgw -1 + step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; @@ -44,9 +44,9 @@ consrc ((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions -create_range_partitions +set_spawn_using_bgw -1 + step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; @@ -67,9 +67,9 @@ consrc ((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions -create_range_partitions +set_spawn_using_bgw -1 + step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 93df4102..10e1a1d8 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -3,6 +3,7 @@ setup CREATE EXTENSION pg_pathman; CREATE TABLE range_rel(id serial primary key); SELECT create_range_partitions('range_rel', 'id', 1, 100, 1); + SELECT set_spawn_using_bgw('range_rel', true); } teardown From ceecae31b44d0eb28f4bbd91a27f61722c08673f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Mar 2017 17:52:56 +0300 Subject: [PATCH 0268/1124] reformat isolation tests --- expected/insert_nodes.out | 63 +++++++++++++++++++----- specs/for_update.spec | 16 +++--- specs/insert_nodes.spec | 30 ++++++----- specs/rollback_on_create_partitions.spec | 24 ++++----- 4 files changed, 89 insertions(+), 44 deletions(-) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 9e22f1bd..64758aef 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -5,20 +5,30 @@ set_spawn_using_bgw step s1b: BEGIN; -step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: 
SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) step s2b: BEGIN; -step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions @@ -26,21 +36,32 @@ set_spawn_using_bgw step s1b: BEGIN; -step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM 
pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) + ((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions @@ -50,20 +71,32 @@ set_spawn_using_bgw step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) + ((id >= 201) AND (id < 301)) step s2b: BEGIN; -step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) + ((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions @@ -71,14 +104,20 @@ set_spawn_using_bgw step s1b: BEGIN; -step s1_insert_150: INSERT 
INTO range_rel SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s2r: ROLLBACK; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; +step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; consrc + ((id >= 1) AND (id < 101)) + ((id >= 101) AND (id < 201)) + ((id >= 201) AND (id < 301)) diff --git a/specs/for_update.spec b/specs/for_update.spec index 55ea24af..f7a8f758 100644 --- a/specs/for_update.spec +++ b/specs/for_update.spec @@ -13,16 +13,16 @@ teardown } session "s1" -step "s1_b" { begin; } -step "s1_c" { commit; } -step "s1_r" { rollback; } -step "s1_update" { update test_tbl set id = 2 where id = 1; } +step "s1_b" { begin; } +step "s1_c" { commit; } +step "s1_r" { rollback; } +step "s1_update" { update test_tbl set id = 2 where id = 1; } session "s2" -step "s2_b" { begin; } -step "s2_c" { commit; } -step "s2_select_locked" { select * from test_tbl where id = 1 for share; } -step "s2_select" { select * from test_tbl where id = 1; } +step "s2_b" { begin; } +step "s2_c" { commit; } +step "s2_select_locked" { select * from test_tbl where id = 1 for share; } +step "s2_select" { select * from test_tbl where id = 1; } permutation "s1_b" "s1_update" "s2_select" "s1_r" diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 10e1a1d8..3bb67746 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -14,20 +14,26 @@ teardown } session "s1" -step "s1b" { BEGIN; } -step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } -step "s1_insert_300" { INSERT INTO range_rel SELECT 
generate_series(151, 300); } -step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; } -step "s1r" { ROLLBACK; } -step "s1c" { COMMIT; } +step "s1b" { BEGIN; } +step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } +step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } +step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; } +step "s1r" { ROLLBACK; } +step "s1c" { COMMIT; } session "s2" -step "s2b" { BEGIN; } -step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } -step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; } -step "s2r" { ROLLBACK; } -step "s2c" { COMMIT; } +step "s2b" { BEGIN; } +step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } +step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } +step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass + ORDER BY c.oid; } +step "s2r" { ROLLBACK; } +step "s2c" { COMMIT; } # Rollback first transactions permutation "s1b" "s1_insert_150" "s1r" "s1_show_partitions" "s2b" "s2_insert_150" "s2c" "s2_show_partitions" diff --git a/specs/rollback_on_create_partitions.spec b/specs/rollback_on_create_partitions.spec index 41fc48d1..a24c2897 100644 --- a/specs/rollback_on_create_partitions.spec +++ b/specs/rollback_on_create_partitions.spec @@ -11,18 +11,18 @@ teardown } session "s1" -step "begin" { 
BEGIN; } -step "rollback" { ROLLBACK; } -step "commit" { COMMIT; } -step "insert_data" { INSERT INTO range_rel SELECT generate_series(1, 10000); } -step "create_partitions" { SELECT create_range_partitions('range_rel', 'id', 1, 1000); } -step "drop_partitions" { SELECT drop_partitions('range_rel'); } -step "savepoint_a" { SAVEPOINT a; } -step "rollback_a" { ROLLBACK TO SAVEPOINT a; } -step "savepoint_b" { SAVEPOINT b; } -step "rollback_b" { ROLLBACK TO SAVEPOINT b; } -step "savepoint_c" { SAVEPOINT c; } -step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } +step "begin" { BEGIN; } +step "rollback" { ROLLBACK; } +step "commit" { COMMIT; } +step "insert_data" { INSERT INTO range_rel SELECT generate_series(1, 10000); } +step "create_partitions" { SELECT create_range_partitions('range_rel', 'id', 1, 1000); } +step "drop_partitions" { SELECT drop_partitions('range_rel'); } +step "savepoint_a" { SAVEPOINT a; } +step "rollback_a" { ROLLBACK TO SAVEPOINT a; } +step "savepoint_b" { SAVEPOINT b; } +step "rollback_b" { ROLLBACK TO SAVEPOINT b; } +step "savepoint_c" { SAVEPOINT c; } +step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } permutation "begin" "insert_data" "create_partitions" "show_rel" "rollback" "show_rel" From bf826d2408695e2abec3c73f794c555a263302b6 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 13 Mar 2017 16:12:18 +0300 Subject: [PATCH 0269/1124] Add partial support (creation) of expression based partitions --- .gitignore | 2 + hash.sql | 18 +- init.sql | 18 +- sql/pathman_basic.sql | 4 +- src/hooks.c | 7 +- src/include/init.h | 7 +- src/include/partition_creation.h | 13 +- src/include/partition_filter.h | 4 +- src/include/pathman.h | 8 +- src/include/relation_info.h | 12 +- src/init.c | 53 +--- src/nodes_common.c | 8 +- src/partition_creation.c | 490 +++++++++++++++++++++++++++++-- src/partition_filter.c | 17 +- src/pg_pathman.c | 7 +- src/pl_funcs.c | 122 ++++++-- src/pl_hash_funcs.c | 51 +--- src/pl_range_funcs.c | 6 +- 
src/relation_info.c | 61 ++-- src/utility_stmt_hooking.c | 15 +- 20 files changed, 693 insertions(+), 230 deletions(-) diff --git a/.gitignore b/.gitignore index f0d2c2c4..9cf8da8f 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ regression.out *.gcno *.gcov pg_pathman--*.sql +tags +cscope* diff --git a/hash.sql b/hash.sql index 59a2ae64..0a1667d6 100644 --- a/hash.sql +++ b/hash.sql @@ -35,8 +35,7 @@ BEGIN PERFORM @extschema@.common_relation_checks(parent_relid, attribute); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) - VALUES (parent_relid, attribute, 1); + PERFORM @extschema@.add_to_pathman_config(parent_relid, attribute); /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, @@ -48,13 +47,13 @@ BEGIN /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); - /* Copy data */ + /* Copy data IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; + END IF; */ RETURN partitions_count; END @@ -299,14 +298,3 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; - -/* - * Build hash condition for a CHECK CONSTRAINT - */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( - attribute_type REGTYPE, - attribute TEXT, - partitions_count INT4, - partitions_index INT4) -RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' -LANGUAGE C STRICT; diff --git a/init.sql b/init.sql index 583080ad..9e25e9c2 100644 --- a/init.sql +++ b/init.sql @@ -35,6 +35,7 @@ LANGUAGE C; CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, attname TEXT NOT NULL, + atttype OID NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT, @@ 
-427,7 +428,7 @@ LANGUAGE plpgsql STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( relation REGCLASS, - p_attribute TEXT) + expression TEXT) RETURNS BOOLEAN AS $$ DECLARE @@ -450,8 +451,8 @@ BEGIN RAISE EXCEPTION 'relation "%" has already been partitioned', relation; END IF; - IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; + IF NOT @extschema@.is_expression_suitable(relation, expression) THEN + RAISE EXCEPTION 'partitioning expression "%" is not suitable', expression; END IF; /* Check if there are foreign keys that reference the relation */ @@ -467,7 +468,7 @@ BEGIN RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; END IF; - RETURN TRUE; + RETURN FALSE; END $$ LANGUAGE plpgsql; @@ -796,6 +797,15 @@ CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' LANGUAGE C STRICT; +/* + * Checks if expression is suitable + */ +CREATE OR REPLACE FUNCTION @extschema@.is_expression_suitable( + relid REGCLASS, + expr TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_expression_suitable' +LANGUAGE C STRICT; + /* * Check if regclass is date or timestamp. 
*/ diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 36dd7e8d..15c84b85 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -11,7 +11,9 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); -SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +:gdb +SELECT pg_sleep(10); +SELECT pathman.create_hash_partitions('test.hash_rel', 'value + 1', 3); ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; diff --git a/src/hooks.c b/src/hooks.c index 5864e2e5..75fec605 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -255,10 +255,11 @@ pathman_rel_pathlist_hook(PlannerInfo *root, int32 type_mod; TypeCacheEntry *tce; - /* Make Var from partition column */ - get_rte_attribute_type(rte, prel->attnum, + /* Make Var from patition column */ + /* FIX: this */ + get_rte_attribute_type(rte, 0, &vartypeid, &type_mod, &varcollid); - var = makeVar(rti, prel->attnum, vartypeid, type_mod, varcollid, 0); + var = makeVar(rti, 0, vartypeid, type_mod, varcollid, 0); var->location = -1; /* Determine operator type */ diff --git a/src/include/init.h b/src/include/init.h index 7b5459b0..ef8e06dd 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -132,7 +132,6 @@ void unload_config(void); void fill_prel_with_partitions(const Oid *partitions, const uint32 parts_count, - const char *part_column_name, PartRelationInfo *prel); /* Result of find_inheritance_children_array() */ @@ -149,11 +148,9 @@ find_children_status find_inheritance_children_array(Oid parentrelId, uint32 *children_size, Oid **children); -char *build_check_constraint_name_relid_internal(Oid relid, - AttrNumber attno); +char *build_check_constraint_name_relid_internal(Oid relid); -char *build_check_constraint_name_relname_internal(const char 
*relname, - AttrNumber attno); +char *build_check_constraint_name_relname_internal(const char *relname); char *build_sequence_name_internal(Oid relid); diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 0338fa4e..1481cad4 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -66,17 +66,24 @@ bool check_range_available(Oid parent_relid, /* HASH constraints */ Constraint * build_hash_check_constraint(Oid child_relid, - char *attname, + const char *expr, uint32 part_idx, uint32 part_count, Oid value_type); -Node * build_raw_hash_check_tree(char *attname, +Node * build_raw_hash_check_tree(const char *base_expr, uint32 part_idx, - uint32 part_count, Oid value_type); + uint32 part_count, + Oid relid, + Oid value_type); void drop_check_constraint(Oid relid, AttrNumber attnum); +/* expression parsing functions */ +Node *get_expression_node(Oid relid, const char *expr, bool analyze); +Oid get_partition_expr_type(Oid relid, const char *expr); + + /* Partitioning callback type */ typedef enum diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index af8d0993..d6a244b0 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -25,7 +25,9 @@ #endif -#define ERR_PART_ATTR_NULL "partitioned column's value should not be NULL" +#define ERR_PART_ATTR_NULL "partition expression's value should not be NULL" +#define ERR_PART_ATTR_MULTIPLE_RESULTS \ + "partition expression's value should be single, not set" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" diff --git a/src/include/pathman.h b/src/include/pathman.h index 6c8c17e5..4836baca 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -16,6 +16,7 @@ #include "rangeset.h" #include "postgres.h" +#include "fmgr.h" #include "nodes/makefuncs.h" #include "nodes/primnodes.h" #include "nodes/execnodes.h" @@ 
-43,11 +44,12 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 4 +#define Natts_pathman_config 5 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_attname 2 /* partitioned column (text) */ -#define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ -#define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ +#define Anum_pathman_config_atttype 3 /* partitioned atttype */ +#define Anum_pathman_config_parttype 4 /* partitioning type (1|2) */ +#define Anum_pathman_config_range_interval 5 /* interval for RANGE pt. (text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 587de24e..7fc3718a 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -15,6 +15,8 @@ #include "postgres.h" #include "access/attnum.h" #include "fmgr.h" +#include "nodes/nodes.h" +#include "nodes/primnodes.h" #include "port/atomics.h" #include "storage/lock.h" #include "utils/datum.h" @@ -126,9 +128,9 @@ typedef struct RangeEntry *ranges; /* per-partition range entry or NULL */ PartType parttype; /* partitioning type (HASH | RANGE) */ - AttrNumber attnum; /* partitioned column's index */ - Oid atttype; /* partitioned column's type */ - int32 atttypmod; /* partitioned column type modifier */ + Expr *expr; + Oid atttype; /* expression type */ + int32 atttypmod; /* expression type modifier */ bool attbyval; /* is partitioned column stored by value? 
*/ int16 attlen; /* length of the partitioned column's type */ int attalign; /* alignment of the part column's type */ @@ -191,8 +193,8 @@ PrelLastChild(const PartRelationInfo *prel) const PartRelationInfo *refresh_pathman_relation_info(Oid relid, - PartType partitioning_type, - const char *part_column_name, + Datum *values, + bool *isnull, bool allow_incomplete); void invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); diff --git a/src/init.c b/src/init.c index 1011a198..b4cda22c 100644 --- a/src/init.c +++ b/src/init.c @@ -71,7 +71,7 @@ static void init_local_cache(void); static void fini_local_cache(void); static void read_pathman_config(void); -static Expr *get_partition_constraint_expr(Oid partition, AttrNumber part_attno); +static Expr *get_partition_constraint_expr(Oid partition); static int cmp_range_entries(const void *p1, const void *p2, void *arg); @@ -89,7 +89,6 @@ static bool validate_range_opexpr(const Expr *expr, static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, - const AttrNumber part_attno, uint32 *part_hash); static bool read_opexpr_const(const OpExpr *opexpr, @@ -363,7 +362,6 @@ fini_local_cache(void) void fill_prel_with_partitions(const Oid *partitions, const uint32 parts_count, - const char *part_column_name, PartRelationInfo *prel) { uint32 i; @@ -378,18 +376,7 @@ fill_prel_with_partitions(const Oid *partitions, for (i = 0; i < PrelChildrenCount(prel); i++) { - AttrNumber part_attno; - - /* NOTE: Partitions may have different TupleDescs */ - part_attno = get_attnum(partitions[i], part_column_name); - - /* Raise ERROR if there's no such column */ - if (part_attno == InvalidAttrNumber) - elog(ERROR, "partition \"%s\" has no column \"%s\"", - get_rel_name_or_relid(partitions[i]), - part_column_name); - - con_expr = get_partition_constraint_expr(partitions[i], part_attno); + con_expr = get_partition_constraint_expr(partitions[i]); /* Perform a 
partitioning_type-dependent task */ switch (prel->parttype) @@ -398,7 +385,7 @@ fill_prel_with_partitions(const Oid *partitions, { uint32 hash; /* hash value < parts_count */ - if (validate_hash_constraint(con_expr, prel, part_attno, &hash)) + if (validate_hash_constraint(con_expr, prel, &hash)) prel->children[hash] = partitions[i]; else { @@ -416,7 +403,7 @@ fill_prel_with_partitions(const Oid *partitions, Datum lower, upper; bool lower_null, upper_null; - if (validate_range_constraint(con_expr, prel, part_attno, + if (validate_range_constraint(con_expr, prel, 0, &lower, &upper, &lower_null, &upper_null)) { @@ -656,15 +643,15 @@ find_inheritance_children_array(Oid parentrelId, * These functions does not perform sanity checks at all. */ char * -build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno) +build_check_constraint_name_relid_internal(Oid relid) { - return build_check_constraint_name_relname_internal(get_rel_name(relid), attno); + return build_check_constraint_name_relname_internal(get_rel_name(relid)); } char * -build_check_constraint_name_relname_internal(const char *relname, AttrNumber attno) +build_check_constraint_name_relname_internal(const char *relname) { - return psprintf("pathman_%s_%u_check", relname, attno); + return psprintf("pathman_%s_check", relname); } /* @@ -854,7 +841,10 @@ read_pathman_config(void) } /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); + refresh_pathman_relation_info(relid, + values, + isnull, + true); /* allow lazy prel loading */ } /* Clean resources */ @@ -869,7 +859,7 @@ read_pathman_config(void) * build_check_constraint_name_internal() is used to build conname. 
*/ static Expr * -get_partition_constraint_expr(Oid partition, AttrNumber part_attno) +get_partition_constraint_expr(Oid partition) { Oid conid; /* constraint Oid */ char *conname; /* constraint name */ @@ -878,7 +868,7 @@ get_partition_constraint_expr(Oid partition, AttrNumber part_attno) bool conbin_isnull; Expr *expr; /* expression tree for constraint */ - conname = build_check_constraint_name_relid_internal(partition, part_attno); + conname = build_check_constraint_name_relid_internal(partition); conid = get_relation_constraint_oid(partition, conname, true); if (conid == InvalidOid) { @@ -1116,7 +1106,6 @@ read_opexpr_const(const OpExpr *opexpr, static bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, - const AttrNumber part_attno, uint32 *part_hash) { const TypeCacheEntry *tce; @@ -1156,23 +1145,11 @@ validate_hash_constraint(const Expr *expr, type_hash_proc_expr = (FuncExpr *) first; /* Check that function is indeed TYPE_HASH_PROC */ - if (type_hash_proc_expr->funcid != prel->hash_proc || - !(IsA(linitial(type_hash_proc_expr->args), Var) || - IsA(linitial(type_hash_proc_expr->args), RelabelType))) + if (type_hash_proc_expr->funcid != prel->hash_proc) { return false; } - /* Extract argument into 'var' */ - if (IsA(linitial(type_hash_proc_expr->args), RelabelType)) - var = (Var *) ((RelabelType *) linitial(type_hash_proc_expr->args))->arg; - else - var = (Var *) linitial(type_hash_proc_expr->args); - - /* Check that 'var' is the partitioning key attribute */ - if (var->varoattno != part_attno) - return false; - /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) return false; diff --git a/src/nodes_common.c b/src/nodes_common.c index ad6bfa8c..bf8bde3a 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -140,10 +140,13 @@ append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel TargetEntry *te = 
(TargetEntry *) lfirst(lc); Var *var = (Var *) te->expr; + /* FIX this if (IsA(var, Var) && var->varoattno == prel->attnum) part_attr_found = true; + */ } + /* FIX this if (!part_attr_found) { Var *newvar = makeVar(relno, @@ -158,7 +161,7 @@ append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel tlist = lappend(tlist, makeTargetEntry((Expr *) newvar, last_item, NULL, false)); - } + } */ return tlist; } @@ -262,11 +265,12 @@ get_partitioned_attr_clauses(List *restrictinfo_list, Assert(IsA(rinfo, RestrictInfo)); pull_varattnos((Node *) rinfo->clause, partitioned_rel, &varattnos); + /* FIX this if (bms_get_singleton_member(varattnos, &part_attno) && AdjustAttno(part_attno) == prel->attnum) { result = lappend(result, rinfo->clause); - } + } */ } return result; } diff --git a/src/partition_creation.c b/src/partition_creation.c index 3baf067b..c504ac86 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -29,9 +29,12 @@ #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "miscadmin.h" +#include "parser/parser.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" #include "parser/parse_utilcmd.h" +#include "parser/analyze.h" +#include "tcop/tcopprot.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/datum.h" @@ -59,7 +62,7 @@ static void create_single_partition_common(Oid partition_relid, static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, - char **partitioned_column); + char **partitioning_expr); static char *choose_range_partition_name(Oid parent_relid, Oid parent_nsp); static char *choose_hash_partition_name(Oid parent_relid, uint32 part_idx); @@ -97,7 +100,7 @@ create_single_range_partition_internal(Oid parent_relid, { Oid partition_relid; Constraint *check_constr; - char *partitioned_column; + char *partitioning_expr; init_callback_params callback_params; /* Generate a name if asked to */ @@ -112,15 +115,15 @@ 
create_single_range_partition_internal(Oid parent_relid, partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); } - /* Create a partition & get 'partitioned_column' */ + /* Create a partition & get 'partitioning expression' */ partition_relid = create_single_partition_internal(parent_relid, partition_rv, tablespace, - &partitioned_column); + &partitioning_expr); /* Build check constraint for RANGE partition */ check_constr = build_range_check_constraint(partition_relid, - partitioned_column, + partitioning_expr, start_value, end_value, value_type); @@ -151,7 +154,7 @@ create_single_hash_partition_internal(Oid parent_relid, { Oid partition_relid; Constraint *check_constr; - char *partitioned_column; + char *partitioning_expr; init_callback_params callback_params; /* Generate a name if asked to */ @@ -166,15 +169,15 @@ create_single_hash_partition_internal(Oid parent_relid, partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); } - /* Create a partition & get 'partitioned_column' */ + /* Create a partition & get 'partitionining expression' */ partition_relid = create_single_partition_internal(parent_relid, partition_rv, tablespace, - &partitioned_column); + &partitioning_expr); /* Build check constraint for HASH partition */ check_constr = build_hash_check_constraint(partition_relid, - partitioned_column, + partitioning_expr, part_idx, part_count, value_type); @@ -639,7 +642,7 @@ static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, - char **partitioned_column) /* to be set */ + char **partitioning_expr) /* to be set */ { /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -682,12 +685,12 @@ create_single_partition_internal(Oid parent_relid, parent_nsp_name = get_namespace_name(parent_nsp); /* Fetch partitioned column's name */ - if (partitioned_column) + if (partitioning_expr) { - Datum partitioned_column_datum; + Datum expr_datum; - partitioned_column_datum = 
config_values[Anum_pathman_config_attname - 1]; - *partitioned_column = TextDatumGetCString(partitioned_column_datum); + expr_datum = config_values[Anum_pathman_config_attname - 1]; + *partitioning_expr = TextDatumGetCString(expr_datum); } /* Make up parent's RangeVar */ @@ -1114,7 +1117,7 @@ drop_check_constraint(Oid relid, AttrNumber attnum) AlterTableCmd *cmd; /* Build a correct name for this constraint */ - constr_name = build_check_constraint_name_relid_internal(relid, attnum); + constr_name = build_check_constraint_name_relid_internal(relid); stmt = makeNode(AlterTableStmt); stmt->relation = makeRangeVarFromRelid(relid); @@ -1204,12 +1207,9 @@ build_range_check_constraint(Oid child_relid, { Constraint *hash_constr; char *range_constr_name; - AttrNumber attnum; /* Build a correct name for this constraint */ - attnum = get_attnum(child_relid, attname); - range_constr_name = build_check_constraint_name_relid_internal(child_relid, - attnum); + range_constr_name = build_check_constraint_name_relid_internal(child_relid); /* Initialize basic properties of a CHECK constraint */ hash_constr = make_constraint_common(range_constr_name, @@ -1278,15 +1278,16 @@ check_range_available(Oid parent_relid, /* Build HASH check constraint expression tree */ Node * -build_raw_hash_check_tree(char *attname, +build_raw_hash_check_tree(const char *base_expr, uint32 part_idx, uint32 part_count, + Oid relid, Oid value_type) { A_Expr *eq_oper = makeNode(A_Expr); FuncCall *part_idx_call = makeNode(FuncCall), *hash_call = makeNode(FuncCall); - ColumnRef *hashed_column = makeNode(ColumnRef); + //ColumnRef *hashed_column = makeNode(ColumnRef); A_Const *part_idx_c = makeNode(A_Const), *part_count_c = makeNode(A_Const); @@ -1294,13 +1295,14 @@ build_raw_hash_check_tree(char *attname, Oid hash_proc; TypeCacheEntry *tce; + Node *expr = get_expression_node(relid, base_expr, false); tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; /* Partitioned column */ - 
hashed_column->fields = list_make1(makeString(attname)); - hashed_column->location = -1; + //hashed_column->fields = list_make1(makeString(attname)); + //hashed_column->location = -1; /* Total amount of partitions */ part_count_c->val = make_int_value_struct(part_count); @@ -1312,7 +1314,7 @@ build_raw_hash_check_tree(char *attname, /* Call hash_proc() */ hash_call->funcname = list_make1(makeString(get_func_name(hash_proc))); - hash_call->args = list_make1(hashed_column); + hash_call->args = list_make1(expr); hash_call->agg_order = NIL; hash_call->agg_filter = NULL; hash_call->agg_within_group = false; @@ -1352,25 +1354,23 @@ build_raw_hash_check_tree(char *attname, /* Build complete HASH check constraint */ Constraint * build_hash_check_constraint(Oid child_relid, - char *attname, + const char *expr, uint32 part_idx, uint32 part_count, Oid value_type) { Constraint *hash_constr; char *hash_constr_name; - AttrNumber attnum; /* Build a correct name for this constraint */ - attnum = get_attnum(child_relid, attname); - hash_constr_name = build_check_constraint_name_relid_internal(child_relid, - attnum); + hash_constr_name = build_check_constraint_name_relid_internal(child_relid); /* Initialize basic properties of a CHECK constraint */ hash_constr = make_constraint_common(hash_constr_name, - build_raw_hash_check_tree(attname, + build_raw_hash_check_tree(expr, part_idx, part_count, + child_relid, value_type)); /* Everything seems to be fine */ return hash_constr; @@ -1680,3 +1680,433 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } + +/* Wraps expression by SELECT query and returns parsed tree */ +static Node * +parse_expression(Oid relid, const char *expr, char **query_string_out) +{ + char *fmt = "SELECT (%s) FROM %s.%s"; + char *relname = get_rel_name(relid), + *namespace_name = get_namespace_name(get_rel_namespace(relid)); + List *parsetree_list; + char *query_string = psprintf(fmt, expr, namespace_name, relname); + + parsetree_list = 
raw_parser(query_string); + Assert(list_length(parsetree_list) == 1); + + if (query_string_out) + { + *query_string_out = query_string; + } + return (Node *)(lfirst(list_head(parsetree_list))); +} + +/* + * exprNodeLocation - + * basicly copy of exprLocation from nodeFuncs, but with another + * purpose - get address of location variable + */ +static bool +clearNodeLocation(const Node *expr) +{ + if (expr == NULL) + return false; + + switch (nodeTag(expr)) + { + case T_RangeVar: + ((RangeVar *) expr)->location = -1; + break; + case T_Var: + ((Var *) expr)->location = -1; + break; + case T_Const: + ((Const *) expr)->location = -1; + break; + case T_Param: + ((Param *) expr)->location = -1; + break; + case T_Aggref: + /* function name should always be the first thing */ + ((Aggref *) expr)->location = -1; + break; + case T_GroupingFunc: + ((GroupingFunc *) expr)->location = -1; + break; + case T_WindowFunc: + /* function name should always be the first thing */ + ((WindowFunc *) expr)->location = -1; + break; + case T_ArrayRef: + /* just use array argument's location */ + clearNodeLocation((Node *) ((const ArrayRef *) expr)->refexpr); + break; + case T_FuncExpr: + { + FuncExpr *fexpr = (FuncExpr *) expr; + + /* consider both function name and leftmost arg */ + fexpr->location = -1; + clearNodeLocation((Node *) fexpr->args); + } + break; + case T_NamedArgExpr: + { + NamedArgExpr *na = (NamedArgExpr *) expr; + + /* consider both argument name and value */ + na->location = -1; + clearNodeLocation((Node *) na->arg); + } + break; + case T_OpExpr: + case T_DistinctExpr: /* struct-equivalent to OpExpr */ + case T_NullIfExpr: /* struct-equivalent to OpExpr */ + { + OpExpr *opexpr = (OpExpr *) expr; + + /* consider both operator name and leftmost arg */ + opexpr->location = -1; + clearNodeLocation((Node *) opexpr->args); + } + break; + case T_ScalarArrayOpExpr: + { + ScalarArrayOpExpr *saopexpr = (ScalarArrayOpExpr *) expr; + + /* consider both operator name and leftmost arg 
*/ + + saopexpr->location = -1; + clearNodeLocation((Node *) saopexpr->args); + } + break; + case T_BoolExpr: + { + BoolExpr *bexpr = (BoolExpr *) expr; + + /* + * Same as above, to handle either NOT or AND/OR. We can't + * special-case NOT because of the way that it's used for + * things like IS NOT BETWEEN. + */ + bexpr->location = -1; + clearNodeLocation((Node *) bexpr->args); + } + break; + case T_SubLink: + { + SubLink *sublink = (SubLink *) expr; + + /* check the testexpr, if any, and the operator/keyword */ + clearNodeLocation(sublink->testexpr); + sublink->location = -1; + } + break; + case T_FieldSelect: + /* just use argument's location */ + return clearNodeLocation((Node *) ((FieldSelect *) expr)->arg); + case T_FieldStore: + /* just use argument's location */ + return clearNodeLocation((Node *) ((FieldStore *) expr)->arg); + case T_RelabelType: + { + RelabelType *rexpr = (RelabelType *) expr; + + /* Much as above */ + rexpr->location = -1; + clearNodeLocation((Node *) rexpr->arg); + } + break; + case T_CoerceViaIO: + { + CoerceViaIO *cexpr = (CoerceViaIO *) expr; + + /* Much as above */ + cexpr->location = -1; + clearNodeLocation((Node *) cexpr->arg); + } + break; + case T_ArrayCoerceExpr: + { + ArrayCoerceExpr *cexpr = (ArrayCoerceExpr *) expr; + + /* Much as above */ + cexpr->location = -1; + clearNodeLocation((Node *) cexpr->arg); + } + break; + case T_ConvertRowtypeExpr: + { + ConvertRowtypeExpr *cexpr = (ConvertRowtypeExpr *) expr; + + /* Much as above */ + cexpr->location = -1; + clearNodeLocation((Node *) cexpr->arg); + } + break; + case T_CollateExpr: + /* just use argument's location */ + clearNodeLocation((Node *) ((CollateExpr *) expr)->arg); + break; + case T_CaseExpr: + /* CASE keyword should always be the first thing */ + ((CaseExpr *) expr)->location = -1; + break; + case T_CaseWhen: + /* WHEN keyword should always be the first thing */ + ((CaseWhen *) expr)->location = -1; + break; + case T_ArrayExpr: + /* the location points at ARRAY or 
[, which must be leftmost */ + ((ArrayExpr *) expr)->location = -1; + break; + case T_RowExpr: + /* the location points at ROW or (, which must be leftmost */ + ((RowExpr *) expr)->location = -1; + break; + case T_RowCompareExpr: + /* just use leftmost argument's location */ + return clearNodeLocation((Node *) ((RowCompareExpr *) expr)->largs); + case T_CoalesceExpr: + /* COALESCE keyword should always be the first thing */ + ((CoalesceExpr *) expr)->location = -1; + break; + case T_MinMaxExpr: + /* GREATEST/LEAST keyword should always be the first thing */ + ((MinMaxExpr *) expr)->location = -1; + break; + case T_XmlExpr: + { + XmlExpr *xexpr = (XmlExpr *) expr; + + /* consider both function name and leftmost arg */ + xexpr->location = -1; + clearNodeLocation((Node *) xexpr->args); + } + break; + case T_NullTest: + { + NullTest *nexpr = (NullTest *) expr; + + /* Much as above */ + nexpr->location = -1; + clearNodeLocation((Node *) nexpr->arg); + } + break; + case T_BooleanTest: + { + BooleanTest *bexpr = (BooleanTest *) expr; + + /* Much as above */ + bexpr->location = -1; + clearNodeLocation((Node *) bexpr->arg); + } + break; + case T_CoerceToDomain: + { + CoerceToDomain *cexpr = (CoerceToDomain *) expr; + + /* Much as above */ + cexpr->location = -1; + clearNodeLocation((Node *) cexpr->arg); + } + break; + case T_CoerceToDomainValue: + ((CoerceToDomainValue *) expr)->location = -1; + break; + case T_SetToDefault: + ((SetToDefault *) expr)->location = -1; + break; + case T_TargetEntry: + /* just use argument's location */ + return clearNodeLocation((Node *) ((const TargetEntry *) expr)->expr); + case T_IntoClause: + /* use the contained RangeVar's location --- close enough */ + return clearNodeLocation((Node *) ((const IntoClause *) expr)->rel); + case T_List: + { + /* report location of first list member that has a location */ + ListCell *lc; + + //loc = -1; /* just to suppress compiler warning */ + foreach(lc, (const List *) expr) + { + clearNodeLocation((Node 
*) lfirst(lc)); + } + } + break; + case T_A_Expr: + { + A_Expr *aexpr = (A_Expr *) expr; + + /* use leftmost of operator or left operand (if any) */ + /* we assume right operand can't be to left of operator */ + aexpr->location = -1; + clearNodeLocation(aexpr->lexpr); + } + break; + case T_ColumnRef: + ((ColumnRef *) expr)->location = -1; + break; + case T_ParamRef: + ((ParamRef *) expr)->location = -1; + break; + case T_A_Const: + ((A_Const *) expr)->location = -1; + break; + case T_FuncCall: + { + FuncCall *fc = (FuncCall *) expr; + + /* consider both function name and leftmost arg */ + /* (we assume any ORDER BY nodes must be to right of name) */ + fc->location = -1; + clearNodeLocation((Node *) fc->args); + } + break; + case T_A_ArrayExpr: + /* the location points at ARRAY or [, which must be leftmost */ + ((A_ArrayExpr *) expr)->location = -1; + break; + case T_ResTarget: + /* we need not examine the contained expression (if any) */ + ((ResTarget *) expr)->location = -1; + break; + case T_MultiAssignRef: + return clearNodeLocation((Node *)(((MultiAssignRef *) expr)->source)); + case T_TypeCast: + { + TypeCast *tc = (TypeCast *) expr; + + /* + * This could represent CAST(), ::, or TypeName 'literal', so + * any of the components might be leftmost. 
+ */ + clearNodeLocation(tc->arg); + tc->typeName->location = -1; + tc->location = -1; + } + break; + case T_CollateClause: + /* just use argument's location */ + return clearNodeLocation(((CollateClause *) expr)->arg); + case T_SortBy: + /* just use argument's location (ignore operator, if any) */ + return clearNodeLocation(((SortBy *) expr)->node); + case T_WindowDef: + ((WindowDef *) expr)->location = -1; + break; + case T_RangeTableSample: + ((RangeTableSample *) expr)->location = -1; + break; + case T_TypeName: + ((TypeName *) expr)->location = -1; + break; + case T_ColumnDef: + ((ColumnDef *) expr)->location = -1; + break; + case T_Constraint: + ((Constraint *) expr)->location = -1; + break; + case T_FunctionParameter: + /* just use typename's location */ + return clearNodeLocation((Node *) ((const FunctionParameter *) expr)->argType); + case T_XmlSerialize: + /* XMLSERIALIZE keyword should always be the first thing */ + ((XmlSerialize *) expr)->location = -1; + break; + case T_GroupingSet: + ((GroupingSet *) expr)->location = -1; + break; + case T_WithClause: + ((WithClause *) expr)->location = -1; + break; + case T_InferClause: + ((InferClause *) expr)->location = -1; + break; + case T_OnConflictClause: + ((OnConflictClause *) expr)->location = -1; + break; + case T_CommonTableExpr: + ((CommonTableExpr *) expr)->location = -1; + break; + case T_PlaceHolderVar: + /* just use argument's location */ + return clearNodeLocation((Node *) ((const PlaceHolderVar *) expr)->phexpr); + case T_InferenceElem: + /* just use nested expr's location */ + return clearNodeLocation((Node *) ((const InferenceElem *) expr)->expr); + default: + return false; + } + return true; +} + +static bool location_cleaning_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + if (clearNodeLocation(node)) + return false; + + return raw_expression_tree_walker(node, location_cleaning_walker, context); +} + +/* By given relation id and expression returns query node */ 
+Node * +get_expression_node(Oid relid, const char *expr, bool analyze) +{ + List *querytree_list; + List *target_list; + char *query_string; + Node *parsetree = parse_expression(relid, expr, &query_string), + *raw_node; + Query *query; + TargetEntry *target_entry; + post_parse_analyze_hook_type orig_hook = NULL; + + target_list = ((SelectStmt *)parsetree)->targetList; + + if (!analyze) { + raw_node = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); + //raw_expression_tree_walker(raw_node, location_cleaning_walker, NULL); + return raw_node; + } + + //turn off parse hooks + orig_hook = post_parse_analyze_hook; + post_parse_analyze_hook = NULL; + + querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + query = (Query *)lfirst(list_head(querytree_list)); + target_entry = (TargetEntry *)lfirst(list_head(query->targetList)); + //plan = pg_plan_query(query, 0, NULL); + + post_parse_analyze_hook = orig_hook; + + return (Node *)target_entry->expr; +} + +/* Determines type of expression for a relation */ +Oid +get_partition_expr_type(Oid relid, const char *expr) +{ + Node *parsetree, + *target_entry, + *expr_node; + Query *query; + char *query_string; + + parsetree = parse_expression(relid, expr, &query_string); + + /* This will fail with elog in case of wrong expression + * with more or less understable text */ + query = parse_analyze(parsetree, query_string, NULL, 0); + + /* We use analyzed query only to get type of expression */ + target_entry = lfirst(list_head(query->targetList)); + expr_node = (Node *)((TargetEntry *)target_entry)->expr; + return get_call_expr_argtype(expr_node, 0); +} + diff --git a/src/partition_filter.c b/src/partition_filter.c index a5e80e6f..c321cf85 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -569,6 +569,8 @@ partition_filter_exec(CustomScanState *node) ResultRelInfoHolder *rri_holder; bool isnull; Datum value; + ExprDoneCond itemIsDone; + ExprState *expr_state; /* Fetch 
PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -582,16 +584,27 @@ partition_filter_exec(CustomScanState *node) return slot; } - /* Extract partitioned column's value (also check types) */ + /* Extract partitioned column's value (also check types) Assert(slot->tts_tupleDescriptor-> attrs[prel->attnum - 1]->atttypid == prel->atttype); value = slot_getattr(slot, prel->attnum, &isnull); if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + elog(ERROR, ERR_PART_ATTR_NULL); */ + + /* Prepare state before execution */ + expr_state = ExecPrepareExpr(prel->expr, estate); /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + /* Execute expression */ + value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + if (itemIsDone != ExprSingleResult) + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); + /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, prel->atttype, prel, &state->result_parts, estate); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4fa3f4bc..cf2344e9 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -910,12 +910,13 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) (Var *) ((RelabelType *) varnode)->arg; /* Skip if base types or attribute numbers do not match */ + /* FIX: use exprsssion if (getBaseType(var->vartype) != getBaseType(prel->atttype) || - var->varoattno != prel->attnum || /* partitioned attribute */ - var->varno != context->prel_varno) /* partitioned table */ + var->varoattno != prel->attnum || + var->varno != context->prel_varno) { goto handle_arrexpr_return; - } + } */ } else goto handle_arrexpr_return; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b0ea3861..8f754d57 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -32,6 +32,7 @@ #include "utils/snapmgr.h" #include "utils/lsyscache.h" #include 
"utils/syscache.h" +#include "utils/typcache.h" /* Function declarations */ @@ -56,6 +57,7 @@ PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); +PG_FUNCTION_INFO_V1( is_expression_suitable ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); @@ -370,14 +372,15 @@ show_partition_list_internal(PG_FUNCTION_ARGS) continue; } - - partattr_cstr = get_attname(PrelParentRelid(prel), prel->attnum); - if (!partattr_cstr) - { + + // FIX this + //partattr_cstr = get_attname(PrelParentRelid(prel), prel->attnum); + //if (!partattr_cstr) + //{ /* Parent does not exist, go to the next 'prel' */ - usercxt->current_prel = NULL; - continue; - } + // usercxt->current_prel = NULL; + // continue; + //} /* Fill in common values */ values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); @@ -486,26 +489,69 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } +Datum +is_expression_suitable(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + char *expr = text_to_cstring(PG_GETARG_TEXT_P(1)); + bool result; + + TypeCacheEntry *tce; + Oid type_oid = get_partition_expr_type(relid, expr); + + tce = lookup_type_cache(type_oid, TYPECACHE_HASH_PROC); + result = (tce->hash_proc != InvalidOid); + + PG_RETURN_BOOL(result); +} + Datum is_attribute_nullable(PG_FUNCTION_ARGS) { + /* Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); + char *relname = get_rel_name(relid), + *namespace_name = get_namespace_name(get_rel_namespace(relid)); + char *expr = text_to_cstring(PG_GETARG_TEXT_P(1)); + char *fmt = "SELECT (%s) FROM %s.%s"; bool result = true; HeapTuple tp; - - tp = SearchSysCacheAttName(relid, text_to_cstring(attname)); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = !att_tup->attnotnull; - 
ReleaseSysCache(tp); - } - else - elog(ERROR, "Cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - text_to_cstring(attname), get_rel_name_or_relid(relid)); - + List *parsetree_list, + *querytree_list, + *plantree_list; + EState *estate; + ExprContext *econtext; + Node *parsetree, + *target_entry; + Query *query; + PlannedStmt *plan; + MemoryContext oldcontext; + SeqScanState *scanstate; + Oid expr_type; + + int n = snprintf(NULL, 0, fmt, expr, namespace_name, relname); + char *query_string = (char *) palloc(n + 1); + snprintf(query_string, n + 1, fmt, expr, namespace_name, relname); + + parsetree_list = raw_parser(query_string); + + Assert(list_length(parsetree_list) == 1); + parsetree = (Node *)(lfirst(list_head(parsetree_list))); + + query = parse_analyze(parsetree, query_string, NULL, 0); + plan = pg_plan_query(query, 0, NULL); + + target_entry = lfirst(list_head(plan->planTree->targetlist)); + expr_type = get_call_expr_argtype(((TargetEntry *)target_entry)->expr, 0); + + estate = CreateExecutorState(); + + Assert(nodeTag(plan->planTree) == T_SeqScan); + scanstate = ExecInitSeqScan(plan->planTree, estate, 0); + + pfree(query_string); + */ + bool result = true; PG_RETURN_BOOL(result); /* keep compiler happy */ } @@ -566,7 +612,7 @@ build_check_constraint_name_attnum(PG_FUNCTION_ARGS) elog(ERROR, "Cannot build check constraint name: " "invalid attribute number %i", attnum); - result = build_check_constraint_name_relid_internal(relid, attnum); + result = build_check_constraint_name_relid_internal(relid); PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } @@ -586,7 +632,7 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) elog(ERROR, "relation \"%s\" has no column \"%s\"", get_rel_name_or_relid(relid), text_to_cstring(attname)); - result = build_check_constraint_name_relid_internal(relid, attnum); + result = build_check_constraint_name_relid_internal(relid); PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } @@ -614,6 
+660,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) HeapTuple htup; CatalogIndexState indstate; + char *expr; + Oid expr_type; + PathmanInitState init_state; MemoryContext old_mcxt = CurrentMemoryContext; @@ -631,10 +680,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!check_relation_exists(relid)) elog(ERROR, "Invalid relation %u", relid); - if (get_attnum(relid, text_to_cstring(attname)) == InvalidAttrNumber) - elog(ERROR, "relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), text_to_cstring(attname)); - /* Select partitioning type using 'range_interval' */ parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; @@ -647,11 +692,25 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_attname - 1] = PointerGetDatum(attname); isnull[Anum_pathman_config_attname - 1] = false; + expr = TextDatumGetCString(PointerGetDatum(attname)); + expr_type = get_partition_expr_type(relid, expr); + + values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); + isnull[Anum_pathman_config_atttype - 1] = false; + values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); isnull[Anum_pathman_config_parttype - 1] = false; - values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); - isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); + if (parttype == PT_RANGE) + { + values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); + isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); + } + else + { + values[Anum_pathman_config_range_interval - 1] = (Datum) 0; + isnull[Anum_pathman_config_range_interval - 1] = true; + } /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -668,8 +727,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, parttype, - text_to_cstring(attname), + refresh_pathman_relation_info(relid, 
+ values, + isnull, false); /* initialize immediately */ } PG_CATCH(); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 55540196..e6b039e6 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -32,8 +32,6 @@ PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); PG_FUNCTION_INFO_V1( get_type_hash_func ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); -PG_FUNCTION_INFO_V1( build_hash_condition ); - /* * Create HASH partitions implementation (written in C). @@ -51,9 +49,9 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) pfree(arr); \ } while (0) - Oid parent_relid = PG_GETARG_OID(0); - const char *partitioned_col_name = TextDatumGetCString(PG_GETARG_DATUM(1)); - Oid partitioned_col_type; + Oid parent_relid = PG_GETARG_OID(0), + expr_type; + const char *expr = TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 partitions_count = PG_GETARG_INT32(2), i; @@ -68,9 +66,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) if (get_pathman_relation_info(parent_relid)) elog(ERROR, "cannot add new HASH partitions"); - partitioned_col_type = get_attribute_type(parent_relid, - partitioned_col_name, - false); + expr_type = get_partition_expr_type(parent_relid, expr); /* Extract partition names */ if (!PG_ARGISNULL(3)) @@ -108,7 +104,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) /* Create a partition (copy FKs, invoke callbacks etc) */ create_single_hash_partition_internal(parent_relid, i, partitions_count, - partitioned_col_type, + expr_type, partition_rv, tablespace); } @@ -146,43 +142,6 @@ get_hash_part_idx(PG_FUNCTION_ARGS) PG_RETURN_UINT32(hash_to_part_index(value, part_count)); } -/* - * Build hash condition for a CHECK CONSTRAINT - */ -Datum -build_hash_condition(PG_FUNCTION_ARGS) -{ - Oid atttype = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - uint32 part_count = PG_GETARG_UINT32(2), - part_idx = PG_GETARG_UINT32(3); - - TypeCacheEntry *tce; - char *attname_cstring = text_to_cstring(attname); - - char *result; - - if (part_idx >= 
part_count) - elog(ERROR, "'partition_index' must be lower than 'partitions_count'"); - - tce = lookup_type_cache(atttype, TYPECACHE_HASH_PROC); - - /* Check that HASH function exists */ - if (!OidIsValid(tce->hash_proc)) - elog(ERROR, "no hash function for type %s", format_type_be(atttype)); - - /* Create hash condition CSTRING */ - result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", - get_namespace_name(get_pathman_schema()), - get_func_name(tce->hash_proc), - attname_cstring, - part_count, - part_idx); - - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - - /* * ------------------ * Helper functions diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b78f44a4..84ae9bc1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -528,13 +528,14 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } /* Drop old constraint and create a new one */ + /* FIX: this modify_range_constraint(parts[0], get_relid_attribute_name(prel->key, prel->attnum), prel->attnum, prel->atttype, &first->min, - &last->max); + &last->max); */ /* Make constraint visible */ CommandCounterIncrement(); @@ -613,13 +614,14 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) *next = &ranges[i + 1]; /* Drop old constraint and create a new one */ + /* modify_range_constraint(next->child_oid, get_relid_attribute_name(prel->key, prel->attnum), prel->attnum, prel->atttype, &cur->min, - &next->max); + &next->max);*/ } /* Finally drop this partition */ diff --git a/src/relation_info.c b/src/relation_info.c index bde960c7..1493fab7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -9,6 +9,7 @@ */ #include "relation_info.h" +#include "partition_creation.h" #include "init.h" #include "utils.h" #include "xact_handling.h" @@ -18,6 +19,7 @@ #include "catalog/catalog.h" #include "catalog/indexing.h" #include "catalog/pg_inherits.h" +#include "catalog/pg_type.h" #include "miscadmin.h" #include "storage/lmgr.h" #include "utils/builtins.h" @@ -67,8 +69,8 @@ 
static Oid get_parent_of_partition_internal(Oid partition, /* Create or update PartRelationInfo in local cache. Might emit ERROR. */ const PartRelationInfo * refresh_pathman_relation_info(Oid relid, - PartType partitioning_type, - const char *part_column_name, + Datum *values, + bool *isnull, bool allow_incomplete) { const LOCKMODE lockmode = AccessShareLock; @@ -80,6 +82,9 @@ refresh_pathman_relation_info(Oid relid, PartRelationInfo *prel; Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; + const char *expr; + Oid expr_type; + HeapTuple tp; prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, relid, HASH_ENTER, @@ -129,19 +134,29 @@ refresh_pathman_relation_info(Oid relid, prel->ranges = NULL; /* Set partitioning type */ - prel->parttype = partitioning_type; + prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - /* Initialize PartRelationInfo using syscache & typcache */ - prel->attnum = get_attnum(relid, part_column_name); + /* Read config values */ + expr = TextDatumGetCString(values[Anum_pathman_config_attname - 1]); + expr_type = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); - /* Attribute number sanity check */ - if (prel->attnum == InvalidAttrNumber) - elog(ERROR, "Relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), part_column_name); + /* + * Save parsed expression to cache and use already saved expression type + * from config + */ + prel->expr = (Expr *) get_expression_node(relid, expr, true); + prel->atttype = expr_type; - /* Fetch atttypid, atttypmod, and attcollation in a single cache lookup */ - get_atttypetypmodcoll(relid, prel->attnum, - &prel->atttype, &prel->atttypmod, &prel->attcollid); + tp = SearchSysCache1(TYPEOID, values[Anum_pathman_config_atttype - 1]); + if (HeapTupleIsValid(tp)) + { + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); + prel->atttypmod = typtup->typtypmod; + prel->attcollid = typtup->typcollation; + 
ReleaseSysCache(tp); + } + else + elog(ERROR, "Something went wrong while getting type information"); /* Fetch HASH & CMP fuctions and other stuff from type cache */ typcache = lookup_type_cache(prel->atttype, @@ -199,7 +214,7 @@ refresh_pathman_relation_info(Oid relid, */ fill_prel_with_partitions(prel_children, prel_children_count, - part_column_name, prel); + prel); /* Peform some actions for each child */ for (i = 0; i < prel_children_count; i++) @@ -284,16 +299,9 @@ get_pathman_relation_info(Oid relid) /* Check that PATHMAN_CONFIG table contains this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL)) { - PartType part_type; - const char *attname; - - /* We can't use 'part_type' & 'attname' from invalid prel */ - part_type = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = TextDatumGetCString(values[Anum_pathman_config_attname - 1]); - /* Refresh partitioned table cache entry (might turn NULL) */ /* TODO: possible refactoring, pass found 'prel' instead of searching */ - prel = refresh_pathman_relation_info(relid, part_type, attname, false); + prel = refresh_pathman_relation_info(relid, values, isnull, false); } /* Else clear remaining cache entry */ @@ -667,15 +675,10 @@ try_perform_parent_refresh(Oid parent) if (pathman_config_contains_relation(parent, values, isnull, NULL)) { - text *attname; - PartType parttype; - - parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = DatumGetTextP(values[Anum_pathman_config_attname - 1]); - /* If anything went wrong, return false (actually, it might emit ERROR) */ - refresh_pathman_relation_info(parent, parttype, - text_to_cstring(attname), + refresh_pathman_relation_info(parent, + values, + isnull, true); /* allow lazy */ } /* Not a partitioned relation */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1a5079c8..cc9d37b6 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -167,14 +167,14 @@ 
is_pathman_related_table_rename(Node *parsetree, return false; /* Is parent partitioned? */ + /* FIX this if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - /* Return 'partition_relid' and 'prel->attnum' */ if (partition_relid_out) *partition_relid_out = partition_relid; if (partitioned_col_out) *partitioned_col_out = prel->attnum; return true; - } + } */ return false; } @@ -551,11 +551,14 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) break; + /* FIX this if (nulls[prel->attnum - 1]) elog(ERROR, ERR_PART_ATTR_NULL); + */ /* Search for a matching partition */ - rri_holder = select_partition_for_insert(values[prel->attnum - 1], + /* FIX here, attnum */ + rri_holder = select_partition_for_insert(values[/* here */1], prel->atttype, prel, &parts_storage, estate); child_result_rel = rri_holder->result_rel_info; @@ -698,13 +701,11 @@ PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ /* Generate old constraint name */ old_constraint_name = - build_check_constraint_name_relid_internal(partition_relid, - partitioned_col); + build_check_constraint_name_relid_internal(partition_relid); /* Generate new constraint name */ new_constraint_name = - build_check_constraint_name_relname_internal(part_rename_stmt->newname, - partitioned_col); + build_check_constraint_name_relname_internal(part_rename_stmt->newname); /* Build check constraint RENAME statement */ memset((void *) &rename_stmt, 0, sizeof(RenameStmt)); From a36a14c87d3b5fdf8d5fbb6bb54498d384e204a2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 13 Mar 2017 18:19:35 +0300 Subject: [PATCH 0270/1124] Disable hooks while expression is parsing --- src/hooks.c | 28 ++++++++++++++++++++++++++++ src/include/pathman.h | 3 +++ src/init.c | 4 +--- src/partition_creation.c | 18 ++++++++++-------- src/pg_pathman.c | 5 ++++- 5 files changed, 46 insertions(+), 12 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 
75fec605..24e35d6b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -65,6 +65,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, set_join_pathlist_next(root, joinrel, outerrel, innerrel, jointype, extra); + /* Hooks can be disabled */ + if (!hooks_enabled) + return; + /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; @@ -204,6 +208,10 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (set_rel_pathlist_hook_next != NULL) set_rel_pathlist_hook_next(root, rel, rti, rte); + /* Hooks can be disabled */ + if (!hooks_enabled) + return; + /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) return; @@ -484,6 +492,18 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) PG_TRY(); { + /* Hooks can be disabled */ + if (!hooks_enabled) + { + /* Invoke original hook if needed */ + if (planner_hook_next) + result = planner_hook_next(parse, cursorOptions, boundParams); + else + result = standard_planner(parse, cursorOptions, boundParams); + + return result; + } + if (pathman_ready) { /* Increment relation tags refcount */ @@ -543,6 +563,10 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (post_parse_analyze_hook_next) post_parse_analyze_hook_next(pstate, query); + /* Hooks can be disabled */ + if (!hooks_enabled) + return; + /* We shouldn't do anything on BEGIN or SET ISOLATION LEVEL stmts */ if (query->commandType == CMD_UTILITY && (xact_is_transaction_stmt(query->utilityStmt) || @@ -617,6 +641,10 @@ pathman_relcache_hook(Datum arg, Oid relid) if (!IsPathmanReady()) return; + /* Hooks can be disabled */ + if (!hooks_enabled) + return; + /* We shouldn't even consider special OIDs */ if (relid < FirstNormalObjectId) return; diff --git a/src/include/pathman.h b/src/include/pathman.h index 4836baca..84899d79 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -84,6 +84,9 @@ extern Oid pathman_config_relid; extern Oid 
pathman_config_params_relid; +/* Hooks enable state */ +extern bool hooks_enabled; + /* * Just to clarify our intentions (return the corresponding relid). */ diff --git a/src/init.c b/src/init.c index b4cda22c..06b8bc71 100644 --- a/src/init.c +++ b/src/init.c @@ -1135,7 +1135,7 @@ validate_hash_constraint(const Expr *expr, if (list_length(get_hash_expr->args) == 2) { - Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(VALUE) */ + Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(EXPRESSION) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ Const *cur_partition_hash; /* hash value for this partition */ @@ -1146,9 +1146,7 @@ validate_hash_constraint(const Expr *expr, /* Check that function is indeed TYPE_HASH_PROC */ if (type_hash_proc_expr->funcid != prel->hash_proc) - { return false; - } /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) diff --git a/src/partition_creation.c b/src/partition_creation.c index c504ac86..6db80c19 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -29,6 +29,7 @@ #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "miscadmin.h" +#include "nodes/plannodes.h" #include "parser/parser.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" @@ -2053,7 +2054,7 @@ static bool location_cleaning_walker(Node *node, void *context) return raw_expression_tree_walker(node, location_cleaning_walker, context); } -/* By given relation id and expression returns query node */ +/* By given relation id and expression returns node */ Node * get_expression_node(Oid relid, const char *expr, bool analyze) { @@ -2064,7 +2065,7 @@ get_expression_node(Oid relid, const char *expr, bool analyze) *raw_node; Query *query; TargetEntry *target_entry; - post_parse_analyze_hook_type orig_hook = NULL; + PlannedStmt *plan; target_list = ((SelectStmt 
*)parsetree)->targetList; @@ -2074,16 +2075,17 @@ get_expression_node(Oid relid, const char *expr, bool analyze) return raw_node; } - //turn off parse hooks - orig_hook = post_parse_analyze_hook; - post_parse_analyze_hook = NULL; + /* We don't need pathman hooks on next stages */ + hooks_enabled = false; querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); query = (Query *)lfirst(list_head(querytree_list)); - target_entry = (TargetEntry *)lfirst(list_head(query->targetList)); - //plan = pg_plan_query(query, 0, NULL); + plan = pg_plan_query(query, 0, NULL); - post_parse_analyze_hook = orig_hook; + target_entry = lfirst(list_head(plan->planTree->targetlist)); + + /* Hooks can work now */ + hooks_enabled = true; return (Node *)target_entry->expr; } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index cf2344e9..0027ceb5 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -42,6 +42,9 @@ PG_MODULE_MAGIC; Oid pathman_config_relid = InvalidOid, pathman_config_params_relid = InvalidOid; +/* Used to temporary disable hooks */ +bool hooks_enabled = true; + /* pg module functions */ void _PG_init(void); @@ -141,7 +144,7 @@ _PG_init(void) /* Apply initial state */ restore_pathman_init_state(&temp_init_state); - /* Initialize 'next' hook pointers */ + /* Set basic hooks */ set_rel_pathlist_hook_next = set_rel_pathlist_hook; set_rel_pathlist_hook = pathman_rel_pathlist_hook; set_join_pathlist_next = set_join_pathlist_hook; From 480ff13835188c5e9154d5092433b20008e3f1c0 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 14 Mar 2017 19:17:04 +0300 Subject: [PATCH 0271/1124] Add insert support using expressions --- src/include/init.h | 5 +++ src/include/partition_creation.h | 4 +- src/include/relation_info.h | 11 ++++- src/partition_creation.c | 25 +++++++++-- src/partition_filter.c | 75 ++++++++++++++++++++++++++++++++ src/relation_info.c | 2 +- 6 files changed, 114 insertions(+), 8 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 
ef8e06dd..02994fbd 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -50,6 +50,11 @@ extern PathmanInitState pg_pathman_init_state; */ #define IsPathmanEnabled() ( pg_pathman_init_state.pg_pathman_enable ) +/* + * Enable or disable pg_pathman + */ +#define EnablePathman(b) ( pg_pathman_init_state.pg_pathman_enable = (b) ) + /* * Check if pg_pathman is initialized & enabled. */ diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 1481cad4..93bf9ce1 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -80,7 +80,8 @@ Node * build_raw_hash_check_tree(const char *base_expr, void drop_check_constraint(Oid relid, AttrNumber attnum); /* expression parsing functions */ -Node *get_expression_node(Oid relid, const char *expr, bool analyze); +Node *get_expression_node(Oid relid, const char *expr, bool analyze, + RTEMapItem **rte_map); Oid get_partition_expr_type(Oid relid, const char *expr); @@ -152,5 +153,4 @@ typedef struct void invoke_part_callback(init_callback_params *cb_params); bool validate_part_callback(Oid procid, bool emit_error); - #endif /* PARTITION_CREATION_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 7fc3718a..712580e5 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -113,6 +113,13 @@ typedef struct max; } RangeEntry; +/* Used to convert 'varno' attributes accodirdingly to working query */ +typedef struct +{ + Oid relid; /* relid by which we can determine what rte we need in current query */ + int res_idx; /* varno will be used for Var */ +} RTEMapItem; + /* * PartRelationInfo * Per-relation partitioning information @@ -127,8 +134,9 @@ typedef struct Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ + Expr *expr; /* planned expression */ + RTEMapItem *expr_map; /* 'varno' map */ PartType parttype; /* partitioning type (HASH | RANGE) */ - Expr *expr; Oid atttype; 
/* expression type */ int32 atttypmod; /* expression type modifier */ bool attbyval; /* is partitioned column stored by value? */ @@ -164,7 +172,6 @@ typedef enum PPS_NOT_SURE /* can't determine (not transactional state) */ } PartParentSearch; - /* * PartRelationInfo field access macros. */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 6db80c19..275bb9b9 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1296,7 +1296,7 @@ build_raw_hash_check_tree(const char *base_expr, Oid hash_proc; TypeCacheEntry *tce; - Node *expr = get_expression_node(relid, base_expr, false); + Node *expr = get_expression_node(relid, base_expr, false, NULL); tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; @@ -1686,7 +1686,7 @@ text_to_regprocedure(text *proc_signature) static Node * parse_expression(Oid relid, const char *expr, char **query_string_out) { - char *fmt = "SELECT (%s) FROM %s.%s"; + char *fmt = "SELECT (%s) FROM ONLY %s.%s"; char *relname = get_rel_name(relid), *namespace_name = get_namespace_name(get_rel_namespace(relid)); List *parsetree_list; @@ -2056,7 +2056,7 @@ static bool location_cleaning_walker(Node *node, void *context) /* By given relation id and expression returns node */ Node * -get_expression_node(Oid relid, const char *expr, bool analyze) +get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_map) { List *querytree_list; List *target_list; @@ -2082,6 +2082,25 @@ get_expression_node(Oid relid, const char *expr, bool analyze) query = (Query *)lfirst(list_head(querytree_list)); plan = pg_plan_query(query, 0, NULL); + if (rte_map != NULL) + { + int i = 0; + int len = list_length(plan->rtable); + ListCell *cell; + + *rte_map = (RTEMapItem *)palloc0(sizeof(RTEMapItem) * (len + 1)); + foreach(cell, plan->rtable) + { + RangeTblEntry *tbl = lfirst(cell); + /* only plain relation RTE */ + Assert(tbl->relid > 0); + (*rte_map)[i].relid = tbl->relid; + 
(*rte_map)[i].res_idx = -1; + + i++; + } + } + target_entry = lfirst(list_head(plan->planTree->targetlist)); /* Hooks can work now */ diff --git a/src/partition_filter.c b/src/partition_filter.c index c321cf85..823062d4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -528,6 +528,72 @@ partition_filter_create_scan_state(CustomScan *node) return (Node *) state; } +static void +adapt_rte_map(List *es_rangetable, RTEMapItem *rte_map) +{ + int i = 0; + ListCell *cell; + + while (true) + { + int j = 1; /* rangetable entries are counting from 1 */ + bool found = false; + + RTEMapItem *item = &rte_map[i++]; + if (item->relid == 0) /* end of array */ + break; + + foreach(cell, es_rangetable) + { + RangeTblEntry *entry = lfirst(cell); + if (entry->relid == item->relid) { + item->res_idx = j; + found = true; + break; + } + + j++; + } + + if (!found) + elog(ERROR, "Didn't found RTE entry for relid %d in expression", + item->relid); + } +} + +static bool +adapt_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + PartRelationInfo *prel = (PartRelationInfo *)context; + int i = 0; + Var *var = (Var *)node; + + while (true) + { + RTEMapItem *item = &prel->expr_map[i]; + if (item->relid == 0) + break; + + if (var->varno == (i + 1)) + { + var->varno = item->res_idx; + return false; + } + + i++; + } + + elog(ERROR, "Didn't found relation for Var in expression"); + } + + return expression_tree_walker(node, adapt_walker, context); +} + void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { @@ -571,6 +637,7 @@ partition_filter_exec(CustomScanState *node) Datum value; ExprDoneCond itemIsDone; ExprState *expr_state; + TupleTableSlot *orig_slot; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -591,6 +658,10 @@ partition_filter_exec(CustomScanState *node) if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); */ + /* Modify 
expression to our needs */ + adapt_rte_map(estate->es_range_table, prel->expr_map); + expression_tree_walker((Node *)prel->expr, adapt_walker, (void *) prel); + /* Prepare state before execution */ expr_state = ExecPrepareExpr(prel->expr, estate); @@ -598,7 +669,11 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Execute expression */ + orig_slot = econtext->ecxt_scantuple; + econtext->ecxt_scantuple = slot; value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + econtext->ecxt_scantuple = orig_slot; + if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); diff --git a/src/relation_info.c b/src/relation_info.c index 1493fab7..1b065d5d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -144,7 +144,7 @@ refresh_pathman_relation_info(Oid relid, * Save parsed expression to cache and use already saved expression type * from config */ - prel->expr = (Expr *) get_expression_node(relid, expr, true); + prel->expr = (Expr *) get_expression_node(relid, expr, true, &prel->expr_map); prel->atttype = expr_type; tp = SearchSysCache1(TYPEOID, values[Anum_pathman_config_atttype - 1]); From dcfc5bc645da95a891374d5fb2e5ae8d41a228a5 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 15 Mar 2017 17:04:09 +0300 Subject: [PATCH 0272/1124] Execute expression using CustomConst node --- src/include/relation_info.h | 12 ++ src/partition_creation.c | 378 +++--------------------------------- src/partition_filter.c | 74 ++++--- 3 files changed, 76 insertions(+), 388 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 712580e5..d6857b37 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -159,6 +159,18 @@ typedef struct Oid parent_rel; } PartParentInfo; +/* + * CustomConst + * Const with Var pointer + * We can know that is CustomConst by checking `location`. 
It should be + * equal -2 + */ +typedef struct +{ + Const cns; + Var *orig; +} CustomConst; + /* * PartParentSearch * Represents status of a specific cached entry. diff --git a/src/partition_creation.c b/src/partition_creation.c index 275bb9b9..f035594d 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1703,355 +1703,34 @@ parse_expression(Oid relid, const char *expr, char **query_string_out) } /* - * exprNodeLocation - - * basicly copy of exprLocation from nodeFuncs, but with another - * purpose - get address of location variable + * To prevent calculation of Vars in expression, we change them with + * CustomConst, and later before execution we fill it with actual value */ -static bool -clearNodeLocation(const Node *expr) +static Node * +expression_mutator(Node *node, void *context) { - if (expr == NULL) - return false; + const TypeCacheEntry *typcache; - switch (nodeTag(expr)) + if (IsA(node, Var)) { - case T_RangeVar: - ((RangeVar *) expr)->location = -1; - break; - case T_Var: - ((Var *) expr)->location = -1; - break; - case T_Const: - ((Const *) expr)->location = -1; - break; - case T_Param: - ((Param *) expr)->location = -1; - break; - case T_Aggref: - /* function name should always be the first thing */ - ((Aggref *) expr)->location = -1; - break; - case T_GroupingFunc: - ((GroupingFunc *) expr)->location = -1; - break; - case T_WindowFunc: - /* function name should always be the first thing */ - ((WindowFunc *) expr)->location = -1; - break; - case T_ArrayRef: - /* just use array argument's location */ - clearNodeLocation((Node *) ((const ArrayRef *) expr)->refexpr); - break; - case T_FuncExpr: - { - FuncExpr *fexpr = (FuncExpr *) expr; - - /* consider both function name and leftmost arg */ - fexpr->location = -1; - clearNodeLocation((Node *) fexpr->args); - } - break; - case T_NamedArgExpr: - { - NamedArgExpr *na = (NamedArgExpr *) expr; - - /* consider both argument name and value */ - na->location = -1; - clearNodeLocation((Node *) 
na->arg); - } - break; - case T_OpExpr: - case T_DistinctExpr: /* struct-equivalent to OpExpr */ - case T_NullIfExpr: /* struct-equivalent to OpExpr */ - { - OpExpr *opexpr = (OpExpr *) expr; - - /* consider both operator name and leftmost arg */ - opexpr->location = -1; - clearNodeLocation((Node *) opexpr->args); - } - break; - case T_ScalarArrayOpExpr: - { - ScalarArrayOpExpr *saopexpr = (ScalarArrayOpExpr *) expr; - - /* consider both operator name and leftmost arg */ - - saopexpr->location = -1; - clearNodeLocation((Node *) saopexpr->args); - } - break; - case T_BoolExpr: - { - BoolExpr *bexpr = (BoolExpr *) expr; - - /* - * Same as above, to handle either NOT or AND/OR. We can't - * special-case NOT because of the way that it's used for - * things like IS NOT BETWEEN. - */ - bexpr->location = -1; - clearNodeLocation((Node *) bexpr->args); - } - break; - case T_SubLink: - { - SubLink *sublink = (SubLink *) expr; - - /* check the testexpr, if any, and the operator/keyword */ - clearNodeLocation(sublink->testexpr); - sublink->location = -1; - } - break; - case T_FieldSelect: - /* just use argument's location */ - return clearNodeLocation((Node *) ((FieldSelect *) expr)->arg); - case T_FieldStore: - /* just use argument's location */ - return clearNodeLocation((Node *) ((FieldStore *) expr)->arg); - case T_RelabelType: - { - RelabelType *rexpr = (RelabelType *) expr; - - /* Much as above */ - rexpr->location = -1; - clearNodeLocation((Node *) rexpr->arg); - } - break; - case T_CoerceViaIO: - { - CoerceViaIO *cexpr = (CoerceViaIO *) expr; - - /* Much as above */ - cexpr->location = -1; - clearNodeLocation((Node *) cexpr->arg); - } - break; - case T_ArrayCoerceExpr: - { - ArrayCoerceExpr *cexpr = (ArrayCoerceExpr *) expr; - - /* Much as above */ - cexpr->location = -1; - clearNodeLocation((Node *) cexpr->arg); - } - break; - case T_ConvertRowtypeExpr: - { - ConvertRowtypeExpr *cexpr = (ConvertRowtypeExpr *) expr; - - /* Much as above */ - cexpr->location = -1; - 
clearNodeLocation((Node *) cexpr->arg); - } - break; - case T_CollateExpr: - /* just use argument's location */ - clearNodeLocation((Node *) ((CollateExpr *) expr)->arg); - break; - case T_CaseExpr: - /* CASE keyword should always be the first thing */ - ((CaseExpr *) expr)->location = -1; - break; - case T_CaseWhen: - /* WHEN keyword should always be the first thing */ - ((CaseWhen *) expr)->location = -1; - break; - case T_ArrayExpr: - /* the location points at ARRAY or [, which must be leftmost */ - ((ArrayExpr *) expr)->location = -1; - break; - case T_RowExpr: - /* the location points at ROW or (, which must be leftmost */ - ((RowExpr *) expr)->location = -1; - break; - case T_RowCompareExpr: - /* just use leftmost argument's location */ - return clearNodeLocation((Node *) ((RowCompareExpr *) expr)->largs); - case T_CoalesceExpr: - /* COALESCE keyword should always be the first thing */ - ((CoalesceExpr *) expr)->location = -1; - break; - case T_MinMaxExpr: - /* GREATEST/LEAST keyword should always be the first thing */ - ((MinMaxExpr *) expr)->location = -1; - break; - case T_XmlExpr: - { - XmlExpr *xexpr = (XmlExpr *) expr; - - /* consider both function name and leftmost arg */ - xexpr->location = -1; - clearNodeLocation((Node *) xexpr->args); - } - break; - case T_NullTest: - { - NullTest *nexpr = (NullTest *) expr; - - /* Much as above */ - nexpr->location = -1; - clearNodeLocation((Node *) nexpr->arg); - } - break; - case T_BooleanTest: - { - BooleanTest *bexpr = (BooleanTest *) expr; - - /* Much as above */ - bexpr->location = -1; - clearNodeLocation((Node *) bexpr->arg); - } - break; - case T_CoerceToDomain: - { - CoerceToDomain *cexpr = (CoerceToDomain *) expr; - - /* Much as above */ - cexpr->location = -1; - clearNodeLocation((Node *) cexpr->arg); - } - break; - case T_CoerceToDomainValue: - ((CoerceToDomainValue *) expr)->location = -1; - break; - case T_SetToDefault: - ((SetToDefault *) expr)->location = -1; - break; - case T_TargetEntry: - /* just 
use argument's location */ - return clearNodeLocation((Node *) ((const TargetEntry *) expr)->expr); - case T_IntoClause: - /* use the contained RangeVar's location --- close enough */ - return clearNodeLocation((Node *) ((const IntoClause *) expr)->rel); - case T_List: - { - /* report location of first list member that has a location */ - ListCell *lc; - - //loc = -1; /* just to suppress compiler warning */ - foreach(lc, (const List *) expr) - { - clearNodeLocation((Node *) lfirst(lc)); - } - } - break; - case T_A_Expr: - { - A_Expr *aexpr = (A_Expr *) expr; - - /* use leftmost of operator or left operand (if any) */ - /* we assume right operand can't be to left of operator */ - aexpr->location = -1; - clearNodeLocation(aexpr->lexpr); - } - break; - case T_ColumnRef: - ((ColumnRef *) expr)->location = -1; - break; - case T_ParamRef: - ((ParamRef *) expr)->location = -1; - break; - case T_A_Const: - ((A_Const *) expr)->location = -1; - break; - case T_FuncCall: - { - FuncCall *fc = (FuncCall *) expr; - - /* consider both function name and leftmost arg */ - /* (we assume any ORDER BY nodes must be to right of name) */ - fc->location = -1; - clearNodeLocation((Node *) fc->args); - } - break; - case T_A_ArrayExpr: - /* the location points at ARRAY or [, which must be leftmost */ - ((A_ArrayExpr *) expr)->location = -1; - break; - case T_ResTarget: - /* we need not examine the contained expression (if any) */ - ((ResTarget *) expr)->location = -1; - break; - case T_MultiAssignRef: - return clearNodeLocation((Node *)(((MultiAssignRef *) expr)->source)); - case T_TypeCast: - { - TypeCast *tc = (TypeCast *) expr; - - /* - * This could represent CAST(), ::, or TypeName 'literal', so - * any of the components might be leftmost. 
- */ - clearNodeLocation(tc->arg); - tc->typeName->location = -1; - tc->location = -1; - } - break; - case T_CollateClause: - /* just use argument's location */ - return clearNodeLocation(((CollateClause *) expr)->arg); - case T_SortBy: - /* just use argument's location (ignore operator, if any) */ - return clearNodeLocation(((SortBy *) expr)->node); - case T_WindowDef: - ((WindowDef *) expr)->location = -1; - break; - case T_RangeTableSample: - ((RangeTableSample *) expr)->location = -1; - break; - case T_TypeName: - ((TypeName *) expr)->location = -1; - break; - case T_ColumnDef: - ((ColumnDef *) expr)->location = -1; - break; - case T_Constraint: - ((Constraint *) expr)->location = -1; - break; - case T_FunctionParameter: - /* just use typename's location */ - return clearNodeLocation((Node *) ((const FunctionParameter *) expr)->argType); - case T_XmlSerialize: - /* XMLSERIALIZE keyword should always be the first thing */ - ((XmlSerialize *) expr)->location = -1; - break; - case T_GroupingSet: - ((GroupingSet *) expr)->location = -1; - break; - case T_WithClause: - ((WithClause *) expr)->location = -1; - break; - case T_InferClause: - ((InferClause *) expr)->location = -1; - break; - case T_OnConflictClause: - ((OnConflictClause *) expr)->location = -1; - break; - case T_CommonTableExpr: - ((CommonTableExpr *) expr)->location = -1; - break; - case T_PlaceHolderVar: - /* just use argument's location */ - return clearNodeLocation((Node *) ((const PlaceHolderVar *) expr)->phexpr); - case T_InferenceElem: - /* just use nested expr's location */ - return clearNodeLocation((Node *) ((const InferenceElem *) expr)->expr); - default: - return false; + Node *new_node = newNode(sizeof(CustomConst), T_Const); + Const *new_const = (Const *)new_node; + ((CustomConst *)new_node)->orig = (Var *)node; + + new_const->consttype = ((Var *)node)->vartype; + new_const->consttypmod = ((Var *)node)->vartypmod; + new_const->constcollid = ((Var *)node)->varcollid; + new_const->constvalue 
= (Datum) 0; + new_const->constisnull = false; + new_const->location = -2; + + typcache = lookup_type_cache(new_const->consttype, 0); + new_const->constbyval = typcache->typbyval; + new_const->constlen = typcache->typlen; + + return new_node; } - return true; -} - -static bool location_cleaning_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (clearNodeLocation(node)) - return false; - - return raw_expression_tree_walker(node, location_cleaning_walker, context); + return expression_tree_mutator(node, expression_mutator, NULL); } /* By given relation id and expression returns node */ @@ -2062,7 +1741,7 @@ get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_ List *target_list; char *query_string; Node *parsetree = parse_expression(relid, expr, &query_string), - *raw_node; + *result; Query *query; TargetEntry *target_entry; PlannedStmt *plan; @@ -2070,9 +1749,8 @@ get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_ target_list = ((SelectStmt *)parsetree)->targetList; if (!analyze) { - raw_node = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); - //raw_expression_tree_walker(raw_node, location_cleaning_walker, NULL); - return raw_node; + result = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); + return result; } /* We don't need pathman hooks on next stages */ @@ -2106,7 +1784,9 @@ get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_ /* Hooks can work now */ hooks_enabled = true; - return (Node *)target_entry->expr; + result = (Node *)target_entry->expr; + result = expression_mutator(result, NULL); + return result; } /* Determines type of expression for a relation */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 823062d4..045b50ff 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -561,37 +561,39 @@ adapt_rte_map(List *es_rangetable, RTEMapItem *rte_map) } } +struct expr_walker_context 
+{ + const PartRelationInfo *prel; + TupleTableSlot *slot; +}; + static bool -adapt_walker(Node *node, void *context) +adapt_values (Node *node, struct expr_walker_context *context) { if (node == NULL) return false; - if (IsA(node, Var)) + if (IsA(node, Const) && ((Const *)node)->location == -2) { - PartRelationInfo *prel = (PartRelationInfo *)context; - int i = 0; - Var *var = (Var *)node; - - while (true) - { - RTEMapItem *item = &prel->expr_map[i]; - if (item->relid == 0) - break; + Var *variable; + AttrNumber attnum; + Const *cst; + bool isNull; - if (var->varno == (i + 1)) - { - var->varno = item->res_idx; - return false; - } + cst = (Const *)node; + variable = ((CustomConst *)node)->orig; - i++; - } + attnum = variable->varattno; + Assert(attnum != InvalidAttrNumber); - elog(ERROR, "Didn't found relation for Var in expression"); + Assert(context->slot->tts_tupleDescriptor-> + attrs[attnum - 1]->atttypid == cst->consttype); + cst->constvalue = slot_getattr(context->slot, attnum, &isNull); + cst->constisnull = isNull; + return false; } - return expression_tree_walker(node, adapt_walker, context); + return expression_tree_walker(node, adapt_values, (void *) context); } void @@ -630,14 +632,14 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { - MemoryContext old_cxt; - const PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder; - bool isnull; - Datum value; - ExprDoneCond itemIsDone; - ExprState *expr_state; - TupleTableSlot *orig_slot; + MemoryContext old_cxt; + const PartRelationInfo *prel; + ResultRelInfoHolder *rri_holder; + bool isnull; + Datum value; + ExprDoneCond itemIsDone; + ExprState *expr_state; + struct expr_walker_context expr_walker_context; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -651,16 +653,13 @@ partition_filter_exec(CustomScanState *node) return slot; } - /* Extract partitioned column's value (also check types) - 
Assert(slot->tts_tupleDescriptor-> - attrs[prel->attnum - 1]->atttypid == prel->atttype); - value = slot_getattr(slot, prel->attnum, &isnull); - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); */ + /* Prepare walker context */ + expr_walker_context.prel = prel; /* maybe slot will be enough */ + expr_walker_context.slot = slot; - /* Modify expression to our needs */ + /* Fetch values from slot for expression */ adapt_rte_map(estate->es_range_table, prel->expr_map); - expression_tree_walker((Node *)prel->expr, adapt_walker, (void *) prel); + adapt_values((Node *)prel->expr, (void *) &expr_walker_context); /* Prepare state before execution */ expr_state = ExecPrepareExpr(prel->expr, estate); @@ -669,10 +668,7 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Execute expression */ - orig_slot = econtext->ecxt_scantuple; - econtext->ecxt_scantuple = slot; value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - econtext->ecxt_scantuple = orig_slot; if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); From e898585f6af14cf88948fc5e51dda7b8eea3611d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 15 Mar 2017 20:08:00 +0300 Subject: [PATCH 0273/1124] Simplfy code --- src/include/init.h | 5 -- src/include/partition_creation.h | 3 +- src/include/relation_info.h | 11 ++--- src/partition_creation.c | 84 +++++++++++++++++--------------- src/partition_filter.c | 66 +++++++++---------------- src/relation_info.c | 2 +- 6 files changed, 76 insertions(+), 95 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 02994fbd..ef8e06dd 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -50,11 +50,6 @@ extern PathmanInitState pg_pathman_init_state; */ #define IsPathmanEnabled() ( pg_pathman_init_state.pg_pathman_enable ) -/* - * Enable or disable pg_pathman - */ -#define EnablePathman(b) ( pg_pathman_init_state.pg_pathman_enable = (b) ) - /* * Check if pg_pathman is initialized & enabled. 
*/ diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 93bf9ce1..8b2310b7 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -80,8 +80,7 @@ Node * build_raw_hash_check_tree(const char *base_expr, void drop_check_constraint(Oid relid, AttrNumber attnum); /* expression parsing functions */ -Node *get_expression_node(Oid relid, const char *expr, bool analyze, - RTEMapItem **rte_map); +Node *get_expression_node(Oid relid, const char *expr, bool analyze); Oid get_partition_expr_type(Oid relid, const char *expr); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d6857b37..7f479f2f 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -135,7 +135,6 @@ typedef struct RangeEntry *ranges; /* per-partition range entry or NULL */ Expr *expr; /* planned expression */ - RTEMapItem *expr_map; /* 'varno' map */ PartType parttype; /* partitioning type (HASH | RANGE) */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ @@ -161,14 +160,14 @@ typedef struct /* * CustomConst - * Const with Var pointer - * We can know that is CustomConst by checking `location`. It should be - * equal -2 + * Modified Const that also stores 'varattno' attribute from some Var + * We can check that is CustomConst by checking `location` attrubute. 
+ * It should be equal -2 */ typedef struct { - Const cns; - Var *orig; + Const cns; + AttrNumber varattno; } CustomConst; /* diff --git a/src/partition_creation.c b/src/partition_creation.c index f035594d..9d152ae0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -43,6 +43,7 @@ #include "utils/jsonb.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/syscache.h" #include "utils/typcache.h" @@ -1296,7 +1297,7 @@ build_raw_hash_check_tree(const char *base_expr, Oid hash_proc; TypeCacheEntry *tce; - Node *expr = get_expression_node(relid, base_expr, false, NULL); + Node *expr = get_expression_node(relid, base_expr, false); tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; @@ -1702,26 +1703,40 @@ parse_expression(Oid relid, const char *expr, char **query_string_out) return (Node *)(lfirst(list_head(parsetree_list))); } +struct expr_mutator_context +{ + Oid relid; /* partitioned table */ + List *rtable; /* range table list from expression query */ +}; + /* - * To prevent calculation of Vars in expression, we change them with + * To prevent calculation of Vars in expression, we wrap them with * CustomConst, and later before execution we fill it with actual value */ static Node * -expression_mutator(Node *node, void *context) +expression_mutator(Node *node, struct expr_mutator_context *context) { const TypeCacheEntry *typcache; if (IsA(node, Var)) { - Node *new_node = newNode(sizeof(CustomConst), T_Const); - Const *new_const = (Const *)new_node; - ((CustomConst *)new_node)->orig = (Var *)node; + Var *variable = (Var *) node; + Node *new_node = newNode(sizeof(CustomConst), T_Const); + Const *new_const = (Const *)new_node; + + RangeTblEntry *entry = rt_fetch(variable->varno, context->rtable); + if (entry->relid != context->relid) + elog(ERROR, "Columns in the expression should " + "be only from partitioned relation"); + + /* we only need varattno from original 
Var, for now */ + ((CustomConst *)new_node)->varattno = ((Var *)node)->varattno; new_const->consttype = ((Var *)node)->vartype; new_const->consttypmod = ((Var *)node)->vartypmod; new_const->constcollid = ((Var *)node)->varcollid; new_const->constvalue = (Datum) 0; - new_const->constisnull = false; + new_const->constisnull = true; new_const->location = -2; typcache = lookup_type_cache(new_const->consttype, 0); @@ -1730,22 +1745,25 @@ expression_mutator(Node *node, void *context) return new_node; } - return expression_tree_mutator(node, expression_mutator, NULL); + return expression_tree_mutator(node, expression_mutator, (void *) context); } /* By given relation id and expression returns node */ Node * -get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_map) +get_expression_node(Oid relid, const char *expr, bool analyze) { - List *querytree_list; - List *target_list; - char *query_string; - Node *parsetree = parse_expression(relid, expr, &query_string), - *result; - Query *query; - TargetEntry *target_entry; - PlannedStmt *plan; - + List *querytree_list; + List *target_list; + char *query_string; + Node *parsetree, + *result; + Query *query; + TargetEntry *target_entry; + PlannedStmt *plan; + MemoryContext oldcontext; + struct expr_mutator_context context; + + parsetree = parse_expression(relid, expr, &query_string), target_list = ((SelectStmt *)parsetree)->targetList; if (!analyze) { @@ -1759,33 +1777,21 @@ get_expression_node(Oid relid, const char *expr, bool analyze, RTEMapItem **rte_ querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); query = (Query *)lfirst(list_head(querytree_list)); plan = pg_plan_query(query, 0, NULL); - - if (rte_map != NULL) - { - int i = 0; - int len = list_length(plan->rtable); - ListCell *cell; - - *rte_map = (RTEMapItem *)palloc0(sizeof(RTEMapItem) * (len + 1)); - foreach(cell, plan->rtable) - { - RangeTblEntry *tbl = lfirst(cell); - /* only plain relation RTE */ - Assert(tbl->relid > 
0); - (*rte_map)[i].relid = tbl->relid; - (*rte_map)[i].res_idx = -1; - - i++; - } - } - target_entry = lfirst(list_head(plan->planTree->targetlist)); /* Hooks can work now */ hooks_enabled = true; result = (Node *)target_entry->expr; - result = expression_mutator(result, NULL); + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + + /* We need relid and range table list for mutator */ + context.relid = relid; + context.rtable = plan->rtable; + + /* This will create new tree in TopMemoryContext */ + result = expression_mutator(result, (void *) &context); + MemoryContextSwitchTo(oldcontext); return result; } diff --git a/src/partition_filter.c b/src/partition_filter.c index 045b50ff..876478dc 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -528,68 +528,45 @@ partition_filter_create_scan_state(CustomScan *node) return (Node *) state; } -static void -adapt_rte_map(List *es_rangetable, RTEMapItem *rte_map) -{ - int i = 0; - ListCell *cell; - - while (true) - { - int j = 1; /* rangetable entries are counting from 1 */ - bool found = false; - - RTEMapItem *item = &rte_map[i++]; - if (item->relid == 0) /* end of array */ - break; - - foreach(cell, es_rangetable) - { - RangeTblEntry *entry = lfirst(cell); - if (entry->relid == item->relid) { - item->res_idx = j; - found = true; - break; - } - - j++; - } - - if (!found) - elog(ERROR, "Didn't found RTE entry for relid %d in expression", - item->relid); - } -} - struct expr_walker_context { const PartRelationInfo *prel; TupleTableSlot *slot; + bool clear; }; +/* Fills CustomConst nodes with values from slot */ static bool adapt_values (Node *node, struct expr_walker_context *context) { if (node == NULL) return false; + /* location == -2 means that it's our CustomConst node */ if (IsA(node, Const) && ((Const *)node)->location == -2) { - Var *variable; AttrNumber attnum; Const *cst; bool isNull; cst = (Const *)node; - variable = ((CustomConst *)node)->orig; - attnum = variable->varattno; + attnum = 
((CustomConst *)node)->varattno; Assert(attnum != InvalidAttrNumber); - Assert(context->slot->tts_tupleDescriptor-> - attrs[attnum - 1]->atttypid == cst->consttype); - cst->constvalue = slot_getattr(context->slot, attnum, &isNull); - cst->constisnull = isNull; + if (context->clear) + { + cst->constvalue = (Datum) 0; + cst->constisnull = true; + } + else + { + /* check that type is still same */ + Assert(context->slot->tts_tupleDescriptor-> + attrs[attnum - 1]->atttypid == cst->consttype); + cst->constvalue = slot_getattr(context->slot, attnum, &isNull); + cst->constisnull = isNull; + } return false; } @@ -656,9 +633,9 @@ partition_filter_exec(CustomScanState *node) /* Prepare walker context */ expr_walker_context.prel = prel; /* maybe slot will be enough */ expr_walker_context.slot = slot; + expr_walker_context.clear = true; - /* Fetch values from slot for expression */ - adapt_rte_map(estate->es_range_table, prel->expr_map); + /* Clear values from slot for expression */ adapt_values((Node *)prel->expr, (void *) &expr_walker_context); /* Prepare state before execution */ @@ -667,6 +644,11 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + expr_walker_context.clear = false; + + /* Fetch values from slot for expression */ + adapt_values((Node *)prel->expr, (void *) &expr_walker_context); + /* Execute expression */ value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); diff --git a/src/relation_info.c b/src/relation_info.c index 1b065d5d..1493fab7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -144,7 +144,7 @@ refresh_pathman_relation_info(Oid relid, * Save parsed expression to cache and use already saved expression type * from config */ - prel->expr = (Expr *) get_expression_node(relid, expr, true, &prel->expr_map); + prel->expr = (Expr *) get_expression_node(relid, expr, true); prel->atttype = expr_type; tp = SearchSysCache1(TYPEOID, 
values[Anum_pathman_config_atttype - 1]); From 70c064d6fe71c0a2187697bd8107f62c1bcad2b8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 22 Mar 2017 14:44:27 +0300 Subject: [PATCH 0274/1124] Use heap_getattr and copy slot before using --- src/debug_print.c | 31 +++++++++++++++++++++++++++++++ src/partition_filter.c | 18 ++++++++---------- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 36016861..f1e93c87 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -11,9 +11,11 @@ #include "rangeset.h" #include "postgres.h" +#include "fmgr.h" #include "nodes/bitmapset.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" +#include "utils/lsyscache.h" /* @@ -99,3 +101,32 @@ irange_print(IndexRange irange) return str.data; } + +/* + * Print Datum as cstring + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +datum_print(Datum origval, Oid typid) +{ + Oid typoutput; + bool typisvarlena; + Datum val; + + /* Query output function */ + getTypeOutputInfo(typid, &typoutput, &typisvarlena); + + if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(origval)) + return NULL; //unchanged-toast-datum + else if (!typisvarlena) + val = origval; + else + { + /* Definitely detoasted Datum */ + val = PointerGetDatum(PG_DETOAST_DATUM(origval)); + } + + return OidOutputFunctionCall(typoutput, val); +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 876478dc..e5152773 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -532,6 +532,7 @@ struct expr_walker_context { const PartRelationInfo *prel; TupleTableSlot *slot; + HeapTuple tup; bool clear; }; @@ -564,7 +565,8 @@ adapt_values (Node *node, struct expr_walker_context *context) /* check that type is still same */ Assert(context->slot->tts_tupleDescriptor-> attrs[attnum - 1]->atttypid == cst->consttype); - cst->constvalue = slot_getattr(context->slot, attnum, &isNull); + cst->constvalue = heap_getattr(context->tup, attnum, + 
context->slot->tts_tupleDescriptor, &isNull); cst->constisnull = isNull; } return false; @@ -633,22 +635,18 @@ partition_filter_exec(CustomScanState *node) /* Prepare walker context */ expr_walker_context.prel = prel; /* maybe slot will be enough */ expr_walker_context.slot = slot; - expr_walker_context.clear = true; - - /* Clear values from slot for expression */ - adapt_values((Node *)prel->expr, (void *) &expr_walker_context); - - /* Prepare state before execution */ - expr_state = ExecPrepareExpr(prel->expr, estate); + expr_walker_context.tup = ExecCopySlotTuple(slot); + expr_walker_context.clear = false; /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - expr_walker_context.clear = false; - /* Fetch values from slot for expression */ adapt_values((Node *)prel->expr, (void *) &expr_walker_context); + /* Prepare state before execution */ + expr_state = ExecPrepareExpr(prel->expr, estate); + /* Execute expression */ value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); From e37bec3b272f8a8d1e07a764f6c7305a2f2336b8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 22 Mar 2017 15:16:50 +0300 Subject: [PATCH 0275/1124] Ignore paths temporarily --- src/pg_pathman.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0027ceb5..748413e6 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1203,7 +1203,8 @@ pull_var_param(const WalkerContext *ctx, (Var *) ((RelabelType *) left)->arg; /* Check if 'v' is partitioned column of 'prel' */ - if (v->varoattno == ctx->prel->attnum && + /* FIX this */ + if (v->varoattno == 0 && v->varno == ctx->prel_varno) { *var_ptr = left; @@ -1220,7 +1221,8 @@ pull_var_param(const WalkerContext *ctx, (Var *) ((RelabelType *) right)->arg; /* Check if 'v' is partitioned column of 'prel' */ - if (v->varoattno == ctx->prel->attnum && + /* FIX this */ + if (v->varoattno == 0 && v->varno == ctx->prel_varno) { *var_ptr = 
right; From 58ef2a5498515715e88007bf315089807f5a7b45 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 22 Mar 2017 15:32:11 +0300 Subject: [PATCH 0276/1124] Copy and calculate in per tuple memory context --- src/partition_filter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index e5152773..575f6825 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -632,15 +632,15 @@ partition_filter_exec(CustomScanState *node) return slot; } + /* Switch to per-tuple context */ + old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + /* Prepare walker context */ - expr_walker_context.prel = prel; /* maybe slot will be enough */ + expr_walker_context.prel = prel; expr_walker_context.slot = slot; expr_walker_context.tup = ExecCopySlotTuple(slot); expr_walker_context.clear = false; - /* Switch to per-tuple context */ - old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Fetch values from slot for expression */ adapt_values((Node *)prel->expr, (void *) &expr_walker_context); From 405c5098b83b2167f9562961bf0fd9e8bd9d97f1 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 23 Mar 2017 12:01:51 +0300 Subject: [PATCH 0277/1124] Make last changes --- src/partition_creation.c | 2 ++ src/partition_filter.c | 16 ++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 9d152ae0..c4aa5457 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1783,6 +1783,8 @@ get_expression_node(Oid relid, const char *expr, bool analyze) hooks_enabled = true; result = (Node *)target_entry->expr; + + /* We keep expression in top context */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); /* We need relid and range table list for mutator */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 575f6825..97b84377 100644 --- a/src/partition_filter.c +++ 
b/src/partition_filter.c @@ -632,6 +632,16 @@ partition_filter_exec(CustomScanState *node) return slot; } + old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + + /* Fetch values from slot for expression */ + adapt_values((Node *)prel->expr, (void *) &expr_walker_context); + + MemoryContextSwitchTo(old_cxt); + + /* Prepare state for execution */ + expr_state = ExecPrepareExpr(prel->expr, estate); + /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -641,12 +651,6 @@ partition_filter_exec(CustomScanState *node) expr_walker_context.tup = ExecCopySlotTuple(slot); expr_walker_context.clear = false; - /* Fetch values from slot for expression */ - adapt_values((Node *)prel->expr, (void *) &expr_walker_context); - - /* Prepare state before execution */ - expr_state = ExecPrepareExpr(prel->expr, estate); - /* Execute expression */ value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); From e23b607868d4ed02fee21954301dee70f303d878 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 24 Mar 2017 17:11:23 +0300 Subject: [PATCH 0278/1124] fix formatting in README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 709ccf35..22a9ef87 100644 --- a/README.md +++ b/README.md @@ -99,8 +99,8 @@ create_hash_partitions(relation REGCLASS, attribute TEXT, partitions_count INTEGER, partition_data BOOLEAN DEFAULT TRUE, - partition_names TEXT[] DEFAULT NULL, - tablespaces TEXT[] DEFAULT NULL) + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) ``` Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. 
Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). From c1cec9d6c8fffbfeed94bed47510931453b66843 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 24 Mar 2017 21:54:17 +0300 Subject: [PATCH 0279/1124] README.md: fix authors section --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 22a9ef87..96403f26 100644 --- a/README.md +++ b/README.md @@ -665,7 +665,7 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. ## Authors -Ildar Musin Postgres Professional Ltd., Russia -Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia +Ildar Musin Postgres Professional Ltd., Russia +Alexander Korotkov Postgres Professional Ltd., Russia +Dmitry Ivanov Postgres Professional Ltd., Russia From 577072934cb42307c459be2d26657917b31a7e3b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 24 Mar 2017 21:55:09 +0300 Subject: [PATCH 0280/1124] README.rus.md: fix authors section --- README.rus.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rus.md b/README.rus.md index 6acea3c5..d7fe87a4 100644 --- a/README.rus.md +++ b/README.rus.md @@ -488,6 +488,6 @@ SELECT disable_pathman_for('range_rel'); Если у вас есть вопросы или предложения, а также если вы обнаружили ошибки, напишите нам в разделе [issues](https://github.com/postgrespro/pg_pathman/issues). 
## Авторы -Ильдар Мусин Postgres Professional, Россия -Александр Коротков Postgres Professional, Россия -Дмитрий Иванов Postgres Professional, Россия +Ильдар Мусин Postgres Professional, Россия +Александр Коротков Postgres Professional, Россия +Дмитрий Иванов Postgres Professional, Россия From bb711e866eaea6d203d9ea5841f457df984915ad Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 25 Mar 2017 13:13:28 +0300 Subject: [PATCH 0281/1124] fix LICENSE --- LICENSE | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/LICENSE b/LICENSE index 5c6dab26..263a54bd 100644 --- a/LICENSE +++ b/LICENSE @@ -1,11 +1,11 @@ -Copyright (c) 2015-2016, Postgres Professional - -Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group +pg_pathman is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses. +Copyright (c) 2015-2017, Postgres Professional +Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. -IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +IN NO EVENT SHALL POSTGRES PROFESSIONAL BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF POSTGRES PROFESSIONAL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +POSTGRES PROFESSIONAL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND POSTGRES PROFESSIONAL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. From 13e8aef6c66b70520a0c8191885df32631e69ae6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 25 Mar 2017 13:51:08 +0300 Subject: [PATCH 0282/1124] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 96403f26..74018a4d 100644 --- a/README.md +++ b/README.md @@ -661,7 +661,7 @@ SELECT disable_pathman_for('range_rel'); ``` All sections and data will remain unchanged and will be handled by the standard PostgreSQL inheritance mechanism. -##Feedback +## Feedback Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. 
## Authors From 0e24f25ac7525f9f0a85f25c80d48ada5643284d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 14:55:54 +0300 Subject: [PATCH 0283/1124] WIP implement constraint cache --- src/include/init.h | 5 ++ src/include/relation_info.h | 28 ++++++-- src/init.c | 81 +++++------------------ src/relation_info.c | 127 ++++++++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+), 71 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 7b5459b0..49891485 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -21,6 +21,10 @@ #include "utils/snapshot.h" +/* Help user in case of emergency */ +#define INIT_ERROR_HINT "pg_pathman will be disabled to allow you to resolve this issue" + + /* * pg_pathman's initialization state structure. */ @@ -35,6 +39,7 @@ typedef struct extern HTAB *partitioned_rels; extern HTAB *parent_cache; +extern HTAB *constraint_cache; /* pg_pathman's initialization state */ extern PathmanInitState pg_pathman_init_state; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 587de24e..875f3e62 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -18,6 +18,7 @@ #include "port/atomics.h" #include "storage/lock.h" #include "utils/datum.h" +#include "nodes/primnodes.h" /* Range bound */ @@ -37,7 +38,7 @@ typedef struct #define IsMinusInfinity(i) ( (i)->is_infinite == MINUS_INFINITY ) -inline static Bound +static inline Bound CopyBound(const Bound *src, bool byval, int typlen) { Bound bound = { @@ -50,7 +51,7 @@ CopyBound(const Bound *src, bool byval, int typlen) return bound; } -inline static Bound +static inline Bound MakeBound(Datum value) { Bound bound = { value, FINITE }; @@ -58,7 +59,7 @@ MakeBound(Datum value) return bound; } -inline static Bound +static inline Bound MakeBoundInf(int8 infinity_type) { Bound bound = { (Datum) 0, infinity_type }; @@ -66,7 +67,7 @@ MakeBoundInf(int8 infinity_type) return bound; } -inline static Datum +static inline Datum 
BoundGetValue(const Bound *bound) { Assert(!IsInfinite(bound)); @@ -74,7 +75,7 @@ BoundGetValue(const Bound *bound) return bound->value; } -inline static int +static inline int cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) @@ -149,6 +150,13 @@ typedef struct Oid parent_rel; } PartParentInfo; +typedef struct +{ + Oid child_rel; /* key */ + Oid conid; + Expr *constraint; +} PartConstraintInfo; + /* * PartParentSearch * Represents status of a specific cached entry. @@ -177,7 +185,7 @@ typedef enum #define PrelIsValid(prel) ( (prel) && (prel)->valid ) -inline static uint32 +static inline uint32 PrelLastChild(const PartRelationInfo *prel) { Assert(PrelIsValid(prel)); @@ -201,18 +209,26 @@ const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found, LockAcquireResult *lock_result); +/* Global invalidation routines */ void delay_pathman_shutdown(void); void delay_invalidation_parent_rel(Oid parent); void delay_invalidation_vague_rel(Oid vague_rel); void finish_delayed_invalidation(void); +/* Parent cache */ void cache_parent_of_partition(Oid partition, Oid parent); Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); Oid get_parent_of_partition(Oid partition, PartParentSearch *status); +/* Constraint cache */ +Oid forget_constraint_of_partition(Oid partition); +Expr * get_constraint_of_partition(Oid partition, AttrNumber part_attno); + +/* Safe casts for PartType */ PartType DatumGetPartType(Datum datum); char * PartTypeToCString(PartType parttype); +/* PartRelationInfo checker */ void shout_if_prel_is_invalid(Oid parent_oid, const PartRelationInfo *prel, PartType expected_part_type); diff --git a/src/init.c b/src/init.c index 1011a198..bc5e19a4 100644 --- a/src/init.c +++ b/src/init.c @@ -21,7 +21,6 @@ #include "access/htup_details.h" #include "access/sysattr.h" #include "catalog/indexing.h" -#include "catalog/pg_constraint.h" #include 
"catalog/pg_extension.h" #include "catalog/pg_inherits.h" #include "catalog/pg_inherits_fn.h" @@ -38,13 +37,6 @@ #include "utils/syscache.h" #include "utils/typcache.h" -#if PG_VERSION_NUM >= 90600 -#include "catalog/pg_constraint_fn.h" -#endif - - -/* Help user in case of emergency */ -#define INIT_ERROR_HINT "pg_pathman will be disabled to allow you to resolve this issue" /* Initial size of 'partitioned_rels' table */ #define PART_RELS_SIZE 10 @@ -57,6 +49,9 @@ HTAB *partitioned_rels = NULL; /* Storage for PartParentInfos */ HTAB *parent_cache = NULL; +/* Storage for partition constraints */ +HTAB *constraint_cache = NULL; + /* pg_pathman's init status */ PathmanInitState pg_pathman_init_state; @@ -71,8 +66,6 @@ static void init_local_cache(void); static void fini_local_cache(void); static void read_pathman_config(void); -static Expr *get_partition_constraint_expr(Oid partition, AttrNumber part_attno); - static int cmp_range_entries(const void *p1, const void *p2, void *arg); static bool validate_range_constraint(const Expr *expr, @@ -312,6 +305,7 @@ init_local_cache(void) /* Destroy caches, just in case */ hash_destroy(partitioned_rels); hash_destroy(parent_cache); + hash_destroy(constraint_cache); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); @@ -329,6 +323,15 @@ init_local_cache(void) parent_cache = hash_create("pg_pathman's partition parents cache", PART_RELS_SIZE * CHILD_FACTOR, &ctl, HASH_ELEM | HASH_BLOBS); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(PartConstraintInfo); + ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + + constraint_cache = hash_create("pg_pathman's partition constraints cache", + PART_RELS_SIZE * CHILD_FACTOR, + &ctl, HASH_ELEM | HASH_BLOBS); } /* @@ -386,10 +389,10 @@ fill_prel_with_partitions(const Oid *partitions, /* Raise ERROR if there's no such column */ if (part_attno == InvalidAttrNumber) elog(ERROR, "partition \"%s\" has no column \"%s\"", - 
get_rel_name_or_relid(partitions[i]), - part_column_name); + get_rel_name_or_relid(partitions[i]), part_column_name); - con_expr = get_partition_constraint_expr(partitions[i], part_attno); + /* Fetch constraint's expression tree */ + con_expr = get_constraint_of_partition(partitions[i], part_attno); /* Perform a partitioning_type-dependent task */ switch (prel->parttype) @@ -863,58 +866,6 @@ read_pathman_config(void) heap_close(rel, AccessShareLock); } -/* - * Get constraint expression tree for a partition. - * - * build_check_constraint_name_internal() is used to build conname. - */ -static Expr * -get_partition_constraint_expr(Oid partition, AttrNumber part_attno) -{ - Oid conid; /* constraint Oid */ - char *conname; /* constraint name */ - HeapTuple con_tuple; - Datum conbin_datum; - bool conbin_isnull; - Expr *expr; /* expression tree for constraint */ - - conname = build_check_constraint_name_relid_internal(partition, part_attno); - conid = get_relation_constraint_oid(partition, conname, true); - if (conid == InvalidOid) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("constraint \"%s\" for partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - } - - con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); - conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, - Anum_pg_constraint_conbin, - &conbin_isnull); - if (conbin_isnull) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, - (errmsg("constraint \"%s\" for partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - pfree(conname); - - return NULL; /* could not parse */ - } - pfree(conname); - - /* Finally we get a constraint expression tree */ - expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - - /* Don't foreget to release syscache tuple */ - ReleaseSysCache(con_tuple); - - return 
expr; -} - /* qsort comparison function for RangeEntries */ static int cmp_range_entries(const void *p1, const void *p2, void *arg) diff --git a/src/relation_info.c b/src/relation_info.c index bde960c7..aacd7c59 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -17,6 +17,7 @@ #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/indexing.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_inherits.h" #include "miscadmin.h" #include "storage/lmgr.h" @@ -29,6 +30,10 @@ #include "utils/lsyscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 90600 +#include "catalog/pg_constraint_fn.h" +#endif + /* * We delay all invalidation jobs received in relcache hook. @@ -59,6 +64,10 @@ static Oid get_parent_of_partition_internal(Oid partition, PartParentSearch *status, HASHACTION action); +static Expr *get_partition_constraint_expr(Oid partition, + AttrNumber part_attno, + Oid *constr_oid); + /* * refresh\invalidate\get\remove PartRelationInfo functions. @@ -684,6 +693,123 @@ try_perform_parent_refresh(Oid parent) return true; } + +/* + * forget\get constraint functions. 
+ */ + +Oid +forget_constraint_of_partition(Oid partition) +{ + PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, + partition, + HASH_FIND, NULL); + if (pcon) + { + Oid conid = pcon->conid; + + /* TODO: implement pfree(constraint) logc */ + + pathman_cache_search_relid(constraint_cache, + partition, + HASH_REMOVE, NULL); + + return conid; + } + + return InvalidOid; +} + +Expr * +get_constraint_of_partition(Oid partition, AttrNumber part_attno) +{ + PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, + partition, + HASH_FIND, NULL); + if (!pcon) + { + Oid conid; + Expr *con_expr; + MemoryContext old_mcxt; + + /* Try to build constraint's expression tree */ + con_expr = get_partition_constraint_expr(partition, part_attno, &conid); + + /* Create new entry for this constraint */ + pcon = pathman_cache_search_relid(constraint_cache, + partition, + HASH_ENTER, NULL); + + /* Copy constraint's data to the persistent mcxt */ + old_mcxt = MemoryContextSwitchTo(TopMemoryContext); + pcon->conid = conid; + pcon->constraint = copyObject(con_expr); + MemoryContextSwitchTo(old_mcxt); + } + + return pcon->constraint; +} + +/* + * Get constraint expression tree for a partition. + * + * build_check_constraint_name_internal() is used to build conname. 
+ */ +static Expr * +get_partition_constraint_expr(Oid partition, + AttrNumber part_attno, + Oid *constr_oid) /* optional ret value #2 */ +{ + Oid conid; /* constraint Oid */ + char *conname; /* constraint name */ + HeapTuple con_tuple; + Datum conbin_datum; + bool conbin_isnull; + Expr *expr; /* expression tree for constraint */ + + conname = build_check_constraint_name_relid_internal(partition, part_attno); + conid = get_relation_constraint_oid(partition, conname, true); + + /* Return constraint's Oid to caller */ + if (constr_oid) + *constr_oid = conid; + + if (!OidIsValid(conid)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" does not exist", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); + } + + con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, + Anum_pg_constraint_conbin, + &conbin_isnull); + if (conbin_isnull) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(WARNING, + (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); + pfree(conname); + + return NULL; /* could not parse */ + } + pfree(conname); + + /* Finally we get a constraint expression tree */ + expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); + + /* Don't foreget to release syscache tuple */ + ReleaseSysCache(con_tuple); + + return expr; +} + + /* * Safe PartType wrapper. */ @@ -718,6 +844,7 @@ PartTypeToCString(PartType parttype) } } + /* * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
*/ From 36ac4bfe565bb291db0a69ae5fa305cff1c282ee Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 15:28:32 +0300 Subject: [PATCH 0284/1124] fix regression tests, fix pathman_relcache_hook(), simplify forget_constraint_of_partition() --- expected/pathman_calamity.out | 4 ++-- src/hooks.c | 3 +++ src/include/relation_info.h | 2 +- src/relation_info.c | 33 ++++++++++++--------------------- 4 files changed, 18 insertions(+), 24 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e9a5c7e4..34e8c61c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -510,7 +510,7 @@ SELECT create_hash_partitions('calamity.part_ok', 'val', 4); CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); -ERROR: constraint "pathman_wrong_partition_1_check" for partition "wrong_partition" does not exist +ERROR: constraint "pathman_wrong_partition_1_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- @@ -522,7 +522,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: constraint "pathman_wrong_partition_1_check" for partition "wrong_partition" does not exist +ERROR: constraint "pathman_wrong_partition_1_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- diff --git a/src/hooks.c b/src/hooks.c index 5864e2e5..0e2613f7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -624,6 +624,9 @@ pathman_relcache_hook(Datum arg, Oid relid) if (relid == 
get_pathman_config_relid(false)) delay_pathman_shutdown(); + /* Invalidate PartConstraintInfo cache if needed */ + forget_constraint_of_partition(relid); + /* Invalidate PartParentInfo cache if needed */ partitioned_table = forget_parent_of_partition(relid, &search); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 875f3e62..9435b1f1 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -221,7 +221,7 @@ Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); Oid get_parent_of_partition(Oid partition, PartParentSearch *status); /* Constraint cache */ -Oid forget_constraint_of_partition(Oid partition); +void forget_constraint_of_partition(Oid partition); Expr * get_constraint_of_partition(Oid partition, AttrNumber part_attno); /* Safe casts for PartType */ diff --git a/src/relation_info.c b/src/relation_info.c index aacd7c59..429f752f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -698,34 +698,24 @@ try_perform_parent_refresh(Oid parent) * forget\get constraint functions. 
*/ -Oid +/* Remove partition's constraint from cache */ +void forget_constraint_of_partition(Oid partition) { - PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, - partition, - HASH_FIND, NULL); - if (pcon) - { - Oid conid = pcon->conid; - - /* TODO: implement pfree(constraint) logc */ - - pathman_cache_search_relid(constraint_cache, - partition, - HASH_REMOVE, NULL); - - return conid; - } - - return InvalidOid; + pathman_cache_search_relid(constraint_cache, + partition, + HASH_REMOVE, + NULL); } +/* Return partition's constraint as expression tree */ Expr * get_constraint_of_partition(Oid partition, AttrNumber part_attno) { PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, partition, - HASH_FIND, NULL); + HASH_FIND, + NULL); if (!pcon) { Oid conid; @@ -738,7 +728,8 @@ get_constraint_of_partition(Oid partition, AttrNumber part_attno) /* Create new entry for this constraint */ pcon = pathman_cache_search_relid(constraint_cache, partition, - HASH_ENTER, NULL); + HASH_ENTER, + NULL); /* Copy constraint's data to the persistent mcxt */ old_mcxt = MemoryContextSwitchTo(TopMemoryContext); @@ -751,7 +742,7 @@ get_constraint_of_partition(Oid partition, AttrNumber part_attno) } /* - * Get constraint expression tree for a partition. + * Get constraint expression tree of a partition. * * build_check_constraint_name_internal() is used to build conname. 
*/ From b7ee080bfb1eedb69b49a036002d6eeb64ba852c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 15:41:39 +0300 Subject: [PATCH 0285/1124] fix function forget_constraint_of_partition() --- src/relation_info.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 429f752f..32c4ddca 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -702,10 +702,19 @@ try_perform_parent_refresh(Oid parent) void forget_constraint_of_partition(Oid partition) { - pathman_cache_search_relid(constraint_cache, - partition, - HASH_REMOVE, - NULL); + PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, + partition, + HASH_FIND, + NULL); + if (pcon) + { + /* FIXME: implement pfree(constraint) logc */ + + pathman_cache_search_relid(constraint_cache, + partition, + HASH_REMOVE, + NULL); + } } /* Return partition's constraint as expression tree */ From 431c76ba659152c038dfd59db47d01d2f137426e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 16:36:55 +0300 Subject: [PATCH 0286/1124] introduce pg_pathman's memory contexts (TopPathmanContext etc) --- src/include/init.h | 5 +++ src/init.c | 91 +++++++++++++++++++++++++++++++------------ src/pathman_workers.c | 4 +- src/relation_info.c | 6 +-- 4 files changed, 76 insertions(+), 30 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 49891485..07a0392b 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -37,6 +37,11 @@ typedef struct } PathmanInitState; +extern MemoryContext TopPathmanContext; +extern MemoryContext PathmanRelationCacheContext; +extern MemoryContext PathmanParentCacheContext; +extern MemoryContext PathmanCostraintCacheContext; + extern HTAB *partitioned_rels; extern HTAB *parent_cache; extern HTAB *constraint_cache; diff --git a/src/init.c b/src/init.c index bc5e19a4..a54d7fb8 100644 --- a/src/init.c +++ b/src/init.c @@ -43,14 +43,20 @@ #define CHILD_FACTOR 500 
+/* Various memory contexts for caches */ +MemoryContext TopPathmanContext = NULL; +MemoryContext PathmanRelationCacheContext = NULL; +MemoryContext PathmanParentCacheContext = NULL; +MemoryContext PathmanCostraintCacheContext = NULL; + /* Storage for PartRelationInfos */ -HTAB *partitioned_rels = NULL; +HTAB *partitioned_rels = NULL; /* Storage for PartParentInfos */ -HTAB *parent_cache = NULL; +HTAB *parent_cache = NULL; /* Storage for partition constraints */ -HTAB *constraint_cache = NULL; +HTAB *constraint_cache = NULL; /* pg_pathman's init status */ PathmanInitState pg_pathman_init_state; @@ -210,7 +216,7 @@ load_config(void) /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ validate_sql_facade_version(get_sql_facade_version()); - init_local_cache(); /* create 'partitioned_rels' hash table */ + init_local_cache(); /* create various hash tables (caches) */ read_pathman_config(); /* read PATHMAN_CONFIG table & fill cache */ /* Register pathman_relcache_hook(), currently we can't unregister it */ @@ -307,10 +313,52 @@ init_local_cache(void) hash_destroy(parent_cache); hash_destroy(constraint_cache); + /* Reset pg_pathman's memory contexts */ + if (TopPathmanContext) + { + /* Check that child contexts exist */ + Assert(MemoryContextIsValid(PathmanRelationCacheContext)); + Assert(MemoryContextIsValid(PathmanParentCacheContext)); + Assert(MemoryContextIsValid(PathmanCostraintCacheContext)); + + /* Clear children */ + MemoryContextResetChildren(TopPathmanContext); + } + /* Initialize pg_pathman's memory contexts */ + else + { + Assert(PathmanRelationCacheContext == NULL); + Assert(PathmanParentCacheContext == NULL); + Assert(PathmanCostraintCacheContext == NULL); + + TopPathmanContext = + AllocSetContextCreate(TopMemoryContext, + CppAsString(TopPathmanContext), + ALLOCSET_DEFAULT_SIZES); + + /* For PartRelationInfo */ + PathmanRelationCacheContext = + AllocSetContextCreate(TopPathmanContext, + CppAsString(PathmanRelationCacheContext), + 
ALLOCSET_DEFAULT_SIZES); + + /* For PartParentInfo */ + PathmanParentCacheContext = + AllocSetContextCreate(TopPathmanContext, + CppAsString(PathmanParentCacheContext), + ALLOCSET_DEFAULT_SIZES); + + /* For PartConstraintInfo */ + PathmanCostraintCacheContext = + AllocSetContextCreate(TopPathmanContext, + CppAsString(PathmanCostraintCacheContext), + ALLOCSET_DEFAULT_SIZES); + } + memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartRelationInfo); - ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + ctl.hcxt = PathmanRelationCacheContext; partitioned_rels = hash_create("pg_pathman's partitioned relations cache", PART_RELS_SIZE, &ctl, HASH_ELEM | HASH_BLOBS); @@ -318,7 +366,7 @@ init_local_cache(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartParentInfo); - ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + ctl.hcxt = PathmanParentCacheContext; parent_cache = hash_create("pg_pathman's partition parents cache", PART_RELS_SIZE * CHILD_FACTOR, @@ -327,7 +375,7 @@ init_local_cache(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartConstraintInfo); - ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + ctl.hcxt = PathmanCostraintCacheContext; constraint_cache = hash_create("pg_pathman's partition constraints cache", PART_RELS_SIZE * CHILD_FACTOR, @@ -340,24 +388,17 @@ init_local_cache(void) static void fini_local_cache(void) { - HASH_SEQ_STATUS status; - PartRelationInfo *prel; - - hash_seq_init(&status, partitioned_rels); - while((prel = (PartRelationInfo *) hash_seq_search(&status)) != NULL) - { - if (PrelIsValid(prel)) - { - FreeChildrenArray(prel); - FreeRangesArray(prel); - } - } - - /* Now we can safely destroy hash tables */ + /* First, destroy hash tables */ hash_destroy(partitioned_rels); hash_destroy(parent_cache); - partitioned_rels = NULL; - parent_cache = NULL; + hash_destroy(constraint_cache); + + 
partitioned_rels = NULL; + parent_cache = NULL; + constraint_cache = NULL; + + /* Now we can clear allocations */ + MemoryContextResetChildren(TopPathmanContext); } /* @@ -371,7 +412,7 @@ fill_prel_with_partitions(const Oid *partitions, { uint32 i; Expr *con_expr; - MemoryContext mcxt = TopMemoryContext; + MemoryContext mcxt = PathmanRelationCacheContext; /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ prel->children = MemoryContextAllocZero(mcxt, parts_count * sizeof(Oid)); @@ -474,7 +515,7 @@ fill_prel_with_partitions(const Oid *partitions, prel->children[i] = prel->ranges[i].child_oid; /* Copy all min & max Datums to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(TopMemoryContext); + old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); for (i = 0; i < PrelChildrenCount(prel); i++) { prel->ranges[i].min = CopyBound(&prel->ranges[i].min, diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 246b216b..41fc0632 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -475,10 +475,10 @@ bgw_main_concurrent_part(Datum main_arg) MemoryContext current_mcxt; /* - * Allocate as SQL query in top memory context because current + * Allocate SQL query in TopPathmanContext because current * context will be destroyed after transaction finishes */ - current_mcxt = MemoryContextSwitchTo(TopMemoryContext); + current_mcxt = MemoryContextSwitchTo(TopPathmanContext); sql = psprintf("SELECT %s._partition_data_concurrent($1::oid, p_limit:=$2)", get_namespace_name(get_pathman_schema())); MemoryContextSwitchTo(current_mcxt); diff --git a/src/relation_info.c b/src/relation_info.c index 32c4ddca..3e452b6a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -43,10 +43,10 @@ static List *delayed_invalidation_vague_rels = NIL; static bool delayed_shutdown = false; /* pathman was dropped */ -/* Add unique Oid to list, allocate in TopMemoryContext */ +/* Add unique Oid to list, allocate in TopPathmanContext */ #define 
list_add_unique(list, oid) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(TopMemoryContext); \ + MemoryContext old_mcxt = MemoryContextSwitchTo(TopPathmanContext); \ list = list_append_unique_oid(list, ObjectIdGetDatum(oid)); \ MemoryContextSwitchTo(old_mcxt); \ } while (0) @@ -741,7 +741,7 @@ get_constraint_of_partition(Oid partition, AttrNumber part_attno) NULL); /* Copy constraint's data to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(TopMemoryContext); + old_mcxt = MemoryContextSwitchTo(PathmanCostraintCacheContext); pcon->conid = conid; pcon->constraint = copyObject(con_expr); MemoryContextSwitchTo(old_mcxt); From e91d2b55adb37a2b6c2fed4f674c849740353340 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 21:06:12 +0300 Subject: [PATCH 0287/1124] introduce 'pathman_cache_stats' view, fix hash table memcontexts --- init.sql | 29 ++++++++--- src/compat/relation_tags.c | 2 +- src/include/init.h | 26 ++++++++++ src/include/pathman.h | 9 ++++ src/include/utils.h | 4 ++ src/init.c | 11 +++-- src/pl_funcs.c | 98 ++++++++++++++++++++++++++++++++++++-- src/utils.c | 38 +++++++++++++++ 8 files changed, 202 insertions(+), 15 deletions(-) diff --git a/init.sql b/init.sql index 583080ad..d11ebb28 100644 --- a/init.sql +++ b/init.sql @@ -250,12 +250,12 @@ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - partattr TEXT, - range_min TEXT, - range_max TEXT) + parent REGCLASS, + partition REGCLASS, + parttype INT4, + partattr TEXT, + range_min TEXT, + range_max TEXT) AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; @@ -267,6 +267,23 @@ AS SELECT * FROM @extschema@.show_partition_list(); GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; +/* + * Show memory usage of pg_pathman's caches. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + +/* + * View for show_cache_stats(). + */ +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); + /* * Show all existing concurrent partitioning tasks. */ diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index b7d2260b..383dd1f5 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -110,7 +110,7 @@ rte_attach_tag(const uint32 query_id, per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", start_elems, &hashctl, - HASH_ELEM | HASH_BLOBS); + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* Search by 'htab_key' */ diff --git a/src/include/init.h b/src/include/init.h index 07a0392b..7e4f8786 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -37,6 +37,7 @@ typedef struct } PathmanInitState; +#define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; extern MemoryContext PathmanRelationCacheContext; extern MemoryContext PathmanParentCacheContext; @@ -50,6 +51,31 @@ extern HTAB *constraint_cache; extern PathmanInitState pg_pathman_init_state; +/* Transform pg_pathman's memory context into simple name */ +static inline const char * +simpify_mcxt_name(MemoryContext mcxt) +{ + static const char *top_mcxt = "maintenance"; + static const char *bound_mcxt = "bounds cache"; + static const char *parent_mcxt = "parents cache"; + static const char *constr_mcxt = "constraints cache"; + + if (mcxt == TopPathmanContext) + return top_mcxt; + + else if (mcxt == PathmanRelationCacheContext) + return bound_mcxt; + + else if (mcxt == PathmanParentCacheContext) + return parent_mcxt; + + else if (mcxt == PathmanCostraintCacheContext) + return constr_mcxt; + + else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); +} + + /* * Check if pg_pathman is initialized. 
*/ diff --git a/src/include/pathman.h b/src/include/pathman.h index 6c8c17e5..d592cd33 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -75,6 +75,15 @@ #define Anum_pathman_pl_range_min 5 /* partition's min value */ #define Anum_pathman_pl_range_max 6 /* partition's max value */ +/* + * Definitions for the "pathman_cache_stats" view. + */ +#define PATHMAN_CACHE_STATS "pathman_cache_stats" +#define Natts_pathman_cache_stats 3 +#define Anum_pathman_cs_context 1 /* name of memory context */ +#define Anum_pathman_cs_size 2 /* size of memory context */ +#define Anum_pathman_cs_used 3 /* used space */ + /* * Cache current PATHMAN_CONFIG relid (set during load_config()). diff --git a/src/include/utils.h b/src/include/utils.h index 752e6e6d..1fd966a4 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -18,6 +18,7 @@ #include "parser/parse_oper.h" #include "utils/rel.h" #include "nodes/relation.h" +#include "nodes/memnodes.h" #include "nodes/nodeFuncs.h" @@ -33,6 +34,9 @@ bool check_security_policy_internal(Oid relid, Oid role); */ Oid get_pathman_schema(void); List * list_reverse(List *l); +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); /* * Useful functions for relations. 
diff --git a/src/init.c b/src/init.c index a54d7fb8..63e81062 100644 --- a/src/init.c +++ b/src/init.c @@ -361,7 +361,8 @@ init_local_cache(void) ctl.hcxt = PathmanRelationCacheContext; partitioned_rels = hash_create("pg_pathman's partitioned relations cache", - PART_RELS_SIZE, &ctl, HASH_ELEM | HASH_BLOBS); + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); @@ -369,8 +370,8 @@ init_local_cache(void) ctl.hcxt = PathmanParentCacheContext; parent_cache = hash_create("pg_pathman's partition parents cache", - PART_RELS_SIZE * CHILD_FACTOR, - &ctl, HASH_ELEM | HASH_BLOBS); + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); @@ -378,8 +379,8 @@ init_local_cache(void) ctl.hcxt = PathmanCostraintCacheContext; constraint_cache = hash_create("pg_pathman's partition constraints cache", - PART_RELS_SIZE * CHILD_FACTOR, - &ctl, HASH_ELEM | HASH_BLOBS); + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b0ea3861..a1360c4f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -46,6 +46,7 @@ PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); +PG_FUNCTION_INFO_V1( show_cache_stats_internal ); PG_FUNCTION_INFO_V1( show_partition_list_internal ); PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); @@ -72,9 +73,7 @@ PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); -/* - * User context for function show_partition_list_internal(). 
- */ +/* User context for function show_partition_list_internal() */ typedef struct { Relation pathman_config; @@ -86,6 +85,13 @@ typedef struct uint32 child_number; /* child we're looking at */ } show_partition_list_cxt; +/* User context for function show_pathman_cache_stats_internal() */ +typedef struct +{ + MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; + int current_context; +} show_cache_stats_cxt; + static void on_partitions_created_internal(Oid partitioned_table, bool add_callbacks); static void on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks); @@ -265,6 +271,92 @@ get_tablespace_pl(PG_FUNCTION_ARGS) * ---------------------- */ +Datum +show_cache_stats_internal(PG_FUNCTION_ARGS) +{ + show_cache_stats_cxt *usercxt; + FuncCallContext *funccxt; + + /* + * Initialize tuple descriptor & function call context. + */ + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext old_mcxt; + + funccxt = SRF_FIRSTCALL_INIT(); + + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); + + usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); + + usercxt->pathman_contexts[0] = TopPathmanContext; + usercxt->pathman_contexts[1] = PathmanRelationCacheContext; + usercxt->pathman_contexts[2] = PathmanParentCacheContext; + usercxt->pathman_contexts[3] = PathmanCostraintCacheContext; + + usercxt->current_context = 0; + + /* Create tuple descriptor */ + tupdesc = CreateTemplateTupleDesc(Natts_pathman_cache_stats, false); + + TupleDescInitEntry(tupdesc, Anum_pathman_cs_context, + "context", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_size, + "size", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_used, + "used", INT8OID, -1, 0); + + funccxt->tuple_desc = BlessTupleDesc(tupdesc); + funccxt->user_fctx = (void *) usercxt; + + MemoryContextSwitchTo(old_mcxt); + } + + funccxt = SRF_PERCALL_SETUP(); + usercxt = (show_cache_stats_cxt *) funccxt->user_fctx; + + if (usercxt->current_context 
< lengthof(usercxt->pathman_contexts)) + { + MemoryContext current_mcxt; + MemoryContextCounters mcxt_stats; + HeapTuple htup; + Datum values[Natts_pathman_cache_stats]; + bool isnull[Natts_pathman_cache_stats] = { 0 }; + + /* Prepare context counters */ + memset(&mcxt_stats, 0, sizeof(mcxt_stats)); + + /* Select current memory context */ + current_mcxt = usercxt->pathman_contexts[usercxt->current_context]; + + /* NOTE: we do not consider child contexts if it's TopPathmanContext */ + McxtStatsInternal(current_mcxt, 0, + (current_mcxt != TopPathmanContext), + &mcxt_stats); + + values[Anum_pathman_cs_context - 1] = + CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); + + values[Anum_pathman_cs_size - 1] = + Int64GetDatum(mcxt_stats.totalspace); + + values[Anum_pathman_cs_used - 1] = + Int64GetDatum(mcxt_stats.totalspace - mcxt_stats.freespace); + + /* Switch to next context */ + usercxt->current_context++; + + /* Form output tuple */ + htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); + + SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(htup)); + } + + SRF_RETURN_DONE(funccxt); +} + /* * List all existing partitions and their parents. 
*/ diff --git a/src/utils.c b/src/utils.c index 1f2ce440..c26c4863 100644 --- a/src/utils.c +++ b/src/utils.c @@ -4,6 +4,8 @@ * definitions of various support functions * * Copyright (c) 2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ @@ -162,6 +164,42 @@ list_reverse(List *l) return result; } +void +McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals) +{ + MemoryContextCounters local_totals; + MemoryContext child; + + AssertArg(MemoryContextIsValid(context)); + + /* Examine the context itself */ + (*context->methods->stats) (context, level, false, totals); + + memset(&local_totals, 0, sizeof(local_totals)); + + if (!examine_children) + return; + + /* Examine children */ + for (child = context->firstchild; + child != NULL; + child = child->nextchild) + { + + McxtStatsInternal(child, level + 1, + examine_children, + &local_totals); + } + + /* Save children stats */ + totals->nblocks += local_totals.nblocks; + totals->freechunks += local_totals.freechunks; + totals->totalspace += local_totals.totalspace; + totals->freespace += local_totals.freespace; +} + /* From 0c3233c052feec6d825137d51f1b5d0a7674b42d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 21:10:25 +0300 Subject: [PATCH 0288/1124] formatting, comments --- src/pl_funcs.c | 3 +++ src/utility_stmt_hooking.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index a1360c4f..20d6860f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -271,6 +271,9 @@ get_tablespace_pl(PG_FUNCTION_ARGS) * ---------------------- */ +/* + * List stats of all existing caches (memory contexts). 
+ */ Datum show_cache_stats_internal(PG_FUNCTION_ARGS) { diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1a5079c8..2c859731 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -416,7 +416,7 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) } /* This should never happen (see is_pathman_related_copy()) */ - else elog(ERROR, "error in function \"%s\"", CppAsString(PathmanDoCopy)); + else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); /* COPY ... FROM ... */ if (is_from) From 20fbd32133753e9593f4aca40d80062d8d5c879f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 26 Mar 2017 21:29:01 +0300 Subject: [PATCH 0289/1124] add 'entries' to 'pathman_cache_stats' view --- init.sql | 3 ++- src/include/pathman.h | 3 ++- src/pl_funcs.c | 27 ++++++++++++++++++++------- 3 files changed, 24 insertions(+), 9 deletions(-) diff --git a/init.sql b/init.sql index d11ebb28..cba29687 100644 --- a/init.sql +++ b/init.sql @@ -274,7 +274,8 @@ CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() RETURNS TABLE ( context TEXT, size INT8, - used INT8) + used INT8, + entries INT8) AS 'pg_pathman', 'show_cache_stats_internal' LANGUAGE C STRICT; diff --git a/src/include/pathman.h b/src/include/pathman.h index d592cd33..274d73da 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -79,10 +79,11 @@ * Definitions for the "pathman_cache_stats" view. 
*/ #define PATHMAN_CACHE_STATS "pathman_cache_stats" -#define Natts_pathman_cache_stats 3 +#define Natts_pathman_cache_stats 4 #define Anum_pathman_cs_context 1 /* name of memory context */ #define Anum_pathman_cs_size 2 /* size of memory context */ #define Anum_pathman_cs_used 3 /* used space */ +#define Anum_pathman_cs_entries 4 /* number of cache entries */ /* diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 20d6860f..1325ffae 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -89,7 +89,8 @@ typedef struct typedef struct { MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; - int current_context; + HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; + int current_item; } show_cache_stats_cxt; @@ -299,7 +300,12 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt->pathman_contexts[2] = PathmanParentCacheContext; usercxt->pathman_contexts[3] = PathmanCostraintCacheContext; - usercxt->current_context = 0; + usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ + usercxt->pathman_htables[1] = partitioned_rels; + usercxt->pathman_htables[2] = parent_cache; + usercxt->pathman_htables[3] = constraint_cache; + + usercxt->current_item = 0; /* Create tuple descriptor */ tupdesc = CreateTemplateTupleDesc(Natts_pathman_cache_stats, false); @@ -310,6 +316,8 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) "size", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cs_used, "used", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_entries, + "entries", INT8OID, -1, 0); funccxt->tuple_desc = BlessTupleDesc(tupdesc); funccxt->user_fctx = (void *) usercxt; @@ -320,8 +328,9 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) funccxt = SRF_PERCALL_SETUP(); usercxt = (show_cache_stats_cxt *) funccxt->user_fctx; - if (usercxt->current_context < lengthof(usercxt->pathman_contexts)) + if (usercxt->current_item < lengthof(usercxt->pathman_contexts)) { + HTAB *current_htab; MemoryContext current_mcxt; MemoryContextCounters mcxt_stats; HeapTuple htup; @@ -331,8 +340,9 
@@ show_cache_stats_internal(PG_FUNCTION_ARGS) /* Prepare context counters */ memset(&mcxt_stats, 0, sizeof(mcxt_stats)); - /* Select current memory context */ - current_mcxt = usercxt->pathman_contexts[usercxt->current_context]; + /* Select current memory context and hash table (cache) */ + current_mcxt = usercxt->pathman_contexts[usercxt->current_item]; + current_htab = usercxt->pathman_htables[usercxt->current_item]; /* NOTE: we do not consider child contexts if it's TopPathmanContext */ McxtStatsInternal(current_mcxt, 0, @@ -348,8 +358,11 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) values[Anum_pathman_cs_used - 1] = Int64GetDatum(mcxt_stats.totalspace - mcxt_stats.freespace); - /* Switch to next context */ - usercxt->current_context++; + values[Anum_pathman_cs_entries - 1] = + Int64GetDatum(current_htab ? hash_get_num_entries(current_htab) : 0); + + /* Switch to next item */ + usercxt->current_item++; /* Form output tuple */ htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); From bd44be097cd25dbc349c1e27b2f2bf6b09da4828 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 27 Mar 2017 14:41:20 +0300 Subject: [PATCH 0290/1124] Save expression in database --- hash.sql | 10 +- init.sql | 20 +-- src/include/partition_creation.h | 24 ++-- src/include/pathman.h | 11 +- src/include/relation_info.h | 2 +- src/init.c | 7 +- src/partition_creation.c | 205 ++++++++++++++++--------------- src/partition_filter.c | 8 +- src/pl_funcs.c | 82 +++++++------ src/pl_hash_funcs.c | 7 +- src/pl_range_funcs.c | 13 +- src/relation_info.c | 61 ++++++++- 12 files changed, 259 insertions(+), 191 deletions(-) diff --git a/hash.sql b/hash.sql index 0a1667d6..f31de040 100644 --- a/hash.sql +++ b/hash.sql @@ -13,7 +13,7 @@ */ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, partitions_count INTEGER, partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, @@ -31,15 +31,15 @@ BEGIN PERFORM 
@extschema@.lock_partitioned_relation(parent_relid); END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, attribute); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, - attribute, + expression, partitions_count, partition_names, tablespaces); diff --git a/init.sql b/init.sql index 9e25e9c2..25ebff33 100644 --- a/init.sql +++ b/init.sql @@ -35,18 +35,20 @@ LANGUAGE C; CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, attname TEXT NOT NULL, + raw_expression TEXT NOT NULL, atttype OID NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT, /* check for allowed part types */ - CHECK (parttype IN (1, 2)), + CHECK (parttype IN (1, 2)) /* check for correct interval */ + /* CHECK (@extschema@.validate_interval_value(partrel, attname, parttype, - range_interval)) + range_interval)) */ ); @@ -451,10 +453,6 @@ BEGIN RAISE EXCEPTION 'relation "%" has already been partitioned', relation; END IF; - IF NOT @extschema@.is_expression_suitable(relation, expression) THEN - RAISE EXCEPTION 'partitioning expression "%" is not suitable', expression; - END IF; - /* Check if there are foreign keys that reference the relation */ FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint WHERE confrelid = relation::REGCLASS::OID) @@ -800,11 +798,13 @@ LANGUAGE C STRICT; /* * Checks if expression is suitable */ + /* CREATE OR REPLACE FUNCTION @extschema@.is_expression_suitable( relid REGCLASS, expr TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_expression_suitable' LANGUAGE C STRICT; +*/ /* * Check if regclass is date or timestamp. 
@@ -848,9 +848,11 @@ LANGUAGE C STRICT; * Attach a previously partitioned table. */ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL) + parent_relid REGCLASS, + attname TEXT, + range_interval TEXT DEFAULT NULL, + refresh_part_info BOOL DEFAULT TRUE +) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 8b2310b7..8d6fa4d5 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -32,7 +32,6 @@ Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, Oid create_single_range_partition_internal(Oid parent_relid, const Bound *start_value, const Bound *end_value, - Oid value_type, RangeVar *partition_rv, char *tablespace); @@ -40,19 +39,18 @@ Oid create_single_range_partition_internal(Oid parent_relid, Oid create_single_hash_partition_internal(Oid parent_relid, uint32 part_idx, uint32 part_count, - Oid value_type, RangeVar *partition_rv, char *tablespace); /* RANGE constraints */ Constraint * build_range_check_constraint(Oid child_relid, - char *attname, + Node *raw_expression, const Bound *start_value, const Bound *end_value, - Oid value_type); + Oid expr_type); -Node * build_raw_range_check_tree(char *attname, +Node * build_raw_range_check_tree(Node *raw_expression, const Bound *start_value, const Bound *end_value, Oid value_type); @@ -66,12 +64,12 @@ bool check_range_available(Oid parent_relid, /* HASH constraints */ Constraint * build_hash_check_constraint(Oid child_relid, - const char *expr, + Node *raw_expression, uint32 part_idx, uint32 part_count, Oid value_type); -Node * build_raw_hash_check_tree(const char *base_expr, +Node * build_raw_hash_check_tree(Node *raw_expression, uint32 part_idx, uint32 part_count, Oid relid, @@ -79,10 +77,16 @@ Node * build_raw_hash_check_tree(const char *base_expr, void 
drop_check_constraint(Oid relid, AttrNumber attnum); -/* expression parsing functions */ -Node *get_expression_node(Oid relid, const char *expr, bool analyze); -Oid get_partition_expr_type(Oid relid, const char *expr); +typedef struct +{ + Oid expr_type; + Datum expr_datum; + Node *raw_expr; +} PartExpressionInfo; +/* expression parsing functions */ +PartExpressionInfo *get_part_expression_info(Oid relid, + const char *expr_string, bool check_hash_func, bool make_plan); /* Partitioning callback type */ diff --git a/src/include/pathman.h b/src/include/pathman.h index 84899d79..df08ab73 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -44,12 +44,13 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 5 +#define Natts_pathman_config 6 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ -#define Anum_pathman_config_attname 2 /* partitioned column (text) */ -#define Anum_pathman_config_atttype 3 /* partitioned atttype */ -#define Anum_pathman_config_parttype 4 /* partitioning type (1|2) */ -#define Anum_pathman_config_range_interval 5 /* interval for RANGE pt. (text) */ +#define Anum_pathman_config_expression 2 /* partitioned expression (text) */ +#define Anum_pathman_config_raw_expression 3 /* partitioned raw expression (text) */ +#define Anum_pathman_config_atttype 4 /* partitioned atttype */ +#define Anum_pathman_config_parttype 5 /* partitioning type (1|2) */ +#define Anum_pathman_config_range_interval 6 /* interval for RANGE pt. 
(text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 7f479f2f..f8e9d3ef 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -134,7 +134,7 @@ typedef struct Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ - Expr *expr; /* planned expression */ + Node *expr; /* planned expression */ PartType parttype; /* partitioning type (HASH | RANGE) */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ diff --git a/src/init.c b/src/init.c index 06b8bc71..c7c68cb8 100644 --- a/src/init.c +++ b/src/init.c @@ -711,7 +711,8 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Perform checks for non-NULL columns */ Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_attname - 1]); + Assert(!isnull[Anum_pathman_config_expression - 1]); + Assert(!isnull[Anum_pathman_config_raw_expression - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); } @@ -825,7 +826,8 @@ read_pathman_config(void) /* These attributes are marked as NOT NULL, check anyway */ Assert(!isnull[Anum_pathman_config_partrel - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_attname - 1]); + Assert(!isnull[Anum_pathman_config_expression - 1]); + Assert(!isnull[Anum_pathman_config_raw_expression - 1]); /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); @@ -1112,7 +1114,6 @@ validate_hash_constraint(const Expr *expr, const OpExpr *eq_expr; const FuncExpr *get_hash_expr, *type_hash_proc_expr; - const Var *var; /* partitioned column */ if (!expr) return false; diff --git a/src/partition_creation.c b/src/partition_creation.c index c4aa5457..5af03458 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -30,6 +30,7 @@ 
#include "commands/tablespace.h" #include "miscadmin.h" #include "nodes/plannodes.h" +#include "optimizer/clauses.h" #include "parser/parser.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" @@ -64,7 +65,8 @@ static void create_single_partition_common(Oid partition_relid, static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, - char **partitioning_expr); + Oid *expr_type, + Node **expr); static char *choose_range_partition_name(Oid parent_relid, Oid parent_nsp); static char *choose_hash_partition_name(Oid parent_relid, uint32 part_idx); @@ -96,13 +98,13 @@ Oid create_single_range_partition_internal(Oid parent_relid, const Bound *start_value, const Bound *end_value, - Oid value_type, RangeVar *partition_rv, char *tablespace) { - Oid partition_relid; + Oid partition_relid, + value_type; Constraint *check_constr; - char *partitioning_expr; + Node *expr; init_callback_params callback_params; /* Generate a name if asked to */ @@ -121,11 +123,12 @@ create_single_range_partition_internal(Oid parent_relid, partition_relid = create_single_partition_internal(parent_relid, partition_rv, tablespace, - &partitioning_expr); + &value_type, + &expr); /* Build check constraint for RANGE partition */ check_constr = build_range_check_constraint(partition_relid, - partitioning_expr, + expr, start_value, end_value, value_type); @@ -150,13 +153,13 @@ Oid create_single_hash_partition_internal(Oid parent_relid, uint32 part_idx, uint32 part_count, - Oid value_type, RangeVar *partition_rv, char *tablespace) { - Oid partition_relid; + Oid partition_relid, + value_type; Constraint *check_constr; - char *partitioning_expr; + Node *expr; init_callback_params callback_params; /* Generate a name if asked to */ @@ -175,11 +178,12 @@ create_single_hash_partition_internal(Oid parent_relid, partition_relid = create_single_partition_internal(parent_relid, partition_rv, tablespace, - &partitioning_expr); + &value_type, + &expr); /* 
Build check constraint for HASH partition */ check_constr = build_hash_check_constraint(partition_relid, - partitioning_expr, + expr, part_idx, part_count, value_type); @@ -556,7 +560,6 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ last_partition = create_single_range_partition_internal(parent_relid, &bounds[0], &bounds[1], - range_bound_type, NULL, NULL); #ifdef USE_ASSERT_CHECKING @@ -644,7 +647,8 @@ static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace, - char **partitioning_expr) /* to be set */ + Oid *expr_type, /* to be set */ + Node **expr) /* to be set */ { /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -686,13 +690,19 @@ create_single_partition_internal(Oid parent_relid, parent_nsp = get_rel_namespace(parent_relid); parent_nsp_name = get_namespace_name(parent_nsp); - /* Fetch partitioned column's name */ - if (partitioning_expr) + /* Fetch expression for constraint */ + if (expr && expr_type) { - Datum expr_datum; + char *expr_string; + PartExpressionInfo *expr_info; - expr_datum = config_values[Anum_pathman_config_attname - 1]; - *partitioning_expr = TextDatumGetCString(expr_datum); + *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); + expr_string = TextDatumGetCString(config_values[Anum_pathman_config_raw_expression - 1]); + expr_info = get_part_expression_info(parent_relid, expr_string, false, false); + + *expr = expr_info->raw_expr; + pfree(expr_string); + pfree(expr_info); } /* Make up parent's RangeVar */ @@ -1138,7 +1148,7 @@ drop_check_constraint(Oid relid, AttrNumber attnum) /* Build RANGE check constraint expression tree */ Node * -build_raw_range_check_tree(char *attname, +build_raw_range_check_tree(Node *raw_expression, const Bound *start_value, const Bound *end_value, Oid value_type) @@ -1151,7 +1161,7 @@ build_raw_range_check_tree(char *attname, ColumnRef *col_ref = makeNode(ColumnRef); /* Partitioned column */ - 
col_ref->fields = list_make1(makeString(attname)); + //col_ref->fields = list_make1(makeString(attname)); col_ref->location = -1; and_oper->boolop = AND_EXPR; @@ -1202,7 +1212,7 @@ build_raw_range_check_tree(char *attname, /* Build complete RANGE check constraint */ Constraint * build_range_check_constraint(Oid child_relid, - char *attname, + Node *raw_expression, const Bound *start_value, const Bound *end_value, Oid value_type) @@ -1215,7 +1225,7 @@ build_range_check_constraint(Oid child_relid, /* Initialize basic properties of a CHECK constraint */ hash_constr = make_constraint_common(range_constr_name, - build_raw_range_check_tree(attname, + build_raw_range_check_tree(raw_expression, start_value, end_value, value_type)); @@ -1280,7 +1290,7 @@ check_range_available(Oid parent_relid, /* Build HASH check constraint expression tree */ Node * -build_raw_hash_check_tree(const char *base_expr, +build_raw_hash_check_tree(Node *raw_expression, uint32 part_idx, uint32 part_count, Oid relid, @@ -1297,15 +1307,10 @@ build_raw_hash_check_tree(const char *base_expr, Oid hash_proc; TypeCacheEntry *tce; - Node *expr = get_expression_node(relid, base_expr, false); tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; - /* Partitioned column */ - //hashed_column->fields = list_make1(makeString(attname)); - //hashed_column->location = -1; - /* Total amount of partitions */ part_count_c->val = make_int_value_struct(part_count); part_count_c->location = -1; @@ -1316,7 +1321,7 @@ build_raw_hash_check_tree(const char *base_expr, /* Call hash_proc() */ hash_call->funcname = list_make1(makeString(get_func_name(hash_proc))); - hash_call->args = list_make1(expr); + hash_call->args = list_make1(raw_expression); hash_call->agg_order = NIL; hash_call->agg_filter = NULL; hash_call->agg_within_group = false; @@ -1356,7 +1361,7 @@ build_raw_hash_check_tree(const char *base_expr, /* Build complete HASH check constraint */ Constraint * 
build_hash_check_constraint(Oid child_relid, - const char *expr, + Node *raw_expression, uint32 part_idx, uint32 part_count, Oid value_type) @@ -1369,7 +1374,7 @@ build_hash_check_constraint(Oid child_relid, /* Initialize basic properties of a CHECK constraint */ hash_constr = make_constraint_common(hash_constr_name, - build_raw_hash_check_tree(expr, + build_raw_hash_check_tree(raw_expression, part_idx, part_count, child_relid, @@ -1703,54 +1708,10 @@ parse_expression(Oid relid, const char *expr, char **query_string_out) return (Node *)(lfirst(list_head(parsetree_list))); } -struct expr_mutator_context -{ - Oid relid; /* partitioned table */ - List *rtable; /* range table list from expression query */ -}; - -/* - * To prevent calculation of Vars in expression, we wrap them with - * CustomConst, and later before execution we fill it with actual value - */ -static Node * -expression_mutator(Node *node, struct expr_mutator_context *context) -{ - const TypeCacheEntry *typcache; - - if (IsA(node, Var)) - { - Var *variable = (Var *) node; - Node *new_node = newNode(sizeof(CustomConst), T_Const); - Const *new_const = (Const *)new_node; - - RangeTblEntry *entry = rt_fetch(variable->varno, context->rtable); - if (entry->relid != context->relid) - elog(ERROR, "Columns in the expression should " - "be only from partitioned relation"); - - /* we only need varattno from original Var, for now */ - ((CustomConst *)new_node)->varattno = ((Var *)node)->varattno; - - new_const->consttype = ((Var *)node)->vartype; - new_const->consttypmod = ((Var *)node)->vartypmod; - new_const->constcollid = ((Var *)node)->varcollid; - new_const->constvalue = (Datum) 0; - new_const->constisnull = true; - new_const->location = -2; - - typcache = lookup_type_cache(new_const->consttype, 0); - new_const->constbyval = typcache->typbyval; - new_const->constlen = typcache->typlen; - - return new_node; - } - return expression_tree_mutator(node, expression_mutator, (void *) context); -} - /* By given 
relation id and expression returns node */ +/* Node * -get_expression_node(Oid relid, const char *expr, bool analyze) +get_raw_expression(Oid relid, const char *expr) { List *querytree_list; List *target_list; @@ -1766,12 +1727,10 @@ get_expression_node(Oid relid, const char *expr, bool analyze) parsetree = parse_expression(relid, expr, &query_string), target_list = ((SelectStmt *)parsetree)->targetList; - if (!analyze) { - result = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); - return result; - } + result = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); + return result; */ - /* We don't need pathman hooks on next stages */ + /* hooks_enabled = false; querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); @@ -1779,43 +1738,87 @@ get_expression_node(Oid relid, const char *expr, bool analyze) plan = pg_plan_query(query, 0, NULL); target_entry = lfirst(list_head(plan->planTree->targetlist)); - /* Hooks can work now */ hooks_enabled = true; result = (Node *)target_entry->expr; - /* We keep expression in top context */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); - /* We need relid and range table list for mutator */ context.relid = relid; context.rtable = plan->rtable; - /* This will create new tree in TopMemoryContext */ result = expression_mutator(result, (void *) &context); MemoryContextSwitchTo(oldcontext); return result; -} +}*/ /* Determines type of expression for a relation */ -Oid -get_partition_expr_type(Oid relid, const char *expr) +PartExpressionInfo * +get_part_expression_info(Oid relid, const char *expr_string, + bool check_hash_func, bool make_plan) { - Node *parsetree, - *target_entry, - *expr_node; - Query *query; - char *query_string; - - parsetree = parse_expression(relid, expr, &query_string); + Node *parsetree, + *expr_node, + *raw_expression; + Query *query; + char *query_string, *out_string; + PartExpressionInfo *expr_info; + List *querytree_list, + *raw_target_list; + PlannedStmt 
*plan; + TargetEntry *target_entry; + + expr_info = palloc(sizeof(PartExpressionInfo)); + parsetree = parse_expression(relid, expr_string, &query_string); + + /* Convert raw expression to string and return it as datum*/ + raw_target_list = ((SelectStmt *)parsetree)->targetList; + raw_expression = (Node *)(((ResTarget *)(lfirst(list_head(raw_target_list))))->val); + + /* Keep raw expression */ + expr_info->raw_expr = raw_expression; + expr_info->expr_datum = (Datum) 0; + + /* We don't need pathman activity initialization for this relation yet */ + hooks_enabled = false; /* This will fail with elog in case of wrong expression * with more or less understable text */ - query = parse_analyze(parsetree, query_string, NULL, 0); + querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + query = (Query *) lfirst(list_head(querytree_list)); - /* We use analyzed query only to get type of expression */ + /* expr_node is node that we need for further use */ target_entry = lfirst(list_head(query->targetList)); - expr_node = (Node *)((TargetEntry *)target_entry)->expr; - return get_call_expr_argtype(expr_node, 0); -} + expr_node = (Node *) target_entry->expr; + + /* Now we have node and can determine type of that node */ + expr_info->expr_type = exprType(expr_node); + + if (check_hash_func) + { + TypeCacheEntry *tce; + + tce = lookup_type_cache(expr_info->expr_type, TYPECACHE_HASH_PROC); + if (tce->hash_proc == InvalidOid) + elog(ERROR, "Expression should be hashable"); + } + + if (!make_plan) + return expr_info; + + /* Plan this query. 
We reuse 'expr_node' here */ + plan = pg_plan_query(query, 0, NULL); + target_entry = lfirst(list_head(plan->planTree->targetlist)); + expr_node = (Node *) target_entry->expr; + expr_node = eval_const_expressions(NULL, expr_node); + /* Enable pathman */ + hooks_enabled = true; + + /* Convert expression to string and return it as datum */ + out_string = nodeToString(expr_node); + expr_info->expr_datum = CStringGetTextDatum(out_string); + pfree(out_string); + + return expr_info; +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 97b84377..49f2659c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -635,12 +635,12 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); /* Fetch values from slot for expression */ - adapt_values((Node *)prel->expr, (void *) &expr_walker_context); - - MemoryContextSwitchTo(old_cxt); + adapt_values(prel->expr, (void *) &expr_walker_context); /* Prepare state for execution */ - expr_state = ExecPrepareExpr(prel->expr, estate); + expr_state = ExecInitExpr((Expr *)prel->expr, NULL); + + MemoryContextSwitchTo(old_cxt); /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 8f754d57..c48f72d2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -57,7 +57,7 @@ PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); -PG_FUNCTION_INFO_V1( is_expression_suitable ); +//PG_FUNCTION_INFO_V1( is_expression_suitable ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); @@ -489,6 +489,7 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } +/* Datum is_expression_suitable(PG_FUNCTION_ARGS) { @@ -503,7 +504,7 @@ is_expression_suitable(PG_FUNCTION_ARGS) result = 
(tce->hash_proc != InvalidOid); PG_RETURN_BOOL(result); -} +}*/ Datum is_attribute_nullable(PG_FUNCTION_ARGS) @@ -651,20 +652,19 @@ Datum add_to_pathman_config(PG_FUNCTION_ARGS) { Oid relid; - text *attname; + char *expression; PartType parttype; Relation pathman_config; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; + bool refresh_part_info; HeapTuple htup; CatalogIndexState indstate; - char *expr; - Oid expr_type; - - PathmanInitState init_state; - MemoryContext old_mcxt = CurrentMemoryContext; + PathmanInitState init_state; + PartExpressionInfo *expr_info; + MemoryContext old_mcxt = CurrentMemoryContext; if (PG_ARGISNULL(0)) elog(ERROR, "'parent_relid' should not be NULL"); @@ -674,7 +674,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Read parameters */ relid = PG_GETARG_OID(0); - attname = PG_GETARG_TEXT_P(1); /* Check that relation exists */ if (!check_relation_exists(relid)) @@ -683,20 +682,25 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Select partitioning type using 'range_interval' */ parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; + /* Parse and check expression */ + expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expr_info = get_part_expression_info(relid, expression, (parttype == PT_HASH), true); + Assert(expr_info->expr_datum != (Datum) 0); + /* * Initialize columns (partrel, attname, parttype, range_interval). 
*/ values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); isnull[Anum_pathman_config_partrel - 1] = false; - values[Anum_pathman_config_attname - 1] = PointerGetDatum(attname); - isnull[Anum_pathman_config_attname - 1] = false; + values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); + isnull[Anum_pathman_config_atttype - 1] = false; - expr = TextDatumGetCString(PointerGetDatum(attname)); - expr_type = get_partition_expr_type(relid, expr); + values[Anum_pathman_config_expression - 1] = expr_info->expr_datum; + isnull[Anum_pathman_config_expression - 1] = false; - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); - isnull[Anum_pathman_config_atttype - 1] = false; + values[Anum_pathman_config_raw_expression - 1] = CStringGetTextDatum(expression); + isnull[Anum_pathman_config_raw_expression - 1] = false; values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); isnull[Anum_pathman_config_parttype - 1] = false; @@ -721,35 +725,39 @@ add_to_pathman_config(PG_FUNCTION_ARGS) CatalogCloseIndexes(indstate); heap_close(pathman_config, RowExclusiveLock); - /* Now try to create a PartRelationInfo */ - PG_TRY(); + refresh_part_info = PG_GETARG_BOOL(3); + if (refresh_part_info) { - /* Some flags might change during refresh attempt */ - save_pathman_init_state(&init_state); + /* Now try to create a PartRelationInfo */ + PG_TRY(); + { + /* Some flags might change during refresh attempt */ + save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, - values, - isnull, - false); /* initialize immediately */ - } - PG_CATCH(); - { - ErrorData *edata; + refresh_pathman_relation_info(relid, + values, + isnull, + false); /* initialize immediately */ + } + PG_CATCH(); + { + ErrorData *edata; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); - FlushErrorState(); + /* Switch to the original context & copy edata */ + 
MemoryContextSwitchTo(old_mcxt); + edata = CopyErrorData(); + FlushErrorState(); - /* We have to restore all changed flags */ - restore_pathman_init_state(&init_state); + /* We have to restore all changed flags */ + restore_pathman_init_state(&init_state); - /* Show error message */ - elog(ERROR, "%s", edata->message); + /* Show error message */ + elog(ERROR, "%s", edata->message); - FreeErrorData(edata); + FreeErrorData(edata); + } + PG_END_TRY(); } - PG_END_TRY(); PG_RETURN_BOOL(true); } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index e6b039e6..6b27e823 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -49,9 +49,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) pfree(arr); \ } while (0) - Oid parent_relid = PG_GETARG_OID(0), - expr_type; - const char *expr = TextDatumGetCString(PG_GETARG_DATUM(1)); + Oid parent_relid = PG_GETARG_OID(0); uint32 partitions_count = PG_GETARG_INT32(2), i; @@ -66,8 +64,6 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) if (get_pathman_relation_info(parent_relid)) elog(ERROR, "cannot add new HASH partitions"); - expr_type = get_partition_expr_type(parent_relid, expr); - /* Extract partition names */ if (!PG_ARGISNULL(3)) partition_names = deconstruct_text_array(PG_GETARG_DATUM(3), &partition_names_size); @@ -104,7 +100,6 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) /* Create a partition (copy FKs, invoke callbacks etc) */ create_single_hash_partition_internal(parent_relid, i, partitions_count, - expr_type, partition_rv, tablespace); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 84ae9bc1..532d69ee 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -87,7 +87,6 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* RANGE boundaries + value type */ Bound start, end; - Oid value_type; /* Optional: name & tablespace */ RangeVar *partition_name_rv; @@ -103,7 +102,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch mandatory args */ parent_relid = 
PG_GETARG_OID(0); - value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + //value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); start = PG_ARGISNULL(1) ? MakeBoundInf(MINUS_INFINITY) : @@ -136,7 +135,6 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) partition_relid = create_single_range_partition_internal(parent_relid, &start, &end, - value_type, partition_name_rv, tablespace); @@ -380,7 +378,8 @@ build_range_condition(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(PG_GETARG_DATUM(3)); - con = build_range_check_constraint(relid, text_to_cstring(attname), + con = build_range_check_constraint(relid, + NULL, &min, &max, bounds_type); @@ -847,14 +846,14 @@ modify_range_constraint(Oid child_relid, { Constraint *constraint; Relation partition_rel; - char *attname_nonconst = pstrdup(attname); + //char *attname_nonconst = pstrdup(attname); /* Drop old constraint */ drop_check_constraint(child_relid, attnum); /* Build a new one */ constraint = build_range_check_constraint(child_relid, - attname_nonconst, + NULL, lower, upper, atttype); @@ -866,7 +865,7 @@ modify_range_constraint(Oid child_relid, false, true, true); heap_close(partition_rel, NoLock); - pfree(attname_nonconst); + //pfree(attname_nonconst); } /* diff --git a/src/relation_info.c b/src/relation_info.c index 1493fab7..75e59413 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -21,6 +21,7 @@ #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#include "optimizer/clauses.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -62,6 +63,53 @@ static Oid get_parent_of_partition_internal(Oid partition, HASHACTION action); +struct expr_mutator_context +{ + Oid relid; /* partitioned table */ + List *rtable; /* range table list from expression query */ +}; + +/* + * To prevent calculation of Vars in expression, we wrap them with + * CustomConst, and later before execution we fill it with actual value + */ +static Node * 
+expression_mutator(Node *node, struct expr_mutator_context *context) +{ + const TypeCacheEntry *typcache; + + if (IsA(node, Var)) + { + //Var *variable = (Var *) node; + Node *new_node = newNode(sizeof(CustomConst), T_Const); + Const *new_const = (Const *)new_node; + + /* + RangeTblEntry *entry = rt_fetch(variable->varno, context->rtable); + if (entry->relid != context->relid) + elog(ERROR, "Columns in the expression should " + "be only from partitioned relation"); + */ + + /* we only need varattno from original Var, for now */ + ((CustomConst *)new_node)->varattno = ((Var *)node)->varattno; + + new_const->consttype = ((Var *)node)->vartype; + new_const->consttypmod = ((Var *)node)->vartypmod; + new_const->constcollid = ((Var *)node)->varcollid; + new_const->constvalue = (Datum) 0; + new_const->constisnull = true; + new_const->location = -2; + + typcache = lookup_type_cache(new_const->consttype, 0); + new_const->constbyval = typcache->typbyval; + new_const->constlen = typcache->typlen; + + return new_node; + } + return expression_tree_mutator(node, expression_mutator, (void *) context); +} + /* * refresh\invalidate\get\remove PartRelationInfo functions. 
*/ @@ -82,9 +130,10 @@ refresh_pathman_relation_info(Oid relid, PartRelationInfo *prel; Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; - const char *expr; + char *expr; Oid expr_type; HeapTuple tp; + MemoryContext oldcontext; prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, relid, HASH_ENTER, @@ -137,15 +186,21 @@ refresh_pathman_relation_info(Oid relid, prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); /* Read config values */ - expr = TextDatumGetCString(values[Anum_pathman_config_attname - 1]); + expr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); expr_type = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); /* * Save parsed expression to cache and use already saved expression type * from config */ - prel->expr = (Expr *) get_expression_node(relid, expr, true); + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + prel->expr = (Node *) stringToNode(expr); + fix_opfuncids(prel->expr); + prel->expr = expression_mutator(prel->expr, NULL); + MemoryContextSwitchTo(oldcontext); + prel->atttype = expr_type; + pfree(expr); tp = SearchSysCache1(TYPEOID, values[Anum_pathman_config_atttype - 1]); if (HeapTupleIsValid(tp)) From 7d8167b609aa1491bc60a5c58de267bef3aa0461 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Mar 2017 15:18:01 +0300 Subject: [PATCH 0291/1124] pathman_post_parse_analysis_hook(): improve 'shared_preload_libraries' check (issue #82) --- src/hooks.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/hooks.c b/src/hooks.c index 72169921..6dea3c55 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -20,6 +20,7 @@ #include "xact_handling.h" #include "access/transam.h" +#include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" #include "optimizer/restrictinfo.h" @@ -556,14 +557,33 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) /* Check 
that pg_pathman is the last extension loaded */ if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) { - char *spl_value; /* value of "shared_preload_libraries" GUC */ + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *spl_value; /* value of "shared_preload_libraries" GUC */ + /* Do we have to escalate privileges? */ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Become superuser in order to bypass sequence ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + + /* Only SU can read this GUC */ #if PG_VERSION_NUM >= 90600 spl_value = GetConfigOptionByName("shared_preload_libraries", NULL, false); #else spl_value = GetConfigOptionByName("shared_preload_libraries", NULL); #endif + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + ereport(ERROR, (errmsg("extension conflict has been detected"), errdetail("shared_preload_libraries = \"%s\"", spl_value), From fd8474f253cb97bdf2035a66d6d6dd8e80540d77 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 27 Mar 2017 15:18:28 +0300 Subject: [PATCH 0292/1124] Fix execution --- src/partition_creation.c | 9 +++++---- src/partition_filter.c | 12 ++++++------ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 5af03458..75439569 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1804,7 +1804,7 @@ get_part_expression_info(Oid relid, const char *expr_string, } if (!make_plan) - return expr_info; + goto end; /* Plan this query. 
We reuse 'expr_node' here */ plan = pg_plan_query(query, 0, NULL); @@ -1812,13 +1812,14 @@ get_part_expression_info(Oid relid, const char *expr_string, expr_node = (Node *) target_entry->expr; expr_node = eval_const_expressions(NULL, expr_node); - /* Enable pathman */ - hooks_enabled = true; - /* Convert expression to string and return it as datum */ out_string = nodeToString(expr_node); expr_info->expr_datum = CStringGetTextDatum(out_string); pfree(out_string); +end: + /* Enable pathman hooks */ + hooks_enabled = true; + return expr_info; } diff --git a/src/partition_filter.c b/src/partition_filter.c index 49f2659c..bd5e9444 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -634,6 +634,12 @@ partition_filter_exec(CustomScanState *node) old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + /* Prepare walker context */ + expr_walker_context.prel = prel; + expr_walker_context.slot = slot; + expr_walker_context.tup = ExecCopySlotTuple(slot); + expr_walker_context.clear = false; + /* Fetch values from slot for expression */ adapt_values(prel->expr, (void *) &expr_walker_context); @@ -645,12 +651,6 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Prepare walker context */ - expr_walker_context.prel = prel; - expr_walker_context.slot = slot; - expr_walker_context.tup = ExecCopySlotTuple(slot); - expr_walker_context.clear = false; - /* Execute expression */ value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); From 34922aaefc7de723794901a14785257380a3e5f1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Mar 2017 15:31:28 +0300 Subject: [PATCH 0293/1124] add TODO (more tests) --- src/hooks.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 6dea3c55..53bf30a5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -573,6 +573,8 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) 
save_sec_context | SECURITY_LOCAL_USERID_CHANGE); } + /* TODO: add a test for this case (non-privileged user etc) */ + /* Only SU can read this GUC */ #if PG_VERSION_NUM >= 90600 spl_value = GetConfigOptionByName("shared_preload_libraries", NULL, false); From ae574fd71519672ee96070b433e7d30836be9a28 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 27 Mar 2017 16:09:41 +0300 Subject: [PATCH 0294/1124] Fix rename hook --- src/hooks.c | 5 +---- src/include/relation_info.h | 7 ------- src/include/utility_stmt_hooking.h | 4 +--- src/partition_filter.c | 22 ++++++---------------- src/utility_stmt_hooking.c | 10 ++-------- 5 files changed, 10 insertions(+), 38 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 24e35d6b..7b9aa56e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -715,7 +715,6 @@ pathman_process_utility_hook(Node *parsetree, if (IsPathmanReady()) { Oid partition_relid; - AttrNumber partitioned_col; /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) @@ -733,10 +732,8 @@ pathman_process_utility_hook(Node *parsetree, /* Override standard RENAME statement if needed */ if (is_pathman_related_table_rename(parsetree, - &partition_relid, - &partitioned_col)) + &partition_relid)) PathmanRenameConstraint(partition_relid, - partitioned_col, (const RenameStmt *) parsetree); } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index f8e9d3ef..411e680f 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -113,13 +113,6 @@ typedef struct max; } RangeEntry; -/* Used to convert 'varno' attributes accodirdingly to working query */ -typedef struct -{ - Oid relid; /* relid by which we can determine what rte we need in current query */ - int res_idx; /* varno will be used for Var */ -} RTEMapItem; - /* * PartRelationInfo * Per-relation partitioning information diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index 18f86e2e..f0fdf9cd 100644 --- 
a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -21,13 +21,11 @@ /* Various traits */ bool is_pathman_related_copy(Node *parsetree); bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out, - AttrNumber *partitioned_col_out); + Oid *partition_relid_out); /* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); void PathmanRenameConstraint(Oid partition_relid, - AttrNumber partitioned_col, const RenameStmt *partition_rename_stmt); diff --git a/src/partition_filter.c b/src/partition_filter.c index bd5e9444..35f8ccdf 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -533,7 +533,6 @@ struct expr_walker_context const PartRelationInfo *prel; TupleTableSlot *slot; HeapTuple tup; - bool clear; }; /* Fills CustomConst nodes with values from slot */ @@ -555,20 +554,12 @@ adapt_values (Node *node, struct expr_walker_context *context) attnum = ((CustomConst *)node)->varattno; Assert(attnum != InvalidAttrNumber); - if (context->clear) - { - cst->constvalue = (Datum) 0; - cst->constisnull = true; - } - else - { - /* check that type is still same */ - Assert(context->slot->tts_tupleDescriptor-> - attrs[attnum - 1]->atttypid == cst->consttype); - cst->constvalue = heap_getattr(context->tup, attnum, - context->slot->tts_tupleDescriptor, &isNull); - cst->constisnull = isNull; - } + /* check that type is still same */ + Assert(context->slot->tts_tupleDescriptor-> + attrs[attnum - 1]->atttypid == cst->consttype); + cst->constvalue = heap_getattr(context->tup, attnum, + context->slot->tts_tupleDescriptor, &isNull); + cst->constisnull = isNull; return false; } @@ -638,7 +629,6 @@ partition_filter_exec(CustomScanState *node) expr_walker_context.prel = prel; expr_walker_context.slot = slot; expr_walker_context.tup = ExecCopySlotTuple(slot); - expr_walker_context.clear = false; /* Fetch values from slot for expression */ adapt_values(prel->expr, (void *) 
&expr_walker_context); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index cc9d37b6..84497ce3 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -134,8 +134,7 @@ is_pathman_related_copy(Node *parsetree) */ bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out, /* ret value */ - AttrNumber *partitioned_col_out) /* ret value */ + Oid *partition_relid_out) /* ret value */ { RenameStmt *rename_stmt = (RenameStmt *) parsetree; Oid partition_relid, @@ -147,7 +146,6 @@ is_pathman_related_table_rename(Node *parsetree, /* Set default values */ if (partition_relid_out) *partition_relid_out = InvalidOid; - if (partitioned_col_out) *partitioned_col_out = InvalidAttrNumber; if (!IsA(parsetree, RenameStmt)) return false; @@ -167,14 +165,11 @@ is_pathman_related_table_rename(Node *parsetree, return false; /* Is parent partitioned? */ - /* FIX this if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { if (partition_relid_out) *partition_relid_out = partition_relid; - if (partitioned_col_out) *partitioned_col_out = prel->attnum; - return true; - } */ + } return false; } @@ -692,7 +687,6 @@ prepare_rri_for_copy(EState *estate, */ void PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ - AttrNumber partitioned_col, /* partitioned column */ const RenameStmt *part_rename_stmt) /* partition rename stmt */ { char *old_constraint_name, From 1ccf5b9c559023e578a59fee5b1cef5e2e3d26aa Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 27 Mar 2017 17:27:15 +0300 Subject: [PATCH 0295/1124] Use more adequate name for variable, optimize code --- src/hooks.c | 25 +++++++------------- src/include/pathman.h | 2 +- src/partition_creation.c | 51 ++++------------------------------------ src/pg_pathman.c | 4 ++-- 4 files changed, 16 insertions(+), 66 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 7b9aa56e..fb64c2a2 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -66,7 +66,7 @@ 
pathman_join_pathlist_hook(PlannerInfo *root, innerrel, jointype, extra); /* Hooks can be disabled */ - if (!hooks_enabled) + if (!pathman_hooks_enabled) return; /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ @@ -209,7 +209,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, set_rel_pathlist_hook_next(root, rel, rti, rte); /* Hooks can be disabled */ - if (!hooks_enabled) + if (!pathman_hooks_enabled) return; /* Make sure that pg_pathman is ready */ @@ -492,19 +492,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) PG_TRY(); { - /* Hooks can be disabled */ - if (!hooks_enabled) - { - /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); - else - result = standard_planner(parse, cursorOptions, boundParams); - - return result; - } - - if (pathman_ready) + if (pathman_ready && pathman_hooks_enabled) { /* Increment relation tags refcount */ incr_refcount_relation_tags(); @@ -519,6 +507,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) else result = standard_planner(parse, cursorOptions, boundParams); + if (!pathman_hooks_enabled) + return result; + if (pathman_ready) { /* Give rowmark-related attributes correct names */ @@ -564,7 +555,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) post_parse_analyze_hook_next(pstate, query); /* Hooks can be disabled */ - if (!hooks_enabled) + if (!pathman_hooks_enabled) return; /* We shouldn't do anything on BEGIN or SET ISOLATION LEVEL stmts */ @@ -642,7 +633,7 @@ pathman_relcache_hook(Datum arg, Oid relid) return; /* Hooks can be disabled */ - if (!hooks_enabled) + if (!pathman_hooks_enabled) return; /* We shouldn't even consider special OIDs */ diff --git a/src/include/pathman.h b/src/include/pathman.h index df08ab73..9f39eb53 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -86,7 +86,7 @@ extern Oid pathman_config_relid; extern Oid 
pathman_config_params_relid; /* Hooks enable state */ -extern bool hooks_enabled; +extern bool pathman_hooks_enabled; /* * Just to clarify our intentions (return the corresponding relid). diff --git a/src/partition_creation.c b/src/partition_creation.c index 75439569..ea2047c8 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1708,51 +1708,10 @@ parse_expression(Oid relid, const char *expr, char **query_string_out) return (Node *)(lfirst(list_head(parsetree_list))); } -/* By given relation id and expression returns node */ /* -Node * -get_raw_expression(Oid relid, const char *expr) -{ - List *querytree_list; - List *target_list; - char *query_string; - Node *parsetree, - *result; - Query *query; - TargetEntry *target_entry; - PlannedStmt *plan; - MemoryContext oldcontext; - struct expr_mutator_context context; - - parsetree = parse_expression(relid, expr, &query_string), - target_list = ((SelectStmt *)parsetree)->targetList; - - result = (Node *)(((ResTarget *)(lfirst(list_head(target_list))))->val); - return result; */ - - /* - hooks_enabled = false; - - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); - query = (Query *)lfirst(list_head(querytree_list)); - plan = pg_plan_query(query, 0, NULL); - target_entry = lfirst(list_head(plan->planTree->targetlist)); - - hooks_enabled = true; - - result = (Node *)target_entry->expr; - - oldcontext = MemoryContextSwitchTo(TopMemoryContext); - - context.relid = relid; - context.rtable = plan->rtable; - - result = expression_mutator(result, (void *) &context); - MemoryContextSwitchTo(oldcontext); - return result; -}*/ - -/* Determines type of expression for a relation */ + * Parses expression related to 'relid', and returns its type, + * raw expression tree, and if specified returns its plan + */ PartExpressionInfo * get_part_expression_info(Oid relid, const char *expr_string, bool check_hash_func, bool make_plan) @@ -1780,7 +1739,7 @@ get_part_expression_info(Oid relid, const 
char *expr_string, expr_info->expr_datum = (Datum) 0; /* We don't need pathman activity initialization for this relation yet */ - hooks_enabled = false; + pathman_hooks_enabled = false; /* This will fail with elog in case of wrong expression * with more or less understable text */ @@ -1819,7 +1778,7 @@ get_part_expression_info(Oid relid, const char *expr_string, end: /* Enable pathman hooks */ - hooks_enabled = true; + pathman_hooks_enabled = true; return expr_info; } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 748413e6..91317c46 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -42,8 +42,8 @@ PG_MODULE_MAGIC; Oid pathman_config_relid = InvalidOid, pathman_config_params_relid = InvalidOid; -/* Used to temporary disable hooks */ -bool hooks_enabled = true; +/* Used to disable hooks temporarily */ +bool pathman_hooks_enabled = true; /* pg module functions */ From ff9712188e84b8c76c137c70d53cca87b7262741 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 27 Mar 2017 19:15:42 +0300 Subject: [PATCH 0296/1124] Start modifying range functions --- init.sql | 28 ++----------- range.sql | 23 ++++++----- src/include/relation_info.h | 1 + src/pl_funcs.c | 81 +------------------------------------ src/pl_range_funcs.c | 21 +++------- src/relation_info.c | 2 + 6 files changed, 26 insertions(+), 130 deletions(-) diff --git a/init.sql b/init.sql index 25ebff33..3a2a7516 100644 --- a/init.sql +++ b/init.sql @@ -15,8 +15,7 @@ * text to Datum */ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( - partrel REGCLASS, - attname TEXT, + atttype OID, parttype INTEGER, range_interval TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' @@ -44,11 +43,10 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( CHECK (parttype IN (1, 2)) /* check for correct interval */ - /* CHECK (@extschema@.validate_interval_value(partrel, - attname, + atttype, parttype, - range_interval)) */ + range_interval)) ); @@ -786,26 +784,6 @@ CREATE OR REPLACE FUNCTION 
@extschema@.validate_relname( RETURNS VOID AS 'pg_pathman', 'validate_relname' LANGUAGE C; -/* - * Checks if attribute is nullable - */ -CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - relid REGCLASS, - attname TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' -LANGUAGE C STRICT; - -/* - * Checks if expression is suitable - */ - /* -CREATE OR REPLACE FUNCTION @extschema@.is_expression_suitable( - relid REGCLASS, - expr TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_expression_suitable' -LANGUAGE C STRICT; -*/ - /* * Check if regclass is date or timestamp. */ diff --git a/range.sql b/range.sql index 84c9fefa..ccef515c 100644 --- a/range.sql +++ b/range.sql @@ -26,7 +26,7 @@ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS VOID AS @@ -40,32 +40,32 @@ BEGIN /* Get min and max values */ EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', - attribute, parent_relid::TEXT) + expression, parent_relid::TEXT) INTO v_count, v_min, v_max; /* Check if column has NULL values */ IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION 'column "%" contains NULL values', attribute; + RAISE EXCEPTION 'expression "%" returns NULL values', expression; END IF; /* Check lower boundary */ IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than min value of "%"', attribute; + RAISE EXCEPTION 'start value is less than min value of "%"', expression; END IF; /* Check upper boundary */ IF end_value <= v_max THEN - RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; END IF; END $$ LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified relation based on datetime attribute + * Creates RANGE partitions for specified relation based on datetime 
expression */ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, @@ -91,8 +91,8 @@ BEGIN PERFORM @extschema@.lock_partitioned_relation(parent_relid); END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; @@ -100,7 +100,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO v_rows_count, v_max; IF v_rows_count = 0 THEN @@ -253,6 +253,9 @@ BEGIN end_value); END IF; + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); + /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) VALUES (parent_relid, attribute, 2, p_interval::TEXT); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 411e680f..ec4e6ae5 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -128,6 +128,7 @@ typedef struct RangeEntry *ranges; /* per-partition range entry or NULL */ Node *expr; /* planned expression */ + char *expr_string; /* string with original expression */ PartType parttype; /* partitioning type (HASH | RANGE) */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c48f72d2..02f92ede 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -372,20 +372,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) continue; } - - // FIX this - //partattr_cstr = 
get_attname(PrelParentRelid(prel), prel->attnum); - //if (!partattr_cstr) - //{ - /* Parent does not exist, go to the next 'prel' */ - // usercxt->current_prel = NULL; - // continue; - //} /* Fill in common values */ values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); values[Anum_pathman_pl_parttype - 1] = prel->parttype; - values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(partattr_cstr); + values[Anum_pathman_pl_partattr - 1] = prel->expr_string; switch (prel->parttype) { @@ -489,74 +480,6 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } -/* -Datum -is_expression_suitable(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - char *expr = text_to_cstring(PG_GETARG_TEXT_P(1)); - bool result; - - TypeCacheEntry *tce; - Oid type_oid = get_partition_expr_type(relid, expr); - - tce = lookup_type_cache(type_oid, TYPECACHE_HASH_PROC); - result = (tce->hash_proc != InvalidOid); - - PG_RETURN_BOOL(result); -}*/ - -Datum -is_attribute_nullable(PG_FUNCTION_ARGS) -{ - /* - Oid relid = PG_GETARG_OID(0); - char *relname = get_rel_name(relid), - *namespace_name = get_namespace_name(get_rel_namespace(relid)); - char *expr = text_to_cstring(PG_GETARG_TEXT_P(1)); - char *fmt = "SELECT (%s) FROM %s.%s"; - bool result = true; - HeapTuple tp; - List *parsetree_list, - *querytree_list, - *plantree_list; - EState *estate; - ExprContext *econtext; - Node *parsetree, - *target_entry; - Query *query; - PlannedStmt *plan; - MemoryContext oldcontext; - SeqScanState *scanstate; - Oid expr_type; - - int n = snprintf(NULL, 0, fmt, expr, namespace_name, relname); - char *query_string = (char *) palloc(n + 1); - snprintf(query_string, n + 1, fmt, expr, namespace_name, relname); - - parsetree_list = raw_parser(query_string); - - Assert(list_length(parsetree_list) == 1); - parsetree = (Node *)(lfirst(list_head(parsetree_list))); - - query = parse_analyze(parsetree, query_string, NULL, 0); - plan = pg_plan_query(query, 0, NULL); - - 
target_entry = lfirst(list_head(plan->planTree->targetlist)); - expr_type = get_call_expr_argtype(((TargetEntry *)target_entry)->expr, 0); - - estate = CreateExecutorState(); - - Assert(nodeTag(plan->planTree) == T_SeqScan); - scanstate = ExecInitSeqScan(plan->planTree, estate, 0); - - pfree(query_string); - */ - bool result = true; - PG_RETURN_BOOL(result); /* keep compiler happy */ -} - - /* * ------------------------ * Useful string builders @@ -670,7 +593,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) elog(ERROR, "'parent_relid' should not be NULL"); if (PG_ARGISNULL(1)) - elog(ERROR, "'attname' should not be NULL"); + elog(ERROR, "'expression' should not be NULL"); /* Read parameters */ relid = PG_GETARG_OID(0); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 532d69ee..93d800b2 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -637,38 +637,27 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) Datum validate_interval_value(PG_FUNCTION_ARGS) { - Oid partrel = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - PartType parttype = DatumGetPartType(PG_GETARG_DATUM(2)); - Datum interval_text = PG_GETARG_DATUM(3); + Oid atttype = PG_GETARG_OID(0); + PartType parttype = DatumGetPartType(PG_GETARG_DATUM(1)); + Datum interval_text = PG_GETARG_DATUM(2); Datum interval_value; Oid interval_type; if (PG_ARGISNULL(0)) - elog(ERROR, "'partrel' should not be NULL"); + elog(ERROR, "'atttype' should not be NULL"); if (PG_ARGISNULL(1)) - elog(ERROR, "'attname' should not be NULL"); - - if (PG_ARGISNULL(2)) elog(ERROR, "'parttype' should not be NULL"); /* * NULL interval is fine for both HASH and RANGE. 
But for RANGE we need * to make some additional checks */ - if (!PG_ARGISNULL(3)) + if (!PG_ARGISNULL(2)) { - char *attname_cstr; - Oid atttype; /* type of partitioned attribute */ - if (parttype == PT_HASH) elog(ERROR, "interval must be NULL for HASH partitioned table"); - /* Convert attname to CSTRING and fetch column's type */ - attname_cstr = text_to_cstring(attname); - atttype = get_attribute_type(partrel, attname_cstr, false); - /* Try converting textual representation */ interval_value = extract_binary_interval_from_text(interval_text, atttype, diff --git a/src/relation_info.c b/src/relation_info.c index 75e59413..14b371d3 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -194,6 +194,8 @@ refresh_pathman_relation_info(Oid relid, * from config */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); + prel->expr_string = TextDatumGetCString( + values[Anum_pathman_config_raw_expression - 1]); prel->expr = (Node *) stringToNode(expr); fix_opfuncids(prel->expr); prel->expr = expression_mutator(prel->expr, NULL); From f85232a203f1b18a910b554409f4c4140044046c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 28 Mar 2017 17:27:59 +0300 Subject: [PATCH 0297/1124] refactoring (move functions, renames and fixes), use bounds cache instead of constraints cache, introduce macro AssertTemporaryContext() --- src/hooks.c | 4 +- src/include/init.h | 42 +++-- src/include/relation_info.h | 77 ++++++--- src/init.c | 299 +++++++------------------------- src/pathman_workers.c | 2 +- src/pl_funcs.c | 6 +- src/pl_range_funcs.c | 4 +- src/relation_info.c | 336 +++++++++++++++++++++++++++++++----- 8 files changed, 451 insertions(+), 319 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 0e2613f7..bd9b11ff 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -624,8 +624,8 @@ pathman_relcache_hook(Datum arg, Oid relid) if (relid == get_pathman_config_relid(false)) delay_pathman_shutdown(); - /* Invalidate PartConstraintInfo cache if needed */ - 
forget_constraint_of_partition(relid); + /* Invalidate PartBoundInfo cache if needed */ + forget_bounds_of_partition(relid); /* Invalidate PartParentInfo cache if needed */ partitioned_table = forget_parent_of_partition(relid, &search); diff --git a/src/include/init.h b/src/include/init.h index 7e4f8786..9037cf56 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -37,15 +37,26 @@ typedef struct } PathmanInitState; +/* Check that this is a temporary memory context that's going to be destroyed */ +#define AssertTemporaryContext() \ + do { \ + Assert(CurrentMemoryContext != TopMemoryContext); \ + Assert(CurrentMemoryContext != TopPathmanContext); \ + Assert(CurrentMemoryContext != PathmanRelationCacheContext); \ + Assert(CurrentMemoryContext != PathmanParentCacheContext); \ + Assert(CurrentMemoryContext != PathmanBoundCacheContext); \ + } while (0) + + #define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; extern MemoryContext PathmanRelationCacheContext; extern MemoryContext PathmanParentCacheContext; -extern MemoryContext PathmanCostraintCacheContext; +extern MemoryContext PathmanBoundCacheContext; extern HTAB *partitioned_rels; extern HTAB *parent_cache; -extern HTAB *constraint_cache; +extern HTAB *bound_cache; /* pg_pathman's initialization state */ extern PathmanInitState pg_pathman_init_state; @@ -55,10 +66,10 @@ extern PathmanInitState pg_pathman_init_state; static inline const char * simpify_mcxt_name(MemoryContext mcxt) { - static const char *top_mcxt = "maintenance"; - static const char *bound_mcxt = "bounds cache"; - static const char *parent_mcxt = "parents cache"; - static const char *constr_mcxt = "constraints cache"; + static const char *top_mcxt = "maintenance", + *bound_mcxt = "partition info cache", + *parent_mcxt = "parent mapping cache", + *constr_mcxt = "bounds cache"; if (mcxt == TopPathmanContext) return top_mcxt; @@ -69,7 +80,7 @@ simpify_mcxt_name(MemoryContext mcxt) else if (mcxt == PathmanParentCacheContext) return 
parent_mcxt; - else if (mcxt == PathmanCostraintCacheContext) + else if (mcxt == PathmanBoundCacheContext) return constr_mcxt; else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); @@ -166,11 +177,6 @@ bool load_config(void); void unload_config(void); -void fill_prel_with_partitions(const Oid *partitions, - const uint32 parts_count, - const char *part_column_name, - PartRelationInfo *prel); - /* Result of find_inheritance_children_array() */ typedef enum { @@ -203,4 +209,16 @@ bool read_pathman_params(Oid relid, bool *isnull); +bool validate_range_constraint(const Expr *expr, + const PartRelationInfo *prel, + const AttrNumber part_attno, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null); + +bool validate_hash_constraint(const Expr *expr, + const PartRelationInfo *prel, + const AttrNumber part_attno, + uint32 *part_hash); + + #endif /* PATHMAN_INIT_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 9435b1f1..3399cb68 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -37,7 +37,6 @@ typedef struct #define IsPlusInfinity(i) ( (i)->is_infinite == PLUS_INFINITY ) #define IsMinusInfinity(i) ( (i)->is_infinite == MINUS_INFINITY ) - static inline Bound CopyBound(const Bound *src, bool byval, int typlen) { @@ -75,6 +74,13 @@ BoundGetValue(const Bound *bound) return bound->value; } +static inline void +FreeBound(Bound *bound, bool byval) +{ + if (!IsInfinite(bound) && !byval) + pfree(DatumGetPointer(BoundGetValue(bound))); +} + static inline int cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) { @@ -97,7 +103,7 @@ cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) */ typedef enum { - PT_INDIFFERENT = 0, /* for part type traits (virtual type) */ + PT_ANY = 0, /* for part type traits (virtual type) */ PT_HASH, PT_RANGE } PartType; @@ -122,11 +128,14 @@ typedef struct bool valid; /* is this entry valid? 
*/ bool enable_parent; /* include parent to the plan */ + PartType parttype; /* partitioning type (HASH | RANGE) */ + uint32 children_count; Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ - PartType parttype; /* partitioning type (HASH | RANGE) */ + const char *attname; /* name of the partitioned column */ + AttrNumber attnum; /* partitioned column's index */ Oid atttype; /* partitioned column's type */ int32 atttypmod; /* partitioned column type modifier */ @@ -140,7 +149,7 @@ typedef struct } PartRelationInfo; /* - * RelParentInfo + * PartParentInfo * Cached parent of the specified partition. * Allows us to quickly search for PartRelationInfo. */ @@ -150,12 +159,24 @@ typedef struct Oid parent_rel; } PartParentInfo; +/* + * PartBoundInfo + * Cached bounds of the specified partition. + */ typedef struct { Oid child_rel; /* key */ - Oid conid; - Expr *constraint; -} PartConstraintInfo; + + PartType parttype; + + /* For RANGE partitions */ + Bound range_min; + Bound range_max; + bool byval; + + /* For HASH partitions */ + uint32 hash; +} PartBoundInfo; /* * PartParentSearch @@ -220,31 +241,39 @@ void cache_parent_of_partition(Oid partition, Oid parent); Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); Oid get_parent_of_partition(Oid partition, PartParentSearch *status); -/* Constraint cache */ -void forget_constraint_of_partition(Oid partition); -Expr * get_constraint_of_partition(Oid partition, AttrNumber part_attno); +/* Bounds cache */ +void forget_bounds_of_partition(Oid partition); +PartBoundInfo * get_bounds_of_partition(Oid partition, + const PartRelationInfo *prel); /* Safe casts for PartType */ PartType DatumGetPartType(Datum datum); char * PartTypeToCString(PartType parttype); /* PartRelationInfo checker */ -void shout_if_prel_is_invalid(Oid parent_oid, +void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, - PartType expected_part_type); + const 
PartType expected_part_type); /* - * Useful static functions for freeing memory. + * Useful functions & macros for freeing memory. */ +#define FreeIfNotNull(ptr) \ + do { \ + if (ptr) \ + { \ + pfree((void *) ptr); \ + ptr = NULL; \ + } \ + } while(0) + static inline void FreeChildrenArray(PartRelationInfo *prel) { uint32 i; - Assert(PrelIsValid(prel)); - /* Remove relevant PartParentInfos */ if (prel->children) { @@ -252,6 +281,10 @@ FreeChildrenArray(PartRelationInfo *prel) { Oid child = prel->children[i]; + /* Skip if Oid is invalid (e.g. initialization error) */ + if (!OidIsValid(child)) + continue; + /* If it's *always been* relid's partition, free cache */ if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) forget_parent_of_partition(child, NULL); @@ -267,8 +300,6 @@ FreeRangesArray(PartRelationInfo *prel) { uint32 i; - Assert(PrelIsValid(prel)); - /* Remove RangeEntries array */ if (prel->ranges) { @@ -277,11 +308,14 @@ FreeRangesArray(PartRelationInfo *prel) { for (i = 0; i < PrelChildrenCount(prel); i++) { - if (!IsInfinite(&prel->ranges[i].min)) - pfree(DatumGetPointer(BoundGetValue(&prel->ranges[i].min))); + Oid child = prel->ranges[i].child_oid; - if (!IsInfinite(&prel->ranges[i].max)) - pfree(DatumGetPointer(BoundGetValue(&prel->ranges[i].max))); + /* Skip if Oid is invalid (e.g. 
initialization error) */ + if (!OidIsValid(child)) + continue; + + FreeBound(&prel->ranges[i].min, prel->attbyval); + FreeBound(&prel->ranges[i].max, prel->attbyval); } } @@ -290,5 +324,4 @@ FreeRangesArray(PartRelationInfo *prel) } } - #endif /* RELATION_INFO_H */ diff --git a/src/init.c b/src/init.c index 63e81062..a35f8c37 100644 --- a/src/init.c +++ b/src/init.c @@ -47,7 +47,7 @@ MemoryContext TopPathmanContext = NULL; MemoryContext PathmanRelationCacheContext = NULL; MemoryContext PathmanParentCacheContext = NULL; -MemoryContext PathmanCostraintCacheContext = NULL; +MemoryContext PathmanBoundCacheContext = NULL; /* Storage for PartRelationInfos */ HTAB *partitioned_rels = NULL; @@ -55,8 +55,8 @@ HTAB *partitioned_rels = NULL; /* Storage for PartParentInfos */ HTAB *parent_cache = NULL; -/* Storage for partition constraints */ -HTAB *constraint_cache = NULL; +/* Storage for PartBoundInfos */ +HTAB *bound_cache = NULL; /* pg_pathman's init status */ PathmanInitState pg_pathman_init_state; @@ -72,13 +72,7 @@ static void init_local_cache(void); static void fini_local_cache(void); static void read_pathman_config(void); -static int cmp_range_entries(const void *p1, const void *p2, void *arg); -static bool validate_range_constraint(const Expr *expr, - const PartRelationInfo *prel, - const AttrNumber part_attno, - Datum *lower, Datum *upper, - bool *lower_null, bool *upper_null); static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, @@ -86,11 +80,6 @@ static bool validate_range_opexpr(const Expr *expr, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null); -static bool validate_hash_constraint(const Expr *expr, - const PartRelationInfo *prel, - const AttrNumber part_attno, - uint32 *part_hash); - static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, const AttrNumber part_attno, @@ -311,7 +300,7 @@ init_local_cache(void) /* Destroy caches, just in case */ 
hash_destroy(partitioned_rels); hash_destroy(parent_cache); - hash_destroy(constraint_cache); + hash_destroy(bound_cache); /* Reset pg_pathman's memory contexts */ if (TopPathmanContext) @@ -319,7 +308,7 @@ init_local_cache(void) /* Check that child contexts exist */ Assert(MemoryContextIsValid(PathmanRelationCacheContext)); Assert(MemoryContextIsValid(PathmanParentCacheContext)); - Assert(MemoryContextIsValid(PathmanCostraintCacheContext)); + Assert(MemoryContextIsValid(PathmanBoundCacheContext)); /* Clear children */ MemoryContextResetChildren(TopPathmanContext); @@ -329,7 +318,7 @@ init_local_cache(void) { Assert(PathmanRelationCacheContext == NULL); Assert(PathmanParentCacheContext == NULL); - Assert(PathmanCostraintCacheContext == NULL); + Assert(PathmanBoundCacheContext == NULL); TopPathmanContext = AllocSetContextCreate(TopMemoryContext, @@ -348,10 +337,10 @@ init_local_cache(void) CppAsString(PathmanParentCacheContext), ALLOCSET_DEFAULT_SIZES); - /* For PartConstraintInfo */ - PathmanCostraintCacheContext = + /* For PartBoundInfo */ + PathmanBoundCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanCostraintCacheContext), + CppAsString(PathmanBoundCacheContext), ALLOCSET_DEFAULT_SIZES); } @@ -375,12 +364,12 @@ init_local_cache(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartConstraintInfo); - ctl.hcxt = PathmanCostraintCacheContext; + ctl.entrysize = sizeof(PartBoundInfo); + ctl.hcxt = PathmanBoundCacheContext; - constraint_cache = hash_create("pg_pathman's partition constraints cache", - PART_RELS_SIZE * CHILD_FACTOR, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + bound_cache = hash_create("pg_pathman's partition constraints cache", + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* @@ -392,159 +381,16 @@ fini_local_cache(void) /* First, destroy hash tables */ hash_destroy(partitioned_rels); hash_destroy(parent_cache); - hash_destroy(constraint_cache); 
+ hash_destroy(bound_cache); partitioned_rels = NULL; parent_cache = NULL; - constraint_cache = NULL; + bound_cache = NULL; /* Now we can clear allocations */ MemoryContextResetChildren(TopPathmanContext); } -/* - * Fill PartRelationInfo with partition-related info. - */ -void -fill_prel_with_partitions(const Oid *partitions, - const uint32 parts_count, - const char *part_column_name, - PartRelationInfo *prel) -{ - uint32 i; - Expr *con_expr; - MemoryContext mcxt = PathmanRelationCacheContext; - - /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ - prel->children = MemoryContextAllocZero(mcxt, parts_count * sizeof(Oid)); - if (prel->parttype == PT_RANGE) - prel->ranges = MemoryContextAllocZero(mcxt, parts_count * sizeof(RangeEntry)); - prel->children_count = parts_count; - - for (i = 0; i < PrelChildrenCount(prel); i++) - { - AttrNumber part_attno; - - /* NOTE: Partitions may have different TupleDescs */ - part_attno = get_attnum(partitions[i], part_column_name); - - /* Raise ERROR if there's no such column */ - if (part_attno == InvalidAttrNumber) - elog(ERROR, "partition \"%s\" has no column \"%s\"", - get_rel_name_or_relid(partitions[i]), part_column_name); - - /* Fetch constraint's expression tree */ - con_expr = get_constraint_of_partition(partitions[i], part_attno); - - /* Perform a partitioning_type-dependent task */ - switch (prel->parttype) - { - case PT_HASH: - { - uint32 hash; /* hash value < parts_count */ - - if (validate_hash_constraint(con_expr, prel, part_attno, &hash)) - prel->children[hash] = partitions[i]; - else - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("wrong constraint format for HASH partition \"%s\"", - get_rel_name_or_relid(partitions[i])), - errhint(INIT_ERROR_HINT))); - } - } - break; - - case PT_RANGE: - { - Datum lower, upper; - bool lower_null, upper_null; - - if (validate_range_constraint(con_expr, prel, part_attno, - &lower, &upper, - &lower_null, 
&upper_null)) - { - prel->ranges[i].child_oid = partitions[i]; - - prel->ranges[i].min = lower_null ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(lower); - - prel->ranges[i].max = upper_null ? - MakeBoundInf(PLUS_INFINITY) : - MakeBound(upper); - } - else - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("wrong constraint format for RANGE partition \"%s\"", - get_rel_name_or_relid(partitions[i])), - errhint(INIT_ERROR_HINT))); - } - } - break; - - default: - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Unknown partitioning type for relation \"%s\"", - get_rel_name_or_relid(PrelParentRelid(prel))), - errhint(INIT_ERROR_HINT))); - } - } - } - - /* Finalize 'prel' for a RANGE-partitioned table */ - if (prel->parttype == PT_RANGE) - { - MemoryContext old_mcxt; - FmgrInfo flinfo; - - /* Prepare function info */ - fmgr_info(prel->cmp_proc, &flinfo); - - /* Sort partitions by RangeEntry->min asc */ - qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), - sizeof(RangeEntry), cmp_range_entries, - (void *) &flinfo); - - /* Initialize 'prel->children' array */ - for (i = 0; i < PrelChildrenCount(prel); i++) - prel->children[i] = prel->ranges[i].child_oid; - - /* Copy all min & max Datums to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - prel->ranges[i].min = CopyBound(&prel->ranges[i].min, - prel->attbyval, - prel->attlen); - - prel->ranges[i].max = CopyBound(&prel->ranges[i].max, - prel->attbyval, - prel->attlen); - } - MemoryContextSwitchTo(old_mcxt); - } - -#ifdef USE_ASSERT_CHECKING - /* Check that each partition Oid has been assigned properly */ - if (prel->parttype == PT_HASH) - for (i = 0; i < PrelChildrenCount(prel); i++) - { - if (prel->children[i] == InvalidOid) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - elog(ERROR, "pg_pathman's cache 
for relation \"%s\" " - "has not been properly initialized", - get_rel_name_or_relid(PrelParentRelid(prel))); - } - } -#endif -} /* * find_inheritance_children @@ -908,15 +754,58 @@ read_pathman_config(void) heap_close(rel, AccessShareLock); } -/* qsort comparison function for RangeEntries */ -static int -cmp_range_entries(const void *p1, const void *p2, void *arg) + +/* + * Validates range constraint. It MUST have one of the following formats: + * + * VARIABLE >= CONST AND VARIABLE < CONST + * VARIABLE >= CONST + * VARIABLE < CONST + * + * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. + */ +bool +validate_range_constraint(const Expr *expr, + const PartRelationInfo *prel, + const AttrNumber part_attno, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null) { - const RangeEntry *v1 = (const RangeEntry *) p1; - const RangeEntry *v2 = (const RangeEntry *) p2; - FmgrInfo *flinfo = (FmgrInfo *) arg; + const TypeCacheEntry *tce; + + if (!expr) + return false; + + /* Set default values */ + *lower_null = *upper_null = true; + + /* Find type cache entry for partitioned column's type */ + tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); + + /* Is it an AND clause? 
*/ + if (and_clause((Node *) expr)) + { + const BoolExpr *boolexpr = (const BoolExpr *) expr; + ListCell *lc; + + /* Walk through boolexpr's args */ + foreach (lc, boolexpr->args) + { + const OpExpr *opexpr = (const OpExpr *) lfirst(lc); - return cmp_bounds(flinfo, &v1->min, &v2->min); + /* Exit immediately if something is wrong */ + if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, part_attno, + lower, upper, lower_null, upper_null)) + return false; + } + + /* Everything seems to be fine */ + return true; + } + + /* It might be just an OpExpr clause */ + else return validate_range_opexpr(expr, prel, tce, part_attno, + lower, upper, lower_null, upper_null); } /* Validates a single expression of kind VAR >= CONST or VAR < CONST */ @@ -977,60 +866,6 @@ validate_range_opexpr(const Expr *expr, } } - -/* - * Validates range constraint. It MUST have one of the following formats: - * - * VARIABLE >= CONST AND VARIABLE < CONST - * VARIABLE >= CONST - * VARIABLE < CONST - * - * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. - */ -static bool -validate_range_constraint(const Expr *expr, - const PartRelationInfo *prel, - const AttrNumber part_attno, - Datum *lower, Datum *upper, - bool *lower_null, bool *upper_null) -{ - const TypeCacheEntry *tce; - - if (!expr) - return false; - - /* Set default values */ - *lower_null = *upper_null = true; - - /* Find type cache entry for partitioned column's type */ - tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); - - /* Is it an AND clause? 
*/ - if (and_clause((Node *) expr)) - { - const BoolExpr *boolexpr = (const BoolExpr *) expr; - ListCell *lc; - - /* Walk through boolexpr's args */ - foreach (lc, boolexpr->args) - { - const OpExpr *opexpr = (const OpExpr *) lfirst(lc); - - /* Exit immediately if something is wrong */ - if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, part_attno, - lower, upper, lower_null, upper_null)) - return false; - } - - /* Everything seems to be fine */ - return true; - } - - /* It might be just an OpExpr clause */ - else return validate_range_opexpr(expr, prel, tce, part_attno, - lower, upper, lower_null, upper_null); -} - /* * Reads const value from expressions of kind: * 1) VAR >= CONST OR VAR < CONST @@ -1106,7 +941,7 @@ read_opexpr_const(const OpExpr *opexpr, * * Writes 'part_hash' hash value for this partition on success. */ -static bool +bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, const AttrNumber part_attno, diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 41fc0632..627b3210 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -645,7 +645,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* We also lock the parent relation */ get_pathman_relation_info_after_lock(relid, true, NULL), /* Partitioning type does not matter here */ - PT_INDIFFERENT); + PT_ANY); /* Check that partitioning operation result is visible */ if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1325ffae..fff5a51f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -235,7 +235,7 @@ get_partition_key_type(PG_FUNCTION_ARGS) const PartRelationInfo *prel; prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_INDIFFERENT); + shout_if_prel_is_invalid(relid, prel, PT_ANY); PG_RETURN_OID(prel->atttype); } @@ -298,12 +298,12 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt->pathman_contexts[0] = TopPathmanContext; 
usercxt->pathman_contexts[1] = PathmanRelationCacheContext; usercxt->pathman_contexts[2] = PathmanParentCacheContext; - usercxt->pathman_contexts[3] = PathmanCostraintCacheContext; + usercxt->pathman_contexts[3] = PathmanBoundCacheContext; usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ usercxt->pathman_htables[1] = partitioned_rels; usercxt->pathman_htables[2] = parent_cache; - usercxt->pathman_htables[3] = constraint_cache; + usercxt->pathman_htables[3] = bound_cache; usercxt->current_item = 0; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b78f44a4..9628ff59 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -982,10 +982,10 @@ check_range_adjacence(Oid cmp_proc, List *ranges) static char * get_qualified_rel_name(Oid relid) { - Oid namespace = get_rel_namespace(relid); + Oid nspid = get_rel_namespace(relid); return psprintf("%s.%s", - quote_identifier(get_namespace_name(namespace)), + quote_identifier(get_namespace_name(nspid)), quote_identifier(get_rel_name(relid))); } diff --git a/src/relation_info.c b/src/relation_info.c index 3e452b6a..55727787 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -64,9 +64,21 @@ static Oid get_parent_of_partition_internal(Oid partition, PartParentSearch *status, HASHACTION action); -static Expr *get_partition_constraint_expr(Oid partition, - AttrNumber part_attno, - Oid *constr_oid); + +static Expr *get_partition_constraint_expr(Oid partition, AttrNumber part_attno); + +static void fill_prel_with_partitions(PartRelationInfo *prel, + const Oid *partitions, + const uint32 parts_count); + +static void fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr, + const AttrNumber part_attno); + +static int cmp_range_entries(const void *p1, const void *p2, void *arg); + + /* @@ -90,6 +102,8 @@ refresh_pathman_relation_info(Oid relid, Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; + 
AssertTemporaryContext(); + prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, relid, HASH_ENTER, &found_entry); @@ -108,9 +122,9 @@ refresh_pathman_relation_info(Oid relid, /* Clear outdated resources */ if (found_entry && PrelIsValid(prel)) { - /* Free these arrays iff they're not NULL */ FreeChildrenArray(prel); FreeRangesArray(prel); + FreeIfNotNull(prel->attname); } /* First we assume that this entry is invalid */ @@ -140,6 +154,10 @@ refresh_pathman_relation_info(Oid relid, /* Set partitioning type */ prel->parttype = partitioning_type; + /* Copy name of partitioned attribute */ + prel->attname = MemoryContextStrdup(PathmanRelationCacheContext, + part_column_name); + /* Initialize PartRelationInfo using syscache & typcache */ prel->attnum = get_attnum(relid, part_column_name); @@ -206,9 +224,21 @@ refresh_pathman_relation_info(Oid relid, * will try to refresh it again (and again), until the error is fixed * by user manually (i.e. invalid check constraints etc). 
*/ - fill_prel_with_partitions(prel_children, - prel_children_count, - part_column_name, prel); + PG_TRY(); + { + fill_prel_with_partitions(prel, prel_children, prel_children_count); + } + PG_CATCH(); + { + /* Free remaining resources */ + FreeChildrenArray(prel); + FreeRangesArray(prel); + FreeIfNotNull(prel->attname); + + /* Rethrow ERROR further */ + PG_RE_THROW(); + } + PG_END_TRY(); /* Peform some actions for each child */ for (i = 0; i < prel_children_count; i++) @@ -257,6 +287,7 @@ invalidate_pathman_relation_info(Oid relid, bool *found) { FreeChildrenArray(prel); FreeRangesArray(prel); + FreeIfNotNull(prel->attname); prel->valid = false; /* now cache entry is invalid */ } @@ -358,9 +389,9 @@ remove_pathman_relation_info(Oid relid) NULL); if (PrelIsValid(prel)) { - /* Free these arrays iff they're not NULL */ FreeChildrenArray(prel); FreeRangesArray(prel); + FreeIfNotNull(prel->attname); } /* Now let's remove the entry completely */ @@ -372,6 +403,116 @@ remove_pathman_relation_info(Oid relid) relid, MyProcPid); } +/* Fill PartRelationInfo with partition-related info */ +static void +fill_prel_with_partitions(PartRelationInfo *prel, + const Oid *partitions, + const uint32 parts_count) +{ +/* Allocate array if partitioning type matches 'prel' (or "ANY") */ +#define AllocZeroArray(part_type, context, elem_num, elem_type) \ + ( \ + ((part_type) == PT_ANY || (part_type) == prel->parttype) ? 
\ + MemoryContextAllocZero((context), (elem_num) * sizeof(elem_type)) : \ + NULL \ + ) + + uint32 i; + MemoryContext mcxt = PathmanRelationCacheContext; + + AssertTemporaryContext(); + + /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ + prel->children = AllocZeroArray(PT_ANY, mcxt, parts_count, Oid); + prel->ranges = AllocZeroArray(PT_RANGE, mcxt, parts_count, RangeEntry); + + /* Set number of children */ + PrelChildrenCount(prel) = parts_count; + + /* Initialize bounds of partitions */ + for (i = 0; i < PrelChildrenCount(prel); i++) + { + PartBoundInfo *bound_info; + + /* Fetch constraint's expression tree */ + bound_info = get_bounds_of_partition(partitions[i], prel); + + /* Copy bounds from bound cache */ + switch (prel->parttype) + { + case PT_HASH: + prel->children[bound_info->hash] = bound_info->child_rel; + break; + + case PT_RANGE: + { + MemoryContext old_mcxt; + + /* Copy child's Oid */ + prel->ranges[i].child_oid = bound_info->child_rel; + + /* Copy all min & max Datums to the persistent mcxt */ + old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); + + prel->ranges[i].min = CopyBound(&bound_info->range_min, + prel->attbyval, + prel->attlen); + + prel->ranges[i].max = CopyBound(&bound_info->range_max, + prel->attbyval, + prel->attlen); + + /* Switch back */ + MemoryContextSwitchTo(old_mcxt); + } + break; + + default: + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("Unknown partitioning type for relation \"%s\"", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); + } + break; + } + } + + /* Finalize 'prel' for a RANGE-partitioned table */ + if (prel->parttype == PT_RANGE) + { + FmgrInfo flinfo; + + /* Prepare function info */ + fmgr_info(prel->cmp_proc, &flinfo); + + /* Sort partitions by RangeEntry->min asc */ + qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), + sizeof(RangeEntry), cmp_range_entries, + (void *) &flinfo); + + /* 
Initialize 'prel->children' array */ + for (i = 0; i < PrelChildrenCount(prel); i++) + prel->children[i] = prel->ranges[i].child_oid; + } + +#ifdef USE_ASSERT_CHECKING + /* Check that each partition Oid has been assigned properly */ + if (prel->parttype == PT_HASH) + for (i = 0; i < PrelChildrenCount(prel); i++) + { + if (!OidIsValid(prel->children[i])) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + elog(ERROR, "pg_pathman's cache for relation \"%s\" " + "has not been properly initialized", + get_rel_name_or_relid(PrelParentRelid(prel))); + } + } +#endif +} + /* * Functions for delayed invalidation. @@ -700,17 +841,22 @@ try_perform_parent_refresh(Oid parent) /* Remove partition's constraint from cache */ void -forget_constraint_of_partition(Oid partition) +forget_bounds_of_partition(Oid partition) { - PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, - partition, - HASH_FIND, - NULL); + PartBoundInfo *pcon = pathman_cache_search_relid(bound_cache, + partition, + HASH_FIND, + NULL); if (pcon) { - /* FIXME: implement pfree(constraint) logc */ + /* Call pfree() if it's RANGE bounds */ + if (pcon->parttype == PT_RANGE) + { + FreeBound(&pcon->range_min, pcon->byval); + FreeBound(&pcon->range_max, pcon->byval); + } - pathman_cache_search_relid(constraint_cache, + pathman_cache_search_relid(bound_cache, partition, HASH_REMOVE, NULL); @@ -718,36 +864,48 @@ forget_constraint_of_partition(Oid partition) } /* Return partition's constraint as expression tree */ -Expr * -get_constraint_of_partition(Oid partition, AttrNumber part_attno) +PartBoundInfo * +get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) { - PartConstraintInfo *pcon = pathman_cache_search_relid(constraint_cache, - partition, - HASH_FIND, - NULL); - if (!pcon) + PartBoundInfo *pbin = pathman_cache_search_relid(bound_cache, + partition, + HASH_FIND, + NULL); + if (!pbin) { - Oid conid; + PartBoundInfo pbin_local; Expr *con_expr; - MemoryContext 
old_mcxt; + AttrNumber part_attno; + + /* Initialize other fields */ + pbin_local.child_rel = partition; + pbin_local.byval = prel->attbyval; + + /* NOTE: Partitions may have different TupleDescs */ + part_attno = get_attnum(partition, prel->attname); + + /* Raise ERROR if there's no such column */ + if (part_attno == InvalidAttrNumber) + elog(ERROR, "partition \"%s\" has no column \"%s\"", + get_rel_name_or_relid(partition), prel->attname); + + /* Try to build constraint's expression tree (may emit ERROR) */ + con_expr = get_partition_constraint_expr(partition, part_attno); - /* Try to build constraint's expression tree */ - con_expr = get_partition_constraint_expr(partition, part_attno, &conid); + /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ + fill_pbin_with_bounds(&pbin_local, prel, con_expr, part_attno); - /* Create new entry for this constraint */ - pcon = pathman_cache_search_relid(constraint_cache, + /* We strive to delay the creation of cache's entry */ + pbin = pathman_cache_search_relid(bound_cache, partition, HASH_ENTER, NULL); - /* Copy constraint's data to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(PathmanCostraintCacheContext); - pcon->conid = conid; - pcon->constraint = copyObject(con_expr); - MemoryContextSwitchTo(old_mcxt); + /* Copy data from 'pbin_local' */ + memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); } - return pcon->constraint; + return pbin; } /* @@ -756,9 +914,7 @@ get_constraint_of_partition(Oid partition, AttrNumber part_attno) * build_check_constraint_name_internal() is used to build conname. 
*/ static Expr * -get_partition_constraint_expr(Oid partition, - AttrNumber part_attno, - Oid *constr_oid) /* optional ret value #2 */ +get_partition_constraint_expr(Oid partition, AttrNumber part_attno) { Oid conid; /* constraint Oid */ char *conname; /* constraint name */ @@ -770,10 +926,6 @@ get_partition_constraint_expr(Oid partition, conname = build_check_constraint_name_relid_internal(partition, part_attno); conid = get_relation_constraint_oid(partition, conname, true); - /* Return constraint's Oid to caller */ - if (constr_oid) - *constr_oid = conid; - if (!OidIsValid(conid)) { DisablePathman(); /* disable pg_pathman since config is broken */ @@ -809,6 +961,100 @@ get_partition_constraint_expr(Oid partition, return expr; } +/* Fill PartBoundInfo with bounds/hash */ +static void +fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr, + const AttrNumber part_attno) +{ + AssertTemporaryContext(); + + /* Copy partitioning type to 'pbin' */ + pbin->parttype = prel->parttype; + + /* Perform a partitioning_type-dependent task */ + switch (prel->parttype) + { + case PT_HASH: + { + if (!validate_hash_constraint(constraint_expr, + prel, part_attno, + &pbin->hash)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong constraint format for HASH partition \"%s\"", + get_rel_name_or_relid(pbin->child_rel)), + errhint(INIT_ERROR_HINT))); + } + } + break; + + case PT_RANGE: + { + Datum lower, upper; + bool lower_null, upper_null; + + if (validate_range_constraint(constraint_expr, + prel, part_attno, + &lower, &upper, + &lower_null, &upper_null)) + { + MemoryContext old_mcxt; + + /* Switch to the persistent memory context */ + old_mcxt = MemoryContextSwitchTo(PathmanBoundCacheContext); + + pbin->range_min = lower_null ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datumCopy(lower, + prel->attbyval, + prel->attlen)); + + pbin->range_max = upper_null ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(datumCopy(upper, + prel->attbyval, + prel->attlen)); + + /* Switch back */ + MemoryContextSwitchTo(old_mcxt); + } + else + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong constraint format for RANGE partition \"%s\"", + get_rel_name_or_relid(pbin->child_rel)), + errhint(INIT_ERROR_HINT))); + } + } + break; + + default: + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("Unknown partitioning type for relation \"%s\"", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); + } + break; + } +} + +/* qsort comparison function for RangeEntries */ +static int +cmp_range_entries(const void *p1, const void *p2, void *arg) +{ + const RangeEntry *v1 = (const RangeEntry *) p1; + const RangeEntry *v2 = (const RangeEntry *) p2; + FmgrInfo *flinfo = (FmgrInfo *) arg; + + return cmp_bounds(flinfo, &v1->min, &v2->min); +} + /* * Safe PartType wrapper. @@ -849,9 +1095,9 @@ PartTypeToCString(PartType parttype) * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
*/ void -shout_if_prel_is_invalid(Oid parent_oid, +shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, - PartType expected_part_type) + const PartType expected_part_type) { if (!prel) elog(ERROR, "relation \"%s\" has no partitions", @@ -863,8 +1109,8 @@ shout_if_prel_is_invalid(Oid parent_oid, get_rel_name_or_relid(parent_oid), MyProcPid); - /* Check partitioning type unless it's "indifferent" */ - if (expected_part_type != PT_INDIFFERENT && + /* Check partitioning type unless it's "ANY" */ + if (expected_part_type != PT_ANY && expected_part_type != prel->parttype) { char *expected_str; From a0f38b5ea7926c678aa43aa18166e239f9dc94cf Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 28 Mar 2017 18:57:59 +0300 Subject: [PATCH 0298/1124] Modify range sql scripts according expression use --- init.sql | 11 +++---- range.sql | 55 +++++++++++++++----------------- src/include/partition_creation.h | 5 +-- src/include/pathman.h | 10 +++--- src/include/relation_info.h | 3 +- src/init.c | 5 ++- src/partition_creation.c | 42 +++++++++++------------- src/pl_funcs.c | 18 +++++------ src/pl_range_funcs.c | 33 ++++++++----------- src/relation_info.c | 9 ++---- 10 files changed, 86 insertions(+), 105 deletions(-) diff --git a/init.sql b/init.sql index 3a2a7516..ccac49df 100644 --- a/init.sql +++ b/init.sql @@ -33,18 +33,17 @@ LANGUAGE C; */ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, - raw_expression TEXT NOT NULL, - atttype OID NOT NULL, + attname TEXT NOT NULL, /* expression */ parttype INTEGER NOT NULL, range_interval TEXT, + expression_p TEXT NOT NULL, /* parsed expression (until plan) */ + atttype OID NOT NULL, /* expression type */ /* check for allowed part types */ - CHECK (parttype IN (1, 2)) + CHECK (parttype IN (1, 2)), /* check for correct interval */ - CHECK (@extschema@.validate_interval_value(partrel, - atttype, + CHECK (@extschema@.validate_interval_value(atttype, 
parttype, range_interval)) ); diff --git a/range.sql b/range.sql index ccef515c..ad5d405f 100644 --- a/range.sql +++ b/range.sql @@ -132,15 +132,15 @@ BEGIN /* Check boundaries */ EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', parent_relid, - attribute, + expression, start_value, end_value, v_atttype::TEXT); END IF; /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) @@ -177,11 +177,11 @@ END $$ LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified relation based on numerical attribute + * Creates RANGE partitions for specified relation based on numerical expression */ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL, @@ -206,8 +206,8 @@ BEGIN PERFORM @extschema@.lock_partitioned_relation(parent_relid); END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; @@ -215,7 +215,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO v_rows_count, v_max; IF v_rows_count = 0 THEN @@ -223,7 +223,7 @@ BEGIN END IF; IF v_max IS NULL THEN - RAISE EXCEPTION 'column "%" has NULL values', attribute; + RAISE 
EXCEPTION 'expression "%" can return NULL values', expression; END IF; p_count := 0; @@ -248,17 +248,14 @@ BEGIN /* check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - attribute, + expression, start_value, end_value); END IF; /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) @@ -296,7 +293,7 @@ $$ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT, p_interval ANYELEMENT, @@ -317,8 +314,8 @@ BEGIN PERFORM @extschema@.lock_partitioned_relation(parent_relid); END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); IF p_interval <= 0 THEN RAISE EXCEPTION 'interval must be positive'; @@ -326,13 +323,13 @@ BEGIN /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - attribute, + expression, start_value, end_value); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) @@ -366,11 +363,11 @@ END $$ LANGUAGE plpgsql; /* - * Creates RANGE 
partitions for specified range based on datetime attribute + * Creates RANGE partitions for specified range based on datetime expression */ CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT, p_interval INTERVAL, @@ -391,18 +388,18 @@ BEGIN PERFORM @extschema@.lock_partitioned_relation(parent_relid); END IF; - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - attribute, + expression, start_value, end_value); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) @@ -1124,7 +1121,7 @@ SET client_min_messages = WARNING; */ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( p_relid REGCLASS, - attribute TEXT, + expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT) RETURNS TEXT AS 'pg_pathman', 'build_range_condition' diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 8d6fa4d5..4f3adce3 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -75,7 +75,7 @@ Node * build_raw_hash_check_tree(Node *raw_expression, Oid relid, Oid value_type); -void drop_check_constraint(Oid relid, AttrNumber attnum); +void drop_check_constraint(Oid relid); typedef struct { @@ -84,10 +84,11 @@ typedef struct Node *raw_expr; } PartExpressionInfo; -/* expression parsing functions */ +/* Expression parsing functions */ 
PartExpressionInfo *get_part_expression_info(Oid relid, const char *expr_string, bool check_hash_func, bool make_plan); +Node *get_raw_expression(Oid relid, const char *expr, char **query_string_out); /* Partitioning callback type */ typedef enum diff --git a/src/include/pathman.h b/src/include/pathman.h index 9f39eb53..590c3b34 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -46,11 +46,11 @@ #define PATHMAN_CONFIG "pathman_config" #define Natts_pathman_config 6 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ -#define Anum_pathman_config_expression 2 /* partitioned expression (text) */ -#define Anum_pathman_config_raw_expression 3 /* partitioned raw expression (text) */ -#define Anum_pathman_config_atttype 4 /* partitioned atttype */ -#define Anum_pathman_config_parttype 5 /* partitioning type (1|2) */ -#define Anum_pathman_config_range_interval 6 /* interval for RANGE pt. (text) */ +#define Anum_pathman_config_expression 2 /* partition expression (original) */ +#define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ +#define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. 
(text) */ +#define Anum_pathman_config_expression_p 5 /* parsed partition expression (text) */ +#define Anum_pathman_config_atttype 6 /* partitioned atttype */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ec4e6ae5..5f90bba8 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -127,8 +127,8 @@ typedef struct Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ + char *attname; /* original expression */ Node *expr; /* planned expression */ - char *expr_string; /* string with original expression */ PartType parttype; /* partitioning type (HASH | RANGE) */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ @@ -206,7 +206,6 @@ PrelLastChild(const PartRelationInfo *prel) const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, - bool *isnull, bool allow_incomplete); void invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); diff --git a/src/init.c b/src/init.c index c7c68cb8..94688dd6 100644 --- a/src/init.c +++ b/src/init.c @@ -712,7 +712,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Perform checks for non-NULL columns */ Assert(!isnull[Anum_pathman_config_partrel - 1]); Assert(!isnull[Anum_pathman_config_expression - 1]); - Assert(!isnull[Anum_pathman_config_raw_expression - 1]); + Assert(!isnull[Anum_pathman_config_expression_p - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); } @@ -827,7 +827,7 @@ read_pathman_config(void) Assert(!isnull[Anum_pathman_config_partrel - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); Assert(!isnull[Anum_pathman_config_expression - 1]); - Assert(!isnull[Anum_pathman_config_raw_expression - 1]); + Assert(!isnull[Anum_pathman_config_expression_p - 1]); /* Extract values from Datums */ 
relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); @@ -845,7 +845,6 @@ read_pathman_config(void) /* get_pathman_relation_info() will refresh this entry */ refresh_pathman_relation_info(relid, values, - isnull, true); /* allow lazy prel loading */ } diff --git a/src/partition_creation.c b/src/partition_creation.c index ea2047c8..d0857968 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -694,15 +694,11 @@ create_single_partition_internal(Oid parent_relid, if (expr && expr_type) { char *expr_string; - PartExpressionInfo *expr_info; *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); - expr_string = TextDatumGetCString(config_values[Anum_pathman_config_raw_expression - 1]); - expr_info = get_part_expression_info(parent_relid, expr_string, false, false); - - *expr = expr_info->raw_expr; + expr_string = TextDatumGetCString(config_values[Anum_pathman_config_expression - 1]); + *expr = get_raw_expression(parent_relid, expr_string, NULL); pfree(expr_string); - pfree(expr_info); } /* Make up parent's RangeVar */ @@ -1120,9 +1116,9 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) * ----------------------------- */ -/* Drop pg_pathman's check constraint by 'relid' and 'attnum' */ +/* Drop pg_pathman's check constraint by 'relid' */ void -drop_check_constraint(Oid relid, AttrNumber attnum) +drop_check_constraint(Oid relid) { char *constr_name; AlterTableStmt *stmt; @@ -1689,9 +1685,13 @@ text_to_regprocedure(text *proc_signature) } /* Wraps expression by SELECT query and returns parsed tree */ -static Node * -parse_expression(Oid relid, const char *expr, char **query_string_out) +Node * +get_raw_expression(Oid relid, const char *expr, char **query_string_out) { + Node *result; + SelectStmt *select_stmt; + ResTarget *target; + char *fmt = "SELECT (%s) FROM ONLY %s.%s"; char *relname = get_rel_name(relid), *namespace_name = get_namespace_name(get_rel_namespace(relid)); @@ -1705,7 +1705,10 @@ 
parse_expression(Oid relid, const char *expr, char **query_string_out) { *query_string_out = query_string; } - return (Node *)(lfirst(list_head(parsetree_list))); + select_stmt = (SelectStmt *) lfirst(list_head(parsetree_list)); + target = (ResTarget *) lfirst(list_head(select_stmt->targetList)); + result = (Node *) target->val; + return result; } /* @@ -1716,26 +1719,18 @@ PartExpressionInfo * get_part_expression_info(Oid relid, const char *expr_string, bool check_hash_func, bool make_plan) { - Node *parsetree, - *expr_node, - *raw_expression; + Node *expr_node; Query *query; char *query_string, *out_string; PartExpressionInfo *expr_info; - List *querytree_list, - *raw_target_list; + List *querytree_list; PlannedStmt *plan; TargetEntry *target_entry; expr_info = palloc(sizeof(PartExpressionInfo)); - parsetree = parse_expression(relid, expr_string, &query_string); - - /* Convert raw expression to string and return it as datum*/ - raw_target_list = ((SelectStmt *)parsetree)->targetList; - raw_expression = (Node *)(((ResTarget *)(lfirst(list_head(raw_target_list))))->val); /* Keep raw expression */ - expr_info->raw_expr = raw_expression; + expr_info->raw_expr = get_raw_expression(relid, expr_string, &query_string); expr_info->expr_datum = (Datum) 0; /* We don't need pathman activity initialization for this relation yet */ @@ -1743,7 +1738,8 @@ get_part_expression_info(Oid relid, const char *expr_string, /* This will fail with elog in case of wrong expression * with more or less understable text */ - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + querytree_list = pg_analyze_and_rewrite(expr_info->raw_expr, + query_string, NULL, 0); query = (Query *) lfirst(list_head(querytree_list)); /* expr_node is node that we need for further use */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 02f92ede..77958f13 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -331,7 +331,6 @@ show_partition_list_internal(PG_FUNCTION_ARGS) HeapTuple htup; 
Datum values[Natts_pathman_partition_list]; bool isnull[Natts_pathman_partition_list] = { 0 }; - char *partattr_cstr; /* Fetch next PartRelationInfo if needed */ if (usercxt->current_prel == NULL) @@ -376,7 +375,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Fill in common values */ values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); values[Anum_pathman_pl_parttype - 1] = prel->parttype; - values[Anum_pathman_pl_partattr - 1] = prel->expr_string; + values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(prel->attname); switch (prel->parttype) { @@ -616,17 +615,17 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); isnull[Anum_pathman_config_partrel - 1] = false; - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); - isnull[Anum_pathman_config_atttype - 1] = false; + values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); + isnull[Anum_pathman_config_parttype - 1] = false; - values[Anum_pathman_config_expression - 1] = expr_info->expr_datum; + values[Anum_pathman_config_expression - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expression - 1] = false; - values[Anum_pathman_config_raw_expression - 1] = CStringGetTextDatum(expression); - isnull[Anum_pathman_config_raw_expression - 1] = false; + values[Anum_pathman_config_expression_p - 1] = expr_info->expr_datum; + isnull[Anum_pathman_config_expression_p - 1] = false; - values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); - isnull[Anum_pathman_config_parttype - 1] = false; + values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); + isnull[Anum_pathman_config_atttype - 1] = false; if (parttype == PT_RANGE) { @@ -659,7 +658,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) refresh_pathman_relation_info(relid, values, - isnull, false); /* initialize immediately */ } PG_CATCH(); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 
93d800b2..20add638 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -44,7 +44,6 @@ static void merge_range_partitions_internal(Oid parent, uint32 nparts); static void modify_range_constraint(Oid child_relid, const char *attname, - AttrNumber attnum, Oid atttype, const Bound *lower, const Bound *upper); @@ -361,8 +360,9 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Datum build_range_condition(PG_FUNCTION_ARGS) { + Node *expr; Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); + char *expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); Bound min, max; @@ -378,8 +378,9 @@ build_range_condition(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(PG_GETARG_DATUM(3)); + expr = get_raw_expression(relid, expression, NULL); con = build_range_check_constraint(relid, - NULL, + expr, &min, &max, bounds_type); @@ -527,14 +528,11 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } /* Drop old constraint and create a new one */ - /* FIX: this modify_range_constraint(parts[0], - get_relid_attribute_name(prel->key, - prel->attnum), - prel->attnum, + prel->attname, prel->atttype, &first->min, - &last->max); */ + &last->max); /* Make constraint visible */ CommandCounterIncrement(); @@ -613,14 +611,11 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) *next = &ranges[i + 1]; /* Drop old constraint and create a new one */ - /* modify_range_constraint(next->child_oid, - get_relid_attribute_name(prel->key, - prel->attnum), - prel->attnum, + prel->attname, prel->atttype, &cur->min, - &next->max);*/ + &next->max); } /* Finally drop this partition */ @@ -828,21 +823,23 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) static void modify_range_constraint(Oid child_relid, const char *attname, - AttrNumber attnum, Oid atttype, const Bound *lower, const Bound *upper) { + Node *expr; Constraint *constraint; Relation partition_rel; - //char *attname_nonconst = pstrdup(attname); /* Drop old constraint */ - 
drop_check_constraint(child_relid, attnum); + drop_check_constraint(child_relid); + + /* Parse expression */ + expr = get_raw_expression(child_relid, attname, NULL); /* Build a new one */ constraint = build_range_check_constraint(child_relid, - NULL, + expr, lower, upper, atttype); @@ -853,8 +850,6 @@ modify_range_constraint(Oid child_relid, list_make1(constraint), false, true, true); heap_close(partition_rel, NoLock); - - //pfree(attname_nonconst); } /* diff --git a/src/relation_info.c b/src/relation_info.c index 14b371d3..43f74cf6 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -118,7 +118,6 @@ expression_mutator(Node *node, struct expr_mutator_context *context) const PartRelationInfo * refresh_pathman_relation_info(Oid relid, Datum *values, - bool *isnull, bool allow_incomplete) { const LOCKMODE lockmode = AccessShareLock; @@ -186,7 +185,7 @@ refresh_pathman_relation_info(Oid relid, prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); /* Read config values */ - expr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); expr_type = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); /* @@ -194,8 +193,7 @@ refresh_pathman_relation_info(Oid relid, * from config */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); - prel->expr_string = TextDatumGetCString( - values[Anum_pathman_config_raw_expression - 1]); + prel->attname = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); prel->expr = (Node *) stringToNode(expr); fix_opfuncids(prel->expr); prel->expr = expression_mutator(prel->expr, NULL); @@ -358,7 +356,7 @@ get_pathman_relation_info(Oid relid) { /* Refresh partitioned table cache entry (might turn NULL) */ /* TODO: possible refactoring, pass found 'prel' instead of searching */ - prel = refresh_pathman_relation_info(relid, values, isnull, false); + prel = refresh_pathman_relation_info(relid, values, false); } /* 
Else clear remaining cache entry */ @@ -735,7 +733,6 @@ try_perform_parent_refresh(Oid parent) /* If anything went wrong, return false (actually, it might emit ERROR) */ refresh_pathman_relation_info(parent, values, - isnull, true); /* allow lazy */ } /* Not a partitioned relation */ From c103ecc0c8f892151a563ee5501f4ddb27c9c002 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 28 Mar 2017 19:05:20 +0300 Subject: [PATCH 0299/1124] restore compatibility with PostgreSQL 9.5 --- src/compat/pg_compat.c | 39 ++++++++++++++++++++++++++++++++++ src/include/compat/pg_compat.h | 9 ++++++++ src/include/utils.h | 3 --- src/init.c | 6 ++++++ src/pl_funcs.c | 25 ++++++++++++++++------ src/utils.c | 37 -------------------------------- 6 files changed, 73 insertions(+), 46 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 766cfc74..6e441980 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -268,6 +268,45 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); } +/* + * Examine contents of MemoryContext. 
+ */ +void +McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals) +{ + MemoryContextCounters local_totals; + MemoryContext child; + + AssertArg(MemoryContextIsValid(context)); + + /* Examine the context itself */ + (*context->methods->stats) (context, level, false, totals); + + memset(&local_totals, 0, sizeof(local_totals)); + + if (!examine_children) + return; + + /* Examine children */ + for (child = context->firstchild; + child != NULL; + child = child->nextchild) + { + + McxtStatsInternal(child, level + 1, + examine_children, + &local_totals); + } + + /* Save children stats */ + totals->nblocks += local_totals.nblocks; + totals->freechunks += local_totals.freechunks; + totals->totalspace += local_totals.totalspace; + totals->freespace += local_totals.freespace; +} + #else /* PG_VERSION_NUM >= 90500 */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 0928f28d..58401587 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -78,6 +78,12 @@ extern Result *make_result(List *tlist, make_result((tlist), (resconstantqual), (subplan)) +/* McxtStatsInternal() */ +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); + + /* pull_var_clause() */ #define pull_var_clause_compat(node, aggbehavior, phbehavior) \ pull_var_clause((node), (aggbehavior) | (phbehavior)) @@ -93,6 +99,9 @@ extern void set_rel_consider_parallel(PlannerInfo *root, #else /* PG_VERSION_NUM >= 90500 */ +#define ALLOCSET_DEFAULT_SIZES \ + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE + /* adjust_appendrel_attrs() */ #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ diff --git a/src/include/utils.h b/src/include/utils.h index 1fd966a4..8ecaf46c 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -34,9 +34,6 @@ bool check_security_policy_internal(Oid relid, 
Oid role); */ Oid get_pathman_schema(void); List * list_reverse(List *l); -void McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals); /* * Useful functions for relations. diff --git a/src/init.c b/src/init.c index a35f8c37..b1d9fd1c 100644 --- a/src/init.c +++ b/src/init.c @@ -38,6 +38,12 @@ #include "utils/typcache.h" +/* Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ +#if PG_VERSION_NUM < 90600 +#define ALLOCSET_DEFAULT_SIZES \ + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE +#endif + /* Initial size of 'partitioned_rels' table */ #define PART_RELS_SIZE 10 #define CHILD_FACTOR 500 diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fff5a51f..ffc628e2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -332,32 +332,45 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) { HTAB *current_htab; MemoryContext current_mcxt; - MemoryContextCounters mcxt_stats; HeapTuple htup; Datum values[Natts_pathman_cache_stats]; bool isnull[Natts_pathman_cache_stats] = { 0 }; - /* Prepare context counters */ - memset(&mcxt_stats, 0, sizeof(mcxt_stats)); +#if PG_VERSION_NUM >= 90600 + MemoryContextCounters mcxt_stats; +#endif /* Select current memory context and hash table (cache) */ current_mcxt = usercxt->pathman_contexts[usercxt->current_item]; current_htab = usercxt->pathman_htables[usercxt->current_item]; + values[Anum_pathman_cs_context - 1] = + CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); + +/* We can't check stats of mcxt prior to 9.6 */ +#if PG_VERSION_NUM >= 90600 + + /* Prepare context counters */ + memset(&mcxt_stats, 0, sizeof(mcxt_stats)); + /* NOTE: we do not consider child contexts if it's TopPathmanContext */ McxtStatsInternal(current_mcxt, 0, (current_mcxt != TopPathmanContext), &mcxt_stats); - values[Anum_pathman_cs_context - 1] = - CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); - values[Anum_pathman_cs_size - 1] = 
Int64GetDatum(mcxt_stats.totalspace); values[Anum_pathman_cs_used - 1] = Int64GetDatum(mcxt_stats.totalspace - mcxt_stats.freespace); +#else + + /* Set unsupported fields to NULL */ + isnull[Anum_pathman_cs_size - 1] = true; + isnull[Anum_pathman_cs_used - 1] = true; +#endif + values[Anum_pathman_cs_entries - 1] = Int64GetDatum(current_htab ? hash_get_num_entries(current_htab) : 0); diff --git a/src/utils.c b/src/utils.c index c26c4863..ec1b1dd9 100644 --- a/src/utils.c +++ b/src/utils.c @@ -164,43 +164,6 @@ list_reverse(List *l) return result; } -void -McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals) -{ - MemoryContextCounters local_totals; - MemoryContext child; - - AssertArg(MemoryContextIsValid(context)); - - /* Examine the context itself */ - (*context->methods->stats) (context, level, false, totals); - - memset(&local_totals, 0, sizeof(local_totals)); - - if (!examine_children) - return; - - /* Examine children */ - for (child = context->firstchild; - child != NULL; - child = child->nextchild) - { - - McxtStatsInternal(child, level + 1, - examine_children, - &local_totals); - } - - /* Save children stats */ - totals->nblocks += local_totals.nblocks; - totals->freechunks += local_totals.freechunks; - totals->totalspace += local_totals.totalspace; - totals->freespace += local_totals.freespace; -} - - /* * Get relation owner. 
From 4d9d52a8e7e87167dde48dc6b77415a30dd87c6d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 28 Mar 2017 19:17:39 +0300 Subject: [PATCH 0300/1124] add missing includes, formatting --- src/include/compat/pg_compat.h | 1 + src/pl_funcs.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 58401587..1f81f5f9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -14,6 +14,7 @@ #include "compat/debug_compat_features.h" #include "postgres.h" +#include "nodes/memnodes.h" #include "nodes/relation.h" #include "nodes/pg_list.h" #include "optimizer/cost.h" diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ffc628e2..c847c043 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -372,7 +372,9 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) #endif values[Anum_pathman_cs_entries - 1] = - Int64GetDatum(current_htab ? hash_get_num_entries(current_htab) : 0); + Int64GetDatum(current_htab ? 
+ hash_get_num_entries(current_htab) : + 0); /* Switch to next item */ usercxt->current_item++; From f00f0d1cf5b67cd5003d7dc25320ad1f9c93fd1a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 28 Mar 2017 19:46:38 +0300 Subject: [PATCH 0301/1124] Fix expression parser --- src/include/partition_creation.h | 3 ++- src/partition_creation.c | 20 +++++++++++++++----- src/pl_range_funcs.c | 4 ++-- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 4f3adce3..f4031278 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -88,7 +88,8 @@ typedef struct PartExpressionInfo *get_part_expression_info(Oid relid, const char *expr_string, bool check_hash_func, bool make_plan); -Node *get_raw_expression(Oid relid, const char *expr, char **query_string_out); +Node *get_raw_expression(Oid relid, const char *expr, char **query_string_out, + Node **parsetree); /* Partitioning callback type */ typedef enum diff --git a/src/partition_creation.c b/src/partition_creation.c index d0857968..daf116f0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -697,7 +697,7 @@ create_single_partition_internal(Oid parent_relid, *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); expr_string = TextDatumGetCString(config_values[Anum_pathman_config_expression - 1]); - *expr = get_raw_expression(parent_relid, expr_string, NULL); + *expr = get_raw_expression(parent_relid, expr_string, NULL, NULL); pfree(expr_string); } @@ -1686,7 +1686,8 @@ text_to_regprocedure(text *proc_signature) /* Wraps expression by SELECT query and returns parsed tree */ Node * -get_raw_expression(Oid relid, const char *expr, char **query_string_out) +get_raw_expression(Oid relid, const char *expr, char **query_string_out, + Node **parsetree) { Node *result; SelectStmt *select_stmt; @@ -1705,7 +1706,14 @@ get_raw_expression(Oid relid, const char *expr, char 
**query_string_out) { *query_string_out = query_string; } + select_stmt = (SelectStmt *) lfirst(list_head(parsetree_list)); + + if (parsetree) + { + *parsetree = (Node *) select_stmt; + } + target = (ResTarget *) lfirst(list_head(select_stmt->targetList)); result = (Node *) target->val; return result; @@ -1719,7 +1727,8 @@ PartExpressionInfo * get_part_expression_info(Oid relid, const char *expr_string, bool check_hash_func, bool make_plan) { - Node *expr_node; + Node *expr_node, + *parsetree; Query *query; char *query_string, *out_string; PartExpressionInfo *expr_info; @@ -1730,7 +1739,8 @@ get_part_expression_info(Oid relid, const char *expr_string, expr_info = palloc(sizeof(PartExpressionInfo)); /* Keep raw expression */ - expr_info->raw_expr = get_raw_expression(relid, expr_string, &query_string); + expr_info->raw_expr = get_raw_expression(relid, expr_string, + &query_string, &parsetree); expr_info->expr_datum = (Datum) 0; /* We don't need pathman activity initialization for this relation yet */ @@ -1738,7 +1748,7 @@ get_part_expression_info(Oid relid, const char *expr_string, /* This will fail with elog in case of wrong expression * with more or less understable text */ - querytree_list = pg_analyze_and_rewrite(expr_info->raw_expr, + querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); query = (Query *) lfirst(list_head(querytree_list)); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 20add638..4387d1b7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -378,7 +378,7 @@ build_range_condition(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(PG_GETARG_DATUM(3)); - expr = get_raw_expression(relid, expression, NULL); + expr = get_raw_expression(relid, expression, NULL, NULL); con = build_range_check_constraint(relid, expr, &min, &max, @@ -835,7 +835,7 @@ modify_range_constraint(Oid child_relid, drop_check_constraint(child_relid); /* Parse expression */ - expr = get_raw_expression(child_relid, attname, 
NULL); + expr = get_raw_expression(child_relid, attname, NULL, NULL); /* Build a new one */ constraint = build_range_check_constraint(child_relid, From 621ce289c856b48d83c9d0aa266ba97fb5d89b9e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 28 Mar 2017 19:48:15 +0300 Subject: [PATCH 0302/1124] add tests for 'pathman_cache_stats' view --- expected/pathman_calamity.out | 28 ++++++++++++++++++++++++++++ sql/pathman_calamity.sql | 18 ++++++++++++++++++ src/include/init.h | 6 +++--- 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 34e8c61c..8ce7b651 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -670,6 +670,34 @@ NOTICE: drop cascades to table calamity.test_range_oid_1 DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check view pathman_cache_stats */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +NOTICE: sequence "test_pathman_cache_stats_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition pruning cache | 1 +(4 rows) + +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; /* * ------------------------------------------ * Special tests (uninitialized pg_pathman) diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 6094cf5b..e8239161 100644 --- 
a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -260,6 +260,24 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; + + + +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ + +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; + +/* check view pathman_cache_stats */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/init.h b/src/include/init.h index 9037cf56..f70d8e64 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -67,9 +67,9 @@ static inline const char * simpify_mcxt_name(MemoryContext mcxt) { static const char *top_mcxt = "maintenance", - *bound_mcxt = "partition info cache", - *parent_mcxt = "parent mapping cache", - *constr_mcxt = "bounds cache"; + *bound_mcxt = "partition pruning cache", + *parent_mcxt = "partition parents cache", + *constr_mcxt = "partition bounds cache"; if (mcxt == TopPathmanContext) return top_mcxt; From 385728b30f43ee91fb4e090970c0844905c97efd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 29 Mar 2017 14:46:04 +0300 Subject: [PATCH 0303/1124] Fix validation and constraint creation --- range.sql | 4 +-- src/include/partition_creation.h | 1 + src/init.c | 49 ++++++-------------------------- src/partition_creation.c | 24 ++++++++-------- src/pl_range_funcs.c | 6 ++-- 5 files changed, 28 insertions(+), 56 deletions(-) diff --git a/range.sql b/range.sql index ad5d405f..540bb1f3 100644 --- a/range.sql +++ b/range.sql @@ -164,13 +164,13 @@ BEGIN /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate 
data if asked to */ + /* Relocate data if asked to IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; + END IF; */ RETURN p_count; END diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index f4031278..ccc8b22e 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -30,6 +30,7 @@ Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Create one RANGE partition */ Oid create_single_range_partition_internal(Oid parent_relid, + Oid value_type, const Bound *start_value, const Bound *end_value, RangeVar *partition_rv, diff --git a/src/init.c b/src/init.c index 94688dd6..38f451cc 100644 --- a/src/init.c +++ b/src/init.c @@ -77,13 +77,11 @@ static int cmp_range_entries(const void *p1, const void *p2, void *arg); static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, - const AttrNumber part_attno, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null); static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, - const AttrNumber part_attno, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null); @@ -93,7 +91,6 @@ static bool validate_hash_constraint(const Expr *expr, static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, - const AttrNumber part_attno, Datum *val); static int oid_cmp(const void *p1, const void *p2); @@ -403,7 +400,7 @@ fill_prel_with_partitions(const Oid *partitions, Datum lower, upper; bool lower_null, upper_null; - if (validate_range_constraint(con_expr, prel, 0, + if (validate_range_constraint(con_expr, prel, &lower, &upper, &lower_null, &upper_null)) { @@ -917,12 +914,11 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(flinfo, &v1->min, &v2->min); 
} -/* Validates a single expression of kind VAR >= CONST or VAR < CONST */ +/* Validates a single expression of kind EXPRESSION >= CONST or EXPRESSION < CONST */ static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, - const AttrNumber part_attno, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null) { @@ -940,7 +936,7 @@ validate_range_opexpr(const Expr *expr, opexpr = (const OpExpr *) expr; /* Try reading Const value */ - if (!read_opexpr_const(opexpr, prel, part_attno, &val)) + if (!read_opexpr_const(opexpr, prel, &val)) return false; /* Examine the strategy (expect '>=' OR '<') */ @@ -979,16 +975,15 @@ validate_range_opexpr(const Expr *expr, /* * Validates range constraint. It MUST have one of the following formats: * - * VARIABLE >= CONST AND VARIABLE < CONST - * VARIABLE >= CONST - * VARIABLE < CONST + * EXPRESSION >= CONST AND EXPRESSION < CONST + * EXPRESSION >= CONST + * EXPRESSION < CONST * * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. 
*/ static bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, - const AttrNumber part_attno, Datum *lower, Datum *upper, bool *lower_null, bool *upper_null) { @@ -1015,7 +1010,7 @@ validate_range_constraint(const Expr *expr, const OpExpr *opexpr = (const OpExpr *) lfirst(lc); /* Exit immediately if something is wrong */ - if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, part_attno, + if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, lower, upper, lower_null, upper_null)) return false; } @@ -1025,55 +1020,29 @@ validate_range_constraint(const Expr *expr, } /* It might be just an OpExpr clause */ - else return validate_range_opexpr(expr, prel, tce, part_attno, + else return validate_range_opexpr(expr, prel, tce, lower, upper, lower_null, upper_null); } /* * Reads const value from expressions of kind: - * 1) VAR >= CONST OR VAR < CONST + * 1) EXPRESSION >= CONST OR EXPRESSION < CONST * 2) RELABELTYPE(VAR) >= CONST OR RELABELTYPE(VAR) < CONST */ static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, - const AttrNumber part_attno, Datum *val) { - const Node *left; const Node *right; - const Var *part_attr; /* partitioned column */ const Const *constant; bool cast_success; if (list_length(opexpr->args) != 2) return false; - left = linitial(opexpr->args); right = lsecond(opexpr->args); - /* VAR is a part of RelabelType node */ - if (IsA(left, RelabelType) && IsA(right, Const)) - { - Var *var = (Var *) ((RelabelType *) left)->arg; - - if (IsA(var, Var)) - part_attr = var; - else - return false; - } - /* left arg is of type VAR */ - else if (IsA(left, Var) && IsA(right, Const)) - { - part_attr = (Var *) left; - } - /* Something is wrong, retreat! 
*/ - else return false; - - /* VAR.attno == partitioned attribute number */ - if (part_attr->varoattno != part_attno) - return false; - /* CONST is NOT NULL */ if (((Const *) right)->constisnull) return false; diff --git a/src/partition_creation.c b/src/partition_creation.c index daf116f0..5075055e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -96,13 +96,13 @@ static RangeVar *makeRangeVarFromRelid(Oid relid); /* Create one RANGE partition [start_value, end_value) */ Oid create_single_range_partition_internal(Oid parent_relid, + Oid value_type, const Bound *start_value, const Bound *end_value, RangeVar *partition_rv, char *tablespace) { - Oid partition_relid, - value_type; + Oid partition_relid; Constraint *check_constr; Node *expr; init_callback_params callback_params; @@ -123,7 +123,7 @@ create_single_range_partition_internal(Oid parent_relid, partition_relid = create_single_partition_internal(parent_relid, partition_rv, tablespace, - &value_type, + NULL, &expr); /* Build check constraint for RANGE partition */ @@ -559,6 +559,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[1] = MakeBound(should_append ? 
cur_leading_bound : cur_following_bound); last_partition = create_single_range_partition_internal(parent_relid, + value_type, &bounds[0], &bounds[1], NULL, NULL); @@ -691,11 +692,15 @@ create_single_partition_internal(Oid parent_relid, parent_nsp_name = get_namespace_name(parent_nsp); /* Fetch expression for constraint */ - if (expr && expr_type) + if (expr_type) + { + *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); + } + + if (expr) { char *expr_string; - *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); expr_string = TextDatumGetCString(config_values[Anum_pathman_config_expression - 1]); *expr = get_raw_expression(parent_relid, expr_string, NULL, NULL); pfree(expr_string); @@ -1154,11 +1159,6 @@ build_raw_range_check_tree(Node *raw_expression, *right_arg = makeNode(A_Expr); A_Const *left_const = makeNode(A_Const), *right_const = makeNode(A_Const); - ColumnRef *col_ref = makeNode(ColumnRef); - - /* Partitioned column */ - //col_ref->fields = list_make1(makeString(attname)); - col_ref->location = -1; and_oper->boolop = AND_EXPR; and_oper->args = NIL; @@ -1174,7 +1174,7 @@ build_raw_range_check_tree(Node *raw_expression, left_arg->name = list_make1(makeString(">=")); left_arg->kind = AEXPR_OP; - left_arg->lexpr = (Node *) col_ref; + left_arg->lexpr = raw_expression; left_arg->rexpr = (Node *) left_const; left_arg->location = -1; @@ -1191,7 +1191,7 @@ build_raw_range_check_tree(Node *raw_expression, right_arg->name = list_make1(makeString("<")); right_arg->kind = AEXPR_OP; - right_arg->lexpr = (Node *) col_ref; + right_arg->lexpr = raw_expression; right_arg->rexpr = (Node *) right_const; right_arg->location = -1; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 4387d1b7..3c5a1138 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -81,7 +81,8 @@ PG_FUNCTION_INFO_V1( validate_interval_value ); Datum create_single_range_partition_pl(PG_FUNCTION_ARGS) { - Oid parent_relid; + Oid 
parent_relid, + value_type; /* RANGE boundaries + value type */ Bound start, @@ -101,7 +102,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch mandatory args */ parent_relid = PG_GETARG_OID(0); - //value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); start = PG_ARGISNULL(1) ? MakeBoundInf(MINUS_INFINITY) : @@ -132,6 +133,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Create a new RANGE partition and return its Oid */ partition_relid = create_single_range_partition_internal(parent_relid, + value_type, &start, &end, partition_name_rv, From 39295b66bde7dd292bfcf23cef86329ee2c60bd0 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 29 Mar 2017 16:21:47 +0300 Subject: [PATCH 0304/1124] Fix data relocation after partitions created --- hash.sql | 4 ++-- range.sql | 4 ++-- src/partition_creation.c | 17 +++++++++++++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/hash.sql b/hash.sql index f31de040..ba91fb4a 100644 --- a/hash.sql +++ b/hash.sql @@ -47,13 +47,13 @@ BEGIN /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); - /* Copy data + /* Copy data */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; */ + END IF; RETURN partitions_count; END diff --git a/range.sql b/range.sql index 540bb1f3..ad5d405f 100644 --- a/range.sql +++ b/range.sql @@ -164,13 +164,13 @@ BEGIN /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to + /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; */ + END IF; RETURN p_count; END diff --git 
a/src/partition_creation.c b/src/partition_creation.c index 5075055e..8a16d2cd 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1735,9 +1735,14 @@ get_part_expression_info(Oid relid, const char *expr_string, List *querytree_list; PlannedStmt *plan; TargetEntry *target_entry; + MemoryContext pathman_parse_context, oldcontext; expr_info = palloc(sizeof(PartExpressionInfo)); + pathman_parse_context = AllocSetContextCreate(TopMemoryContext, + "pathman parse context", + ALLOCSET_DEFAULT_SIZES); + /* Keep raw expression */ expr_info->raw_expr = get_raw_expression(relid, expr_string, &query_string, &parsetree); @@ -1746,6 +1751,12 @@ get_part_expression_info(Oid relid, const char *expr_string, /* We don't need pathman activity initialization for this relation yet */ pathman_hooks_enabled = false; + /* We use separate memory context here, just to make sure we don't leave + * anything behind after analyze and planning. + * Parsed raw expression will stay in context of caller + */ + oldcontext = MemoryContextSwitchTo(pathman_parse_context); + /* This will fail with elog in case of wrong expression * with more or less understable text */ querytree_list = pg_analyze_and_rewrite(parsetree, @@ -1776,11 +1787,13 @@ get_part_expression_info(Oid relid, const char *expr_string, target_entry = lfirst(list_head(plan->planTree->targetlist)); expr_node = (Node *) target_entry->expr; expr_node = eval_const_expressions(NULL, expr_node); + out_string = nodeToString(expr_node); + + MemoryContextSwitchTo(oldcontext); /* Convert expression to string and return it as datum */ - out_string = nodeToString(expr_node); expr_info->expr_datum = CStringGetTextDatum(out_string); - pfree(out_string); + MemoryContextReset(pathman_parse_context); end: /* Enable pathman hooks */ From dcc08d8e231b44b83f1b8c374930a8be684e4710 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 16:38:32 +0300 Subject: [PATCH 0305/1124] introduce 'pg_pathman.enable_bounds_cache' GUC, 
improve memory consumption of function fill_prel_with_partitions() --- expected/pathman_calamity.out | 89 +++++++++++++++++++++-- sql/pathman_calamity.sql | 17 +++++ src/hooks.c | 14 ++-- src/include/compat/pg_compat.h | 7 ++ src/include/init.h | 8 +- src/include/relation_info.h | 9 +++ src/init.c | 8 +- src/pg_pathman.c | 1 + src/relation_info.c | 129 ++++++++++++++++++++++++--------- 9 files changed, 225 insertions(+), 57 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 8ce7b651..b698bed1 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -677,6 +677,8 @@ DROP EXTENSION pg_pathman; */ CREATE SCHEMA calamity; CREATE EXTENSION pg_pathman; +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; /* check view pathman_cache_stats */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); @@ -687,16 +689,89 @@ NOTICE: sequence "test_pathman_cache_stats_seq" does not exist, skipping (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries --------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition parents cache | 10 - partition pruning cache | 1 + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 1 + partition parents cache | 10 (4 rows) +SELECT drop_partitions('calamity.test_pathman_cache_stats'); +NOTICE: function calamity.test_pathman_cache_stats_upd_trig_func() does not exist, skipping +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_1 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_2 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_3 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_4 +NOTICE: 0 rows copied from 
calamity.test_pathman_cache_stats_5 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_6 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_7 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_8 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_9 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_10 + drop_partitions +----------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 0 + partition parents cache | 0 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats; +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (one more time) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +SELECT drop_partitions('calamity.test_pathman_cache_stats'); +NOTICE: function calamity.test_pathman_cache_stats_upd_trig_func() does not exist, skipping +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_1 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_2 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_3 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_4 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_5 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_6 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_7 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_8 +NOTICE: 0 
rows copied from calamity.test_pathman_cache_stats_9 +NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_10 + drop_partitions +----------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 0 + partition parents cache | 0 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats; DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to sequence calamity.test_pathman_cache_stats_seq DROP EXTENSION pg_pathman; /* * ------------------------------------------ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index e8239161..cbeea2f9 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -274,10 +274,27 @@ DROP EXTENSION pg_pathman; CREATE SCHEMA calamity; CREATE EXTENSION pg_pathman; +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; + /* check view pathman_cache_stats */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT drop_partitions('calamity.test_pathman_cache_stats'); +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats; + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; + +/* check view pathman_cache_stats (one more time) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT drop_partitions('calamity.test_pathman_cache_stats'); +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* 
OK */ +DROP TABLE calamity.test_pathman_cache_stats; DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index 71694b1f..1dbe7673 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -447,14 +447,16 @@ pg_pathman_enable_assign_hook(bool newval, void *extra) pg_pathman_init_state.override_copy && pg_pathman_enable_runtimeappend && pg_pathman_enable_runtime_merge_append && - pg_pathman_enable_partition_filter)) + pg_pathman_enable_partition_filter && + pg_pathman_enable_bounds_cache)) return; - pg_pathman_init_state.auto_partition = newval; - pg_pathman_init_state.override_copy = newval; - pg_pathman_enable_runtime_merge_append = newval; - pg_pathman_enable_runtimeappend = newval; - pg_pathman_enable_partition_filter = newval; + pg_pathman_init_state.auto_partition = newval; + pg_pathman_init_state.override_copy = newval; + pg_pathman_enable_runtimeappend = newval; + pg_pathman_enable_runtime_merge_append = newval; + pg_pathman_enable_partition_filter = newval; + pg_pathman_enable_bounds_cache = newval; elog(NOTICE, "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 1f81f5f9..e731268e 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -19,6 +19,13 @@ #include "nodes/pg_list.h" #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "utils/memutils.h" + +/* Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ +#if PG_VERSION_NUM < 90600 +#define ALLOCSET_DEFAULT_SIZES \ + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE +#endif /* diff --git a/src/include/init.h b/src/include/init.h index f70d8e64..039e31df 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -67,21 +67,21 @@ static inline const char * simpify_mcxt_name(MemoryContext mcxt) { static const char *top_mcxt = "maintenance", - *bound_mcxt = "partition pruning cache", + *rel_mcxt = 
"partition dispatch cache", *parent_mcxt = "partition parents cache", - *constr_mcxt = "partition bounds cache"; + *bound_mcxt = "partition bounds cache"; if (mcxt == TopPathmanContext) return top_mcxt; else if (mcxt == PathmanRelationCacheContext) - return bound_mcxt; + return rel_mcxt; else if (mcxt == PathmanParentCacheContext) return parent_mcxt; else if (mcxt == PathmanBoundCacheContext) - return constr_mcxt; + return bound_mcxt; else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 3399cb68..710b5e40 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -98,6 +98,7 @@ cmp_bounds(FmgrInfo *cmp_func, const Bound *b1, const Bound *b2) } + /* * Partitioning type. */ @@ -192,6 +193,7 @@ typedef enum } PartParentSearch; + /* * PartRelationInfo field access macros. */ @@ -324,4 +326,11 @@ FreeRangesArray(PartRelationInfo *prel) } } + +/* For pg_pathman.enable_bounds_cache GUC */ +extern bool pg_pathman_enable_bounds_cache; + +void init_relation_info_static_data(void); + + #endif /* RELATION_INFO_H */ diff --git a/src/init.c b/src/init.c index b1d9fd1c..7fc8d50a 100644 --- a/src/init.c +++ b/src/init.c @@ -11,6 +11,8 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "hooks.h" #include "init.h" #include "pathman.h" @@ -38,12 +40,6 @@ #include "utils/typcache.h" -/* Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ -#if PG_VERSION_NUM < 90600 -#define ALLOCSET_DEFAULT_SIZES \ - ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE -#endif - /* Initial size of 'partitioned_rels' table */ #define PART_RELS_SIZE 10 #define CHILD_FACTOR 500 diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4fa3f4bc..2b1c9452 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -160,6 +160,7 @@ _PG_init(void) /* Initialize static data for all subsystems 
*/ init_main_pathman_toggles(); + init_relation_info_static_data(); init_runtimeappend_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); diff --git a/src/relation_info.c b/src/relation_info.c index 55727787..772d34e9 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -8,6 +8,8 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "relation_info.h" #include "init.h" #include "utils.h" @@ -35,6 +37,12 @@ #endif +/* + * For pg_pathman.enable_bounds_cache GUC. + */ +bool pg_pathman_enable_bounds_cache = true; + + /* * We delay all invalidation jobs received in relcache hook. */ @@ -80,6 +88,21 @@ static int cmp_range_entries(const void *p1, const void *p2, void *arg); +void +init_relation_info_static_data(void) +{ + DefineCustomBoolVariable("pg_pathman.enable_bounds_cache", + "Make updates of partition dispatch cache faster", + NULL, + &pg_pathman_enable_bounds_cache, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); +} + /* * refresh\invalidate\get\remove PartRelationInfo functions. 
@@ -418,24 +441,39 @@ fill_prel_with_partitions(PartRelationInfo *prel, ) uint32 i; - MemoryContext mcxt = PathmanRelationCacheContext; + MemoryContext cache_mcxt = PathmanRelationCacheContext, + temp_mcxt, /* reference temporary mcxt */ + old_mcxt; /* reference current mcxt */ AssertTemporaryContext(); /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ - prel->children = AllocZeroArray(PT_ANY, mcxt, parts_count, Oid); - prel->ranges = AllocZeroArray(PT_RANGE, mcxt, parts_count, RangeEntry); + prel->children = AllocZeroArray(PT_ANY, cache_mcxt, parts_count, Oid); + prel->ranges = AllocZeroArray(PT_RANGE, cache_mcxt, parts_count, RangeEntry); /* Set number of children */ PrelChildrenCount(prel) = parts_count; + /* Create temporary memory context for loop */ + temp_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(fill_prel_with_partitions), + ALLOCSET_DEFAULT_SIZES); + /* Initialize bounds of partitions */ for (i = 0; i < PrelChildrenCount(prel); i++) { PartBoundInfo *bound_info; - /* Fetch constraint's expression tree */ - bound_info = get_bounds_of_partition(partitions[i], prel); + /* Clear all previous allocations */ + MemoryContextReset(temp_mcxt); + + /* Switch to the temporary memory context */ + old_mcxt = MemoryContextSwitchTo(temp_mcxt); + { + /* Fetch constraint's expression tree */ + bound_info = get_bounds_of_partition(partitions[i], prel); + } + MemoryContextSwitchTo(old_mcxt); /* Copy bounds from bound cache */ switch (prel->parttype) @@ -446,23 +484,20 @@ fill_prel_with_partitions(PartRelationInfo *prel, case PT_RANGE: { - MemoryContext old_mcxt; - /* Copy child's Oid */ prel->ranges[i].child_oid = bound_info->child_rel; /* Copy all min & max Datums to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); - - prel->ranges[i].min = CopyBound(&bound_info->range_min, - prel->attbyval, - prel->attlen); - - prel->ranges[i].max = CopyBound(&bound_info->range_max, - prel->attbyval, - 
prel->attlen); - - /* Switch back */ + old_mcxt = MemoryContextSwitchTo(cache_mcxt); + { + prel->ranges[i].min = CopyBound(&bound_info->range_min, + prel->attbyval, + prel->attlen); + + prel->ranges[i].max = CopyBound(&bound_info->range_max, + prel->attbyval, + prel->attlen); + } MemoryContextSwitchTo(old_mcxt); } break; @@ -479,6 +514,9 @@ fill_prel_with_partitions(PartRelationInfo *prel, } } + /* Drop temporary memory context */ + MemoryContextDelete(temp_mcxt); + /* Finalize 'prel' for a RANGE-partitioned table */ if (prel->parttype == PT_RANGE) { @@ -843,19 +881,27 @@ try_perform_parent_refresh(Oid parent) void forget_bounds_of_partition(Oid partition) { - PartBoundInfo *pcon = pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL); - if (pcon) + PartBoundInfo *pbin; + + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bound_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ + + /* Free this entry */ + if (pbin) { /* Call pfree() if it's RANGE bounds */ - if (pcon->parttype == PT_RANGE) + if (pbin->parttype == PT_RANGE) { - FreeBound(&pcon->range_min, pcon->byval); - FreeBound(&pcon->range_max, pcon->byval); + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); } + /* Finally remove this entry from cache */ pathman_cache_search_relid(bound_cache, partition, HASH_REMOVE, @@ -867,10 +913,23 @@ forget_bounds_of_partition(Oid partition) PartBoundInfo * get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) { - PartBoundInfo *pbin = pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL); + PartBoundInfo *pbin; + + /* + * We might end up building the constraint + * tree that we wouldn't want to keep. + */ + AssertTemporaryContext(); + + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bound_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ + + /* Build new entry */ if (!pbin) { PartBoundInfo pbin_local; @@ -896,10 +955,12 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) fill_pbin_with_bounds(&pbin_local, prel, con_expr, part_attno); /* We strive to delay the creation of cache's entry */ - pbin = pathman_cache_search_relid(bound_cache, - partition, - HASH_ENTER, - NULL); + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bound_cache, + partition, + HASH_ENTER, + NULL) : + palloc(sizeof(PartBoundInfo)); /* Copy data from 'pbin_local' */ memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); From d499bc6bd458ee624293b8ab60b13fc94c86f53e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 17:56:24 +0300 Subject: [PATCH 0306/1124] improve function read_opexpr_const() --- src/init.c | 99 ++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 73 insertions(+), 26 deletions(-) diff --git a/src/init.c b/src/init.c index 7fc8d50a..bc98e005 100644 --- a/src/init.c +++ b/src/init.c @@ -85,7 +85,7 @@ static bool validate_range_opexpr(const Expr *expr, static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, const AttrNumber part_attno, - Datum *val); + Datum *value); static int oid_cmp(const void *p1, const void *p2); @@ -877,53 +877,100 @@ static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, const AttrNumber part_attno, - Datum *val) + Datum *value) { const Node *left; const Node *right; const Var *part_attr; /* partitioned column */ - const Const *constant; + const Const *boundary; bool cast_success; + /* There should be exactly 2 args */ if (list_length(opexpr->args) != 2) return false; + /* Fetch args of expression */ left = linitial(opexpr->args); right = lsecond(opexpr->args); - /* VAR is a part of RelabelType node */ - if (IsA(left, RelabelType) && IsA(right, Const)) + /* 
Examine LEFT argument */ + switch (nodeTag(left)) { - Var *var = (Var *) ((RelabelType *) left)->arg; + case T_RelabelType: + { + Var *var = (Var *) ((RelabelType *) left)->arg; - if (IsA(var, Var)) - part_attr = var; - else + /* This node should contain Var */ + if (!IsA(var, Var)) + return false; + + /* Update LEFT */ + left = (Node *) var; + } + /* FALL THROUGH (no break) */ + + case T_Var: + { + part_attr = (Var *) left; + + /* VAR.attno == partitioned attribute number */ + if (part_attr->varoattno != part_attno) + return false; + } + break; + + default: return false; } - /* left arg is of type VAR */ - else if (IsA(left, Var) && IsA(right, Const)) + + /* Examine RIGHT argument */ + switch (nodeTag(right)) { - part_attr = (Var *) left; - } - /* Something is wrong, retreat! */ - else return false; + case T_FuncExpr: + { + FuncExpr *func_expr = (FuncExpr *) right; + Const *constant; - /* VAR.attno == partitioned attribute number */ - if (part_attr->varoattno != part_attno) - return false; + /* This node should represent a type cast */ + if (func_expr->funcformat != COERCE_EXPLICIT_CAST && + func_expr->funcformat != COERCE_IMPLICIT_CAST) + return false; - /* CONST is NOT NULL */ - if (((Const *) right)->constisnull) - return false; + /* This node should have exactly 1 argument */ + if (list_length(func_expr->args) != 1) + return false; + + /* Extract single argument */ + constant = linitial(func_expr->args); + + /* Argument should be a Const */ + if (!IsA(constant, Const)) + return false; + + /* Update RIGHT */ + right = (Node *) constant; + } + /* FALL THROUGH (no break) */ - constant = (Const *) right; + case T_Const: + { + boundary = (Const *) right; + + /* CONST is NOT NULL */ + if (boundary->constisnull) + return false; + } + break; + + default: + return false; + } /* Cast Const to a proper type if needed */ - *val = perform_type_cast(constant->constvalue, - getBaseType(constant->consttype), - getBaseType(prel->atttype), - &cast_success); + *value = 
perform_type_cast(boundary->constvalue, + getBaseType(boundary->consttype), + getBaseType(prel->atttype), + &cast_success); if (!cast_success) { From b556dba59315417e1d97e94023d415c27a7a974e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 29 Mar 2017 18:04:34 +0300 Subject: [PATCH 0307/1124] Change comment --- src/partition_creation.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 8a16d2cd..d410b147 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1751,7 +1751,8 @@ get_part_expression_info(Oid relid, const char *expr_string, /* We don't need pathman activity initialization for this relation yet */ pathman_hooks_enabled = false; - /* We use separate memory context here, just to make sure we don't leave + /* + * We use separate memory context here, just to make sure we don't leave * anything behind after analyze and planning. * Parsed raw expression will stay in context of caller */ From 8cd51494e4606ccd4f350439737b0de8b7cb16ff Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 18:35:07 +0300 Subject: [PATCH 0308/1124] more optimizations in finish_delayed_invalidation() and try_syscache_parent_search() --- src/relation_info.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 772d34e9..e2083bf7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -652,6 +652,7 @@ finish_delayed_invalidation(void) { PartParentSearch search; Oid parent; + List *fresh_rels = delayed_invalidation_parent_rels; parent = get_parent_of_partition(vague_rel, &search); @@ -659,12 +660,20 @@ finish_delayed_invalidation(void) { /* It's still parent */ case PPS_ENTRY_PART_PARENT: - try_perform_parent_refresh(parent); + { + /* Skip if we've already refreshed this parent */ + if (!list_member_oid(fresh_rels, parent)) + try_perform_parent_refresh(parent); + } break; /* It *might have been* 
parent before (not in PATHMAN_CONFIG) */ case PPS_ENTRY_PARENT: - remove_pathman_relation_info(parent); + { + /* Skip if we've already refreshed this parent */ + if (!list_member_oid(fresh_rels, parent)) + try_perform_parent_refresh(parent); + } break; /* How come we still don't know?? */ @@ -793,7 +802,6 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) else { Relation relation; - Snapshot snapshot; ScanKeyData key[1]; SysScanDesc scan; HeapTuple inheritsTuple; @@ -809,7 +817,6 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partition)); - snapshot = RegisterSnapshot(GetLatestSnapshot()); scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, true, NULL, 1, key); @@ -835,7 +842,6 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) } systable_endscan(scan); - UnregisterSnapshot(snapshot); heap_close(relation, AccessShareLock); return parent; From 7f4f39e54e3a38a9c0541ad89c5b14836eb1e45a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 18:54:25 +0300 Subject: [PATCH 0309/1124] add regression test 'pathman_column_type' --- Makefile | 1 + expected/pathman_column_type.out | 56 ++++++++++++++++++++++++++++++++ sql/pathman_column_type.sql | 29 +++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 expected/pathman_column_type.out create mode 100644 sql/pathman_column_type.sql diff --git a/Makefile b/Makefile index f13ceeb4..a2453679 100644 --- a/Makefile +++ b/Makefile @@ -38,6 +38,7 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_utility_stmt \ + pathman_column_type \ pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out new file mode 100644 index 00000000..ee71675f --- /dev/null +++ b/expected/pathman_column_type.out @@ -0,0 +1,56 @@ +\set VERBOSITY terse +SET search_path = 
'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); +NOTICE: sequence "test_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +DROP SCHEMA test_column_type CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql new file mode 100644 index 00000000..ef0e470e --- /dev/null +++ b/sql/pathman_column_type.sql @@ -0,0 +1,29 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; + + +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 
1, 10, 10); + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + + +DROP SCHEMA test_column_type CASCADE; +DROP EXTENSION pg_pathman; From 62fdfd63fd86365b005a19ea82ac0e9214db2bdd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 29 Mar 2017 18:59:14 +0300 Subject: [PATCH 0310/1124] Fix memory context --- src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e75e27eb..e3a1804c 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1738,7 +1738,7 @@ get_part_expression_info(Oid relid, const char *expr_string, expr_info = palloc(sizeof(PartExpressionInfo)); - pathman_parse_context = AllocSetContextCreate(TopMemoryContext, + pathman_parse_context = AllocSetContextCreate(TopPathmanContext, "pathman parse context", ALLOCSET_DEFAULT_SIZES); From ecdf3dd7436413156ba9cbf84df728f3bcbec1d5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 22:34:07 +0300 Subject: [PATCH 0311/1124] improve comments (constraint loader) --- src/init.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/init.c b/src/init.c index bc98e005..b944ae62 100644 --- a/src/init.c +++ b/src/init.c @@ -810,7 +810,7 @@ validate_range_constraint(const Expr *expr, lower, upper, lower_null, upper_null); } -/* Validates a single expression of kind VAR >= CONST or VAR < CONST */ +/* Validates a single expression of kind VAR >= CONST | VAR < 
CONST */ static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, @@ -870,8 +870,10 @@ validate_range_opexpr(const Expr *expr, /* * Reads const value from expressions of kind: - * 1) VAR >= CONST OR VAR < CONST - * 2) RELABELTYPE(VAR) >= CONST OR RELABELTYPE(VAR) < CONST + * 1) VAR >= CONST + * 2) VAR < CONST + * 3) RELABELTYPE(VAR) >= CONST + * 4) RELABELTYPE(VAR) < CONST */ static bool read_opexpr_const(const OpExpr *opexpr, From 89c5b23c678d3a601e32ef58b71fc2f2b6fe2dfb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Mar 2017 22:45:29 +0300 Subject: [PATCH 0312/1124] fix cache names (for debug) --- src/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/init.c b/src/init.c index b944ae62..b41b9d6e 100644 --- a/src/init.c +++ b/src/init.c @@ -351,7 +351,7 @@ init_local_cache(void) ctl.entrysize = sizeof(PartRelationInfo); ctl.hcxt = PathmanRelationCacheContext; - partitioned_rels = hash_create("pg_pathman's partitioned relations cache", + partitioned_rels = hash_create("pg_pathman's partition dispatch cache", PART_RELS_SIZE, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); @@ -369,7 +369,7 @@ init_local_cache(void) ctl.entrysize = sizeof(PartBoundInfo); ctl.hcxt = PathmanBoundCacheContext; - bound_cache = hash_create("pg_pathman's partition constraints cache", + bound_cache = hash_create("pg_pathman's partition bounds cache", PART_RELS_SIZE * CHILD_FACTOR, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } From f78330fbdca139021d2bc63a92e592c2a7610acf Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 30 Mar 2017 13:49:43 +0300 Subject: [PATCH 0313/1124] Fix basic selects for range partitions --- src/hooks.c | 21 ++++++-------- src/partition_filter.c | 34 +++++++++++++++-------- src/relation_info.c | 63 ++---------------------------------------- 3 files changed, 34 insertions(+), 84 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 5b8a471a..248c9ea9 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ 
-29,6 +29,7 @@ #include "miscadmin.h" #include "optimizer/cost.h" #include "optimizer/restrictinfo.h" +#include "rewrite/rewriteManip.h" #include "utils/typcache.h" #include "utils/lsyscache.h" @@ -258,28 +259,22 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * Get pathkeys for ascending and descending sort by partitioned column. */ List *pathkeys; - Var *var; - Oid vartypeid, - varcollid; - int32 type_mod; TypeCacheEntry *tce; + Node *expr; - /* Make Var from patition column */ - /* FIX: this */ - get_rte_attribute_type(rte, 0, - &vartypeid, &type_mod, &varcollid); - var = makeVar(rti, 0, vartypeid, type_mod, varcollid, 0); - var->location = -1; + expr = copyObject(prel->expr); + if (rti != 1) + ChangeVarNodes(expr, 1, rti, 0); /* Determine operator type */ - tce = lookup_type_cache(var->vartype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + tce = lookup_type_cache(prel->atttype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) var, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) expr, NULL, tce->lt_opr, NULL, false); if (pathkeys) pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) var, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) expr, NULL, tce->gt_opr, NULL, false); if (pathkeys) pathkeyDesc = (PathKey *) linitial(pathkeys); diff --git a/src/partition_filter.c b/src/partition_filter.c index 35f8ccdf..2edd64f0 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -21,6 +21,7 @@ #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "nodes/nodeFuncs.h" +#include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" #include "utils/lsyscache.h" @@ -609,7 +610,10 @@ partition_filter_exec(CustomScanState *node) Datum value; ExprDoneCond itemIsDone; ExprState *expr_state; - struct expr_walker_context expr_walker_context; + ListCell *lc; + Index varno = 1; + TupleTableSlot *tmp_slot; + Node 
*expr; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -623,26 +627,34 @@ partition_filter_exec(CustomScanState *node) return slot; } - old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); - - /* Prepare walker context */ - expr_walker_context.prel = prel; - expr_walker_context.slot = slot; - expr_walker_context.tup = ExecCopySlotTuple(slot); + /* Find proper varno for Vars in expression */ + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = (RangeTblEntry *) lfirst(lc); + if (entry->relid == prel->key) + break; - /* Fetch values from slot for expression */ - adapt_values(prel->expr, (void *) &expr_walker_context); + varno++; + } - /* Prepare state for execution */ - expr_state = ExecInitExpr((Expr *)prel->expr, NULL); + /* Change varno according to range table */ + expr = copyObject(prel->expr); + if (varno != 1) + ChangeVarNodes(expr, 1, varno, 0); + /* Prepare state for expression execution */ + old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + expr_state = ExecInitExpr((Expr *) expr, NULL); MemoryContextSwitchTo(old_cxt); /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Execute expression */ + tmp_slot = econtext->ecxt_scantuple; + econtext->ecxt_scantuple = slot; value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + econtext->ecxt_scantuple = tmp_slot; if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); diff --git a/src/relation_info.c b/src/relation_info.c index fe1dbe97..f9665488 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -68,12 +68,6 @@ static bool delayed_shutdown = false; /* pathman was dropped */ list = NIL; \ } while (0) -struct expr_mutator_context -{ - Oid relid; /* partitioned table */ - List *rtable; /* range table list from expression query */ -}; - static bool try_perform_parent_refresh(Oid parent); static Oid try_syscache_parent_search(Oid partition, PartParentSearch 
*status); static Oid get_parent_of_partition_internal(Oid partition, @@ -92,8 +86,6 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); -static Node *expression_mutator(Node *node, struct expr_mutator_context *context); - void init_relation_info_static_data(void) @@ -133,7 +125,6 @@ refresh_pathman_relation_info(Oid relid, char *expr; HeapTuple tp; MemoryContext oldcontext; - Node *tmp_node; AssertTemporaryContext(); @@ -191,16 +182,12 @@ refresh_pathman_relation_info(Oid relid, prel->atttype = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); - /* Restore planned expression */ - tmp_node = (Node *) stringToNode(expr); - fix_opfuncids(tmp_node); - pfree(expr); - - /* expression and attname should be saved in cache context */ + /* Expression and attname should be saved in cache context */ oldcontext = MemoryContextSwitchTo(PathmanRelationCacheContext); - prel->expr = expression_mutator(tmp_node, NULL); prel->attname = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + prel->expr = (Node *) stringToNode(expr); + fix_opfuncids(prel->expr); MemoryContextSwitchTo(oldcontext); @@ -1195,47 +1182,3 @@ shout_if_prel_is_invalid(const Oid parent_oid, expected_str); } } - - -/* - * To prevent calculation of Vars in expression, we wrap them with - * CustomConst, and later before execution we fill it with actual value - */ -static Node * -expression_mutator(Node *node, struct expr_mutator_context *context) -{ - const TypeCacheEntry *typcache; - - /* TODO: add RelabelType */ - /* TODO: check Vars, they should only be related with base relation */ - if (IsA(node, Var)) - { - //Var *variable = (Var *) node; - Node *new_node = newNode(sizeof(CustomConst), T_Const); - Const *new_const = (Const *)new_node; - - /* - RangeTblEntry *entry = rt_fetch(variable->varno, context->rtable); - if (entry->relid != 
context->relid) - elog(ERROR, "Columns in the expression should " - "be only from partitioned relation"); - */ - - /* we only need varattno from original Var, for now */ - ((CustomConst *)new_node)->varattno = ((Var *)node)->varattno; - - new_const->consttype = ((Var *)node)->vartype; - new_const->consttypmod = ((Var *)node)->vartypmod; - new_const->constcollid = ((Var *)node)->varcollid; - new_const->constvalue = (Datum) 0; - new_const->constisnull = true; - new_const->location = -2; - - typcache = lookup_type_cache(new_const->consttype, 0); - new_const->constbyval = typcache->typbyval; - new_const->constlen = typcache->typlen; - - return new_node; - } - return expression_tree_mutator(node, expression_mutator, (void *) context); -} From d76011c5cdaa2ebabbb4f88d1173f609ed39a60c Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 30 Mar 2017 16:33:14 +0300 Subject: [PATCH 0314/1124] make scan-build happy --- src/pl_range_funcs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 2474ccb2..d6570ed3 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -1098,7 +1098,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(datums[i+1]); RangeVar *rv = npartnames > 0 ? rangevars[i] : NULL; - char *tablespace = ntablespaces > 0 ? tablespaces[i] : NULL; + char *tablespace = tablespaces ? 
tablespaces[i] : NULL; (void) create_single_range_partition_internal(relid, &start, From df85277c6e895992d4448237acc1186a959a481b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 30 Mar 2017 19:20:43 +0300 Subject: [PATCH 0315/1124] Add expression check in pathlist hooks --- range.sql | 13 +++--- src/hooks.c | 23 +++++---- src/include/pathman.h | 6 +-- src/pg_pathman.c | 106 ++++++++++++++---------------------------- 4 files changed, 60 insertions(+), 88 deletions(-) diff --git a/range.sql b/range.sql index ad5d405f..0ecedd7b 100644 --- a/range.sql +++ b/range.sql @@ -130,12 +130,13 @@ BEGIN END LOOP; /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', - parent_relid, - expression, - start_value, - end_value, - v_atttype::TEXT); + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', + parent_relid, + start_value, + end_value, + v_atttype::TEXT) + USING + expression; END IF; /* Insert new entry to pathman config */ diff --git a/src/hooks.c b/src/hooks.c index 248c9ea9..c8fb19af 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -61,6 +61,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, ListCell *lc; WalkerContext context; double paramsel; + Node *expr; /* Call hooks set by other extensions */ if (set_join_pathlist_next) @@ -105,14 +106,17 @@ pathman_join_pathlist_hook(PlannerInfo *root, otherclauses = NIL; } + /* Make copy of partitioning expression and fix Var's varno attributes */ + expr = copyObject(inner_prel->expr); + if (innerrel->relid != 1) + ChangeVarNodes(expr, 1, innerrel->relid, 0); + paramsel = 1.0; foreach (lc, joinclauses) { WrapperNode *wrap; - InitWalkerContext(&context, innerrel->relid, - inner_prel, NULL, false); - + InitWalkerContext(&context, expr, inner_prel, NULL, false); wrap = walk_expr_tree((Expr *) lfirst(lc), &context); paramsel *= wrap->paramsel; } @@ -252,6 +256,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, WalkerContext context; ListCell 
*lc; int i; + Node *expr; + + /* Make copy of partitioning expression and fix Var's varno attributes */ + expr = copyObject(prel->expr); + if (rti != 1) + ChangeVarNodes(expr, 1, rti, 0); if (prel->parttype == PT_RANGE) { @@ -260,11 +270,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, */ List *pathkeys; TypeCacheEntry *tce; - Node *expr; - - expr = copyObject(prel->expr); - if (rti != 1) - ChangeVarNodes(expr, 1, rti, 0); /* Determine operator type */ tce = lookup_type_cache(prel->atttype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); @@ -287,7 +292,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, rti, prel, NULL, false); + InitWalkerContext(&context, expr, prel, NULL, false); wrappers = NIL; foreach(lc, rel->baserestrictinfo) { diff --git a/src/include/pathman.h b/src/include/pathman.h index 9ef3bbfa..6e3fd104 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -141,16 +141,16 @@ typedef struct typedef struct { - Index prel_varno; /* Var::varno associated with prel */ + Node *prel_expr; /* expression from PartRelationInfo */ const PartRelationInfo *prel; /* main partitioning structure */ ExprContext *econtext; /* for ExecEvalExpr() */ bool for_insert; /* are we in PartitionFilter now? 
*/ } WalkerContext; /* Usual initialization procedure for WalkerContext */ -#define InitWalkerContext(context, prel_vno, prel_info, ecxt, for_ins) \ +#define InitWalkerContext(context, expr, prel_info, ecxt, for_ins) \ do { \ - (context)->prel_varno = (prel_vno); \ + (context)->prel_expr = (expr); \ (context)->prel = (prel_info); \ (context)->econtext = (ecxt); \ (context)->for_insert = (for_ins); \ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index ba0617a5..9192c666 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -99,7 +99,7 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); - +static bool match_expr_to_operand(Node *operand, Node *expr); /* We can transform Param into Const provided that 'econtext' is available */ #define IsConstValue(wcxt, node) \ ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) @@ -895,8 +895,7 @@ static WrapperNode * handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) { WrapperNode *result = (WrapperNode *) palloc(sizeof(WrapperNode)); - Node *varnode = (Node *) linitial(expr->args); - Var *var; + Node *exprnode = (Node *) linitial(expr->args); Node *arraynode = (Node *) lsecond(expr->args); const PartRelationInfo *prel = context->prel; @@ -904,27 +903,14 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) result->args = NIL; result->paramsel = 0.0; - Assert(varnode != NULL); + Assert(exprnode != NULL); - /* If variable is not the partition key then skip it */ - if (IsA(varnode, Var) || IsA(varnode, RelabelType)) - { - var = !IsA(varnode, RelabelType) ? 
- (Var *) varnode : - (Var *) ((RelabelType *) varnode)->arg; - - /* Skip if base types or attribute numbers do not match */ - /* FIX: use exprsssion - if (getBaseType(var->vartype) != getBaseType(prel->atttype) || - var->varoattno != prel->attnum || - var->varno != context->prel_varno) - { - goto handle_arrexpr_return; - } */ - } - else + if (!match_expr_to_operand(context->prel_expr, exprnode)) goto handle_arrexpr_return; + if (exprnode && IsA(exprnode, RelabelType)) + exprnode = (Node *) ((RelabelType *) exprnode)->arg; + if (arraynode && IsA(arraynode, Const) && !((Const *) arraynode)->constisnull) { @@ -1084,16 +1070,9 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, { int strategy; TypeCacheEntry *tce; - Oid vartype; const OpExpr *expr = (const OpExpr *) result->orig; const PartRelationInfo *prel = context->prel; - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - - vartype = !IsA(varnode, RelabelType) ? - ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; - /* Exit if Constant is NULL */ if (c->constisnull) { @@ -1102,7 +1081,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, return; } - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); + tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); /* There's no strategy for this operator, go to end */ @@ -1164,25 +1143,36 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, const OpExpr *expr = (const OpExpr *) result->orig; TypeCacheEntry *tce; int strategy; - Oid vartype; - - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - - vartype = !IsA(varnode, RelabelType) ? 
- ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; /* Determine operator type */ - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); + tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); result->paramsel = estimate_paramsel_using_prel(prel, strategy); } + +/* + * Compare clause operand with our expression + */ +static bool +match_expr_to_operand(Node *operand, Node *expr) +{ + /* strip relabeling for both operand and expr */ + if (operand && IsA(operand, RelabelType)) + operand = (Node *) ((RelabelType *) operand)->arg; + + if (expr && IsA(expr, RelabelType)) + expr = (Node *) ((RelabelType *) expr)->arg; + + /* compare expressions and return result right away */ + return equal(expr, operand); +} + /* * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where KEY is - * partition key (it could be Var or RelableType) and PARAM is whatever. + * partition expression and PARAM is whatever. * * NOTE: returns false if partition key is not in expression. */ @@ -1194,45 +1184,21 @@ pull_var_param(const WalkerContext *ctx, { Node *left = linitial(expr->args), *right = lsecond(expr->args); - Var *v = NULL; - /* Check the case when variable is on the left side */ - if (IsA(left, Var) || IsA(left, RelabelType)) + if (match_expr_to_operand(left, ctx->prel_expr)) { - v = !IsA(left, RelabelType) ? - (Var *) left : - (Var *) ((RelabelType *) left)->arg; - - /* Check if 'v' is partitioned column of 'prel' */ - /* FIX this */ - if (v->varoattno == 0 && - v->varno == ctx->prel_varno) - { - *var_ptr = left; - *param_ptr = right; - return true; - } + *var_ptr = left; + *param_ptr = right; + return true; } - /* ... variable is on the right side */ - if (IsA(right, Var) || IsA(right, RelabelType)) + if (match_expr_to_operand(right, ctx->prel_expr)) { - v = !IsA(right, RelabelType) ? 
- (Var *) right : - (Var *) ((RelabelType *) right)->arg; - - /* Check if 'v' is partitioned column of 'prel' */ - /* FIX this */ - if (v->varoattno == 0 && - v->varno == ctx->prel_varno) - { - *var_ptr = right; - *param_ptr = left; - return true; - } + *var_ptr = right; + *param_ptr = left; + return true; } - /* Variable isn't a partitionig key */ return false; } From bf42045378436e1edb3440c8bad91adaf07cc4f4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 31 Mar 2017 03:15:31 +0300 Subject: [PATCH 0316/1124] update README.md --- README.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index f3f8f94f..7c6f3a77 100644 --- a/README.md +++ b/README.md @@ -266,21 +266,21 @@ Set partition creation callback to be invoked for each attached or created parti ```json /* RANGE-partitioned table abc (child abc_4) */ { - "parent": "abc", - "parent_schema": "public", - "parttype": "2", - "partition": "abc_4", + "parent": "abc", + "parent_schema": "public", + "parttype": "2", + "partition": "abc_4", "partition_schema": "public", - "range_max": "401", - "range_min": "301" + "range_max": "401", + "range_min": "301" } /* HASH-partitioned table abc (child abc_0) */ { - "parent": "abc", - "parent_schema": "public", - "parttype": "1", - "partition": "abc_0" + "parent": "abc", + "parent_schema": "public", + "parttype": "1", + "partition": "abc_0", "partition_schema": "public" } ``` @@ -309,7 +309,7 @@ CREATE TABLE IF NOT EXISTS pathman_config_params ( enable_parent BOOLEAN NOT NULL DEFAULT TRUE, auto BOOLEAN NOT NULL DEFAULT TRUE, init_callback REGPROCEDURE NOT NULL DEFAULT 0, - spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); ``` This table stores optional parameters which override standard behavior. 
@@ -656,6 +656,7 @@ There are several user-accessible [GUC](https://www.postgresql.org/docs/9.5/stat - `pg_pathman.enable_runtimemergeappend` --- toggle `RuntimeMergeAppend` custom node on\off - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off - `pg_pathman.enable_auto_partition` --- toggle automatic partition creation on\off (per session) + - `pg_pathman.enable_bounds_cache` --- toggle bounds cache on\off (faster updates of partitioning scheme) - `pg_pathman.insert_into_fdw` --- allow INSERTs into various FDWs `(disabled | postgres | any_fdw)` - `pg_pathman.override_copy` --- toggle COPY statement hooking on\off From f9fa074b7c0735c784656c135d3c1e826f867615 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 31 Mar 2017 14:40:48 +0300 Subject: [PATCH 0317/1124] concrete types (hash.sql) --- hash.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hash.sql b/hash.sql index 59a2ae64..8331ed5f 100644 --- a/hash.sql +++ b/hash.sql @@ -14,7 +14,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, attribute TEXT, - partitions_count INTEGER, + partitions_count INT4, partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) @@ -280,7 +280,7 @@ $$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, - partitions_count INTEGER, + partitions_count INT4, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' @@ -296,7 +296,7 @@ LANGUAGE C STRICT; /* * Calculates hash for integer value */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) +CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; @@ -307,6 +307,6 @@ CREATE OR REPLACE FUNCTION 
@extschema@.build_hash_condition( attribute_type REGTYPE, attribute TEXT, partitions_count INT4, - partitions_index INT4) + partition_index INT4) RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' LANGUAGE C STRICT; From 102bb31ff1d0696df39cc95c5fe25c1685a1d380 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 31 Mar 2017 15:28:22 +0300 Subject: [PATCH 0318/1124] Fix COPY and 'handle_modification_query --- src/partition_creation.c | 24 ++++++++++++++- src/planner_tree_modification.c | 9 +++++- src/utility_stmt_hooking.c | 52 +++++++++++++++++++++++---------- 3 files changed, 67 insertions(+), 18 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e3a1804c..2ae12a3d 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1683,6 +1683,27 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } +/* + * Checks that columns are from partitioning relation + * Maybe there will be more checks later. + */ +static bool +validate_part_expression(Node *node, void *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + if (var->varno != 1) + elog(ERROR, "Columns used in expression should only be related" + " with partitioning relation"); + return false; + } + return expression_tree_walker(node, validate_part_expression, context); +} + /* Wraps expression by SELECT query and returns parsed tree */ Node * get_raw_expression(Oid relid, const char *expr, char **query_string_out, @@ -1787,11 +1808,12 @@ get_part_expression_info(Oid relid, const char *expr_string, target_entry = lfirst(list_head(plan->planTree->targetlist)); expr_node = (Node *) target_entry->expr; expr_node = eval_const_expressions(NULL, expr_node); + validate_part_expression(expr_node, NULL); out_string = nodeToString(expr_node); MemoryContextSwitchTo(oldcontext); - /* Convert expression to string and return it as datum */ + /* Save expression string as datum and free memory from planning 
stage */ expr_info->expr_datum = CStringGetTextDatum(out_string); MemoryContextReset(pathman_parse_context); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff18611d..a073dd11 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -15,6 +15,7 @@ #include "nodes_common.h" #include "partition_filter.h" #include "planner_tree_modification.h" +#include "rewrite/rewriteManip.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -244,6 +245,7 @@ handle_modification_query(Query *parse) Expr *expr; WalkerContext context; Index result_rel; + Node *prel_expr; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -274,8 +276,13 @@ handle_modification_query(Query *parse) /* Exit if there's no expr (no use) */ if (!expr) return; + /* Prepare partitioning expression */ + prel_expr = copyObject(prel->expr); + if (result_rel != 1) + ChangeVarNodes(prel_expr, 1, result_rel, 0); + /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, result_rel, prel, NULL, false); + InitWalkerContext(&context, prel_expr, prel, NULL, false); wrap = walk_expr_tree(expr, &context); ranges = irange_list_intersection(ranges, wrap->rangeset); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 65901a88..83a649e1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -478,6 +478,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, TupleTableSlot *myslot; MemoryContext oldcontext = CurrentMemoryContext; + Node *expr = NULL; + ExprState *expr_state = NULL; + uint64 processed = 0; @@ -525,9 +528,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, for (;;) { - TupleTableSlot *slot; - bool skip_tuple; + TupleTableSlot *slot, + *tmp_slot; + ExprDoneCond itemIsDone; + bool skip_tuple, + isnull; Oid tuple_oid = InvalidOid; + Datum value; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; @@ -540,28 +547,45 @@ 
PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Fetch PartRelationInfo for parent relation */ prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); - /* Switch into per tuple memory context */ - MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + /* Initialize expression and expression state */ + if (expr == NULL) + { + expr = copyObject(prel->expr); + expr_state = ExecInitExpr((Expr *) expr, NULL); + } if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) break; - /* FIX this - if (nulls[prel->attnum - 1]) + /* And now we can form the input tuple. */ + tuple = heap_form_tuple(tupDesc, values, nulls); + + /* Place tuple in tuple slot --- but slot shouldn't free it */ + slot = myslot; + ExecStoreTuple(tuple, slot, InvalidBuffer, false); + + /* Switch into per tuple memory context */ + MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + + /* Execute expression */ + tmp_slot = econtext->ecxt_scantuple; + econtext->ecxt_scantuple = slot; + value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + econtext->ecxt_scantuple = tmp_slot; + + if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); - */ + + if (itemIsDone != ExprSingleResult) + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - /* FIX here, attnum */ - rri_holder = select_partition_for_insert(values[/* here */1], + rri_holder = select_partition_for_insert(value, prel->atttype, prel, &parts_storage, estate); child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; - /* And now we can form the input tuple. */ - tuple = heap_form_tuple(tupDesc, values, nulls); - /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { @@ -585,10 +609,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Triggers and stuff need to be invoked in query context. 
*/ MemoryContextSwitchTo(oldcontext); - /* Place tuple in tuple slot --- but slot shouldn't free it */ - slot = myslot; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - skip_tuple = false; /* BEFORE ROW INSERT Triggers */ From d9a5003f941d9d6db481af6a1122b9c4eead8ee9 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 31 Mar 2017 17:02:19 +0300 Subject: [PATCH 0319/1124] rewrite an update trigger function --- src/pl_funcs.c | 131 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 122 insertions(+), 9 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fa8b0158..afb7bdc5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -22,11 +22,13 @@ #include "access/nbtree.h" #include "access/htup_details.h" #include "access/xact.h" +#include "access/sysattr.h" #include "catalog/indexing.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" +#include "executor/spi.h" #include "funcapi.h" #include "miscadmin.h" #include "utils/builtins.h" @@ -111,7 +113,10 @@ typedef struct static void on_partitions_created_internal(Oid partitioned_table, bool add_callbacks); static void on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks); static void on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks); - +static void delete_tuple(Relation rel, Datum ctid); +static void insert_tuple(Relation rel, HeapTuple tup); +static void make_arg_list(StringInfoData *buf, HeapTuple tup, TupleDesc tupdesc, + int *nargs, Oid **argtypes, Datum **args, char **nulls); /* * ---------------------------- @@ -1116,16 +1121,19 @@ update_trigger_func(PG_FUNCTION_ARGS) Datum key; bool isnull; TupleConversionMap *conversion_map; + Datum ctid; + Relation source_rel; TupleDesc source_tupdesc; - HeapTuple source_tuple; + HeapTuple old_tuple; + HeapTuple new_tuple; Oid source_relid; AttrNumber source_key; Relation target_rel; TupleDesc target_tupdesc; - HeapTuple 
target_tuple; Oid target_relid; + HeapTuple target_tuple; /* This function can only be invoked as a trigger */ if (!CALLED_AS_TRIGGER(fcinfo)) @@ -1135,8 +1143,10 @@ update_trigger_func(PG_FUNCTION_ARGS) if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) elog(ERROR, "This function must only be used as UPDATE trigger"); + source_rel = trigdata->tg_relation; source_relid = trigdata->tg_relation->rd_id; - source_tuple = trigdata->tg_newtuple; + old_tuple = trigdata->tg_trigtuple; + new_tuple = trigdata->tg_newtuple; source_tupdesc = trigdata->tg_relation->rd_att; /* Find parent relation and partitioning info */ @@ -1154,18 +1164,26 @@ update_trigger_func(PG_FUNCTION_ARGS) */ key_name = get_attname(parent, prel->attnum); source_key = get_attnum(source_relid, key_name); - key = heap_getattr(source_tuple, source_key, source_tupdesc, &isnull); + // target_key = get_attnum(target_relid, key_name); + key = heap_getattr(new_tuple, source_key, source_tupdesc, &isnull); /* Find partition it should go into */ target_relid = get_partition_for_key(prel, key); /* If target partition is the same then do nothing */ if (target_relid == source_relid) - return PointerGetDatum(source_tuple); + PG_RETURN_POINTER(new_tuple); + /* TODO: probably should be another lock level */ target_rel = heap_open(target_relid, RowExclusiveLock); target_tupdesc = target_rel->rd_att; + /* Read tuple id */ + ctid = heap_getsysattr(old_tuple, + SelfItemPointerAttributeNumber, + source_tupdesc, + &isnull); + /* * Else if it's a different partition then build a TupleConversionMap * between original partition and new one. 
And then do a convertation @@ -1173,7 +1191,11 @@ update_trigger_func(PG_FUNCTION_ARGS) conversion_map = convert_tuples_by_name(source_tupdesc, target_tupdesc, "Failed to convert tuple"); - target_tuple = do_convert_tuple(source_tuple, conversion_map); + target_tuple = do_convert_tuple(new_tuple, conversion_map); + + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "SPI_connect failed"); /* * To make an UPDATE on a tuple in case when the tuple should be moved from @@ -1181,13 +1203,104 @@ update_trigger_func(PG_FUNCTION_ARGS) * old tuple from original partition and then insert updated version * of tuple to the target partition */ - simple_heap_delete(trigdata->tg_relation, &trigdata->tg_trigtuple->t_self); - simple_heap_insert(target_rel, target_tuple); + delete_tuple(source_rel, ctid); + insert_tuple(target_rel, target_tuple); + + if (SPI_finish() != SPI_OK_FINISH) + elog(ERROR, "SPI_finish failed"); heap_close(target_rel, RowExclusiveLock); + PG_RETURN_VOID(); } +/* + * Delete record from rel. Caller is responsible for SPI environment setup + */ +static void +delete_tuple(Relation rel, Datum ctid) +{ + char *query; + Datum args[1] = {ctid}; + Oid argtypes[1] = {TIDOID}; + char nulls[1] = {' '}; + int spi_result; + + query = psprintf("DELETE FROM %s.%s WHERE ctid = $1", + quote_identifier(get_namespace_name(RelationGetNamespace(rel))), + quote_identifier(RelationGetRelationName(rel))); + spi_result = SPI_execute_with_args(query, 1, argtypes, args, nulls, false, 1); + + /* Check result */ + if (spi_result != SPI_OK_DELETE) + elog(ERROR, "SPI_execute_with_args returned %d", spi_result); +} + +/* + * Insert a new tuple to the rel. 
Caller is responsible for SPI environment + * setup + */ +static void +insert_tuple(Relation rel, HeapTuple tup) +{ + TupleDesc tupdesc = rel->rd_att; + StringInfoData querybuf; + Datum *args; + Oid *argtypes; + char *nulls; + int nargs; + const char *namespace; + const char *relname; + int spi_result; + + namespace = quote_identifier(get_namespace_name(RelationGetNamespace(rel))); + relname = quote_identifier(RelationGetRelationName(rel)); + + initStringInfo(&querybuf); + appendStringInfo(&querybuf, "INSERT INTO "); + appendStringInfo(&querybuf, "%s.%s", namespace, relname); + appendStringInfo(&querybuf, " VALUES ("); + make_arg_list(&querybuf, tup, tupdesc, &nargs, &argtypes, &args, &nulls); + appendStringInfo(&querybuf, ")"); + + spi_result = SPI_execute_with_args(querybuf.data, nargs, argtypes, + args, nulls, false, 0); + + /* Check result */ + if (spi_result != SPI_OK_INSERT) + elog(ERROR, "SPI_execute_with_args returned %d", spi_result); +} + +static void +make_arg_list(StringInfoData *buf, HeapTuple tup, TupleDesc tupdesc, + int *nargs, Oid **argtypes, Datum **args, char **nulls) +{ + int i; + bool isnull; + + *nargs = tupdesc->natts; + *args = palloc(sizeof(Datum) * tupdesc->natts); + *argtypes = palloc(sizeof(Oid) * tupdesc->natts); + *nulls = palloc(sizeof(char) * tupdesc->natts); + + for (i = 0; i < tupdesc->natts; i++) + { + /* Skip dropped columns */ + if (tupdesc->attrs[i]->attisdropped) + continue; + + *args[i] = heap_getattr(tup, i + 1, tupdesc, &isnull); + *nulls[i] = isnull ? 'n' : ' '; + *argtypes[i] = tupdesc->attrs[i]->atttypid; + + /* Add comma separator (except the first time) */ + if (i != 0) + appendStringInfo(buf, ","); + + /* Add parameter */ + appendStringInfo(buf, "$%i", i+1); + } +} /* * Returns Oid of partition corresponding to partitioning key value. 
Throws From 5e7d5f04f303d437656669c3e87cdde724c0edd5 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 31 Mar 2017 17:54:37 +0300 Subject: [PATCH 0320/1124] fixed a bug in make_arg_list() --- src/pl_funcs.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index afb7bdc5..3745c32b 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1276,22 +1276,22 @@ make_arg_list(StringInfoData *buf, HeapTuple tup, TupleDesc tupdesc, int *nargs, Oid **argtypes, Datum **args, char **nulls) { int i; - bool isnull; + bool isnull; *nargs = tupdesc->natts; - *args = palloc(sizeof(Datum) * tupdesc->natts); - *argtypes = palloc(sizeof(Oid) * tupdesc->natts); - *nulls = palloc(sizeof(char) * tupdesc->natts); + *args = (Datum *) palloc(sizeof(Datum) * tupdesc->natts); + *argtypes = (Oid *) palloc(sizeof(Oid) * tupdesc->natts); + *nulls = (char *) palloc(sizeof(char) * tupdesc->natts); - for (i = 0; i < tupdesc->natts; i++) + for (i = 0; i < *nargs; i++) { /* Skip dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; - *args[i] = heap_getattr(tup, i + 1, tupdesc, &isnull); - *nulls[i] = isnull ? 'n' : ' '; - *argtypes[i] = tupdesc->attrs[i]->atttypid; + (*args)[i] = heap_getattr(tup, i + 1, tupdesc, &isnull); + (*nulls)[i] = isnull ? 
'n' : ' '; + (*argtypes)[i] = tupdesc->attrs[i]->atttypid; /* Add comma separator (except the first time) */ if (i != 0) From b3062647ce213b96938f296d54f392ab66ca1555 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 1 Apr 2017 16:26:27 +0300 Subject: [PATCH 0321/1124] add license badge (PostgreSQL) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 74018a4d..a90f5436 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ [![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) [![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) +[![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) # pg_pathman From 7da19fd3df7eaa46837fec64b0ccae70fd3400e9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 2 Apr 2017 16:16:14 +0300 Subject: [PATCH 0322/1124] disable ALTER COLUMN partitioned_column TYPE for tables partitioned by HASH --- expected/pathman_column_type.out | 105 ++++++++++++++++++++++++++++- sql/pathman_column_type.sql | 41 +++++++++++ src/hooks.c | 28 ++++++-- src/include/utility_stmt_hooking.h | 6 ++ src/utility_stmt_hooking.c | 65 +++++++++++++++++- 5 files changed, 236 insertions(+), 9 deletions(-) diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index ee71675f..3a3a5055 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -2,6 +2,9 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_column_type; +/* + * 
RANGE partitioning. + */ /* create new table (val int) */ CREATE TABLE test_column_type.test(val INT4 NOT NULL); SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); @@ -51,6 +54,106 @@ SELECT tableoid::regclass, * FROM test_column_type.test; test_column_type.test_1 | 1 (1 row) +SELECT drop_partitions('test_column_type.test'); +NOTICE: function test_column_type.test_upd_trig_func() does not exist, skipping +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition dispatch cache | 1 + partition parents cache | 5 +(4 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition dispatch cache | 1 + partition parents cache | 5 +(4 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition dispatch cache | 1 + partition parents cache | 5 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: function test_column_type.test_upd_trig_func() 
does not exist, skipping +NOTICE: 1 rows copied from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; DROP SCHEMA test_column_type CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to sequence test_column_type.test_seq DROP EXTENSION pg_pathman; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index ef0e470e..34f9a34c 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -5,6 +5,10 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ + /* create new table (val int) */ CREATE TABLE test_column_type.test(val INT4 NOT NULL); SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); @@ -24,6 +28,43 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; INSERT INTO test_column_type.test VALUES (1); SELECT tableoid::regclass, * FROM test_column_type.test; +SELECT drop_partitions('test_column_type.test'); +DROP TABLE test_column_type.test CASCADE; + + +/* + * HASH partitioning. 
+ */ + +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + +SELECT drop_partitions('test_column_type.test'); +DROP TABLE test_column_type.test CASCADE; + DROP SCHEMA test_column_type CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index 1dbe7673..ae214eeb 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -712,8 +712,9 @@ pathman_process_utility_hook(Node *parsetree, { if (IsPathmanReady()) { - Oid partition_relid; - AttrNumber partitioned_col; + Oid relation_oid; + PartType part_type; + AttrNumber attr_number; /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) @@ -730,12 +731,25 @@ pathman_process_utility_hook(Node *parsetree, } /* Override standard RENAME statement if needed */ - if (is_pathman_related_table_rename(parsetree, - &partition_relid, - &partitioned_col)) - PathmanRenameConstraint(partition_relid, - partitioned_col, + else if (is_pathman_related_table_rename(parsetree, + &relation_oid, + &attr_number)) + PathmanRenameConstraint(relation_oid, + 
attr_number, (const RenameStmt *) parsetree); + + /* Override standard ALTER COLUMN TYPE statement if needed */ + else if (is_pathman_related_alter_column_type(parsetree, + &relation_oid, + &attr_number, + &part_type) && + part_type == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot change type of column \"%s\"" + " of table \"%s\" partitioned by HASH", + get_attname(relation_oid, attr_number), + get_rel_name(relation_oid)))); } /* Call hooks set by other extensions if needed */ diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index 18f86e2e..83f2303a 100644 --- a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -13,6 +13,8 @@ #define COPY_STMT_HOOKING_H +#include "relation_info.h" + #include "postgres.h" #include "commands/copy.h" #include "nodes/nodes.h" @@ -23,6 +25,10 @@ bool is_pathman_related_copy(Node *parsetree); bool is_pathman_related_table_rename(Node *parsetree, Oid *partition_relid_out, AttrNumber *partitioned_col_out); +bool is_pathman_related_alter_column_type(Node *parsetree, + Oid *parent_relid_out, + AttrNumber *attr_number_out, + PartType *part_type_out); /* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2c859731..2f0b6fa6 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -15,7 +15,6 @@ #include "init.h" #include "utility_stmt_hooking.h" #include "partition_filter.h" -#include "relation_info.h" #include "access/htup_details.h" #include "access/sysattr.h" @@ -179,6 +178,70 @@ is_pathman_related_table_rename(Node *parsetree, return false; } +/* + * Is pg_pathman supposed to handle this ALTER COLUMN TYPE stmt? 
+ */ +bool +is_pathman_related_alter_column_type(Node *parsetree, + Oid *parent_relid_out, + AttrNumber *attr_number_out, + PartType *part_type_out) +{ + AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; + ListCell *lc; + Oid parent_relid; + const PartRelationInfo *prel; + + Assert(IsPathmanReady()); + + if (!IsA(alter_table_stmt, AlterTableStmt)) + return false; + + /* Are we going to modify some table? */ + if (alter_table_stmt->relkind != OBJECT_TABLE) + return false; + + /* Assume it's a parent, fetch its Oid */ + parent_relid = RangeVarGetRelid(alter_table_stmt->relation, + AccessShareLock, + false); + + /* Is parent partitioned? */ + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + { + /* Return 'parent_relid' and 'prel->parttype' */ + if (parent_relid_out) *parent_relid_out = parent_relid; + if (part_type_out) *part_type_out = prel->parttype; + } + else return false; + + /* Examine command list */ + foreach (lc, alter_table_stmt->cmds) + { + AlterTableCmd *alter_table_cmd = (AlterTableCmd *) lfirst(lc); + + if (!IsA(alter_table_cmd, AlterTableCmd)) + continue; + + /* Is it an ALTER COLUMN TYPE statement? */ + if (alter_table_cmd->subtype != AT_AlterColumnType) + continue; + + /* Is it a partitioned column? */ + if (get_attnum(parent_relid, alter_table_cmd->name) != prel->attnum) + continue; + + /* Return 'prel->attnum' */ + if (attr_number_out) *attr_number_out = prel->attnum; + + /* Success! 
*/ + return true; + } + + /* Default failure */ + return false; +} + /* * CopyGetAttnums - build an integer list of attnums to be copied From 69a751cad867f333b1eaf7bf8ac11f98505c2f3a Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 3 Apr 2017 14:00:17 +0300 Subject: [PATCH 0323/1124] comments edited slightly --- src/pl_funcs.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 3745c32b..5d22b687 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1164,7 +1164,6 @@ update_trigger_func(PG_FUNCTION_ARGS) */ key_name = get_attname(parent, prel->attnum); source_key = get_attnum(source_relid, key_name); - // target_key = get_attnum(target_relid, key_name); key = heap_getattr(new_tuple, source_key, source_tupdesc, &isnull); /* Find partition it should go into */ @@ -1174,18 +1173,18 @@ update_trigger_func(PG_FUNCTION_ARGS) if (target_relid == source_relid) PG_RETURN_POINTER(new_tuple); - /* TODO: probably should be another lock level */ - target_rel = heap_open(target_relid, RowExclusiveLock); - target_tupdesc = target_rel->rd_att; - /* Read tuple id */ ctid = heap_getsysattr(old_tuple, SelfItemPointerAttributeNumber, source_tupdesc, &isnull); + /* Open partition table */ + target_rel = heap_open(target_relid, RowExclusiveLock); + target_tupdesc = target_rel->rd_att; + /* - * Else if it's a different partition then build a TupleConversionMap + * As it is different partition we need to build a TupleConversionMap * between original partition and new one. 
And then do a convertation */ conversion_map = convert_tuples_by_name(source_tupdesc, From 095eb98e1930f502256527f7a73038d52962b7f4 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 3 Apr 2017 14:30:27 +0300 Subject: [PATCH 0324/1124] Fix pathlist hooks --- sql/pathman_basic.sql | 2 - src/hooks.c | 19 ++--- src/include/pathman.h | 11 ++- src/include/relation_info.h | 1 + src/include/utils.h | 1 + src/nodes_common.c | 146 +++++++++++++++++++++++++++--------- src/pg_pathman.c | 21 +----- src/relation_info.c | 1 + src/utils.c | 19 +++++ 9 files changed, 154 insertions(+), 67 deletions(-) diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 15c84b85..52a47057 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -11,8 +11,6 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); -:gdb -SELECT pg_sleep(10); SELECT pathman.create_hash_partitions('test.hash_rel', 'value + 1', 3); ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); diff --git a/src/hooks.c b/src/hooks.c index c8fb19af..0e2ede4e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -248,8 +248,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, Relation parent_rel; /* parent's relation (heap) */ Oid *children; /* selected children oids */ List *ranges, /* a list of IndexRanges */ - *wrappers, /* a list of WrapperNodes */ - *rel_part_clauses = NIL; /* clauses with part. 
column */ + *wrappers; /* a list of WrapperNodes */ PathKey *pathkeyAsc = NULL, *pathkeyDesc = NULL; double paramsel = 1.0; /* default part selectivity */ @@ -257,6 +256,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ListCell *lc; int i; Node *expr; + bool modify_append_nodes; /* Make copy of partitioning expression and fix Var's varno attributes */ expr = copyObject(prel->expr); @@ -306,6 +306,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = irange_list_intersection(ranges, wrap->rangeset); } + /* + * Walker should been have filled these parameter while checking. + * Runtime[Merge]Append is pointless if there are no params in clauses. + */ + modify_append_nodes = context.found_params; + /* Get number of selected partitions */ irange_len = irange_list_length(ranges); if (prel->enable_parent) @@ -382,12 +388,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pg_pathman_enable_runtime_merge_append)) return; - /* Check that rel's RestrictInfo contains partitioned column */ - rel_part_clauses = get_partitioned_attr_clauses(rel->baserestrictinfo, - prel, rel->relid); - - /* Runtime[Merge]Append is pointless if there are no params in clauses */ - if (!clause_contains_params((Node *) rel_part_clauses)) + if (!modify_append_nodes) return; /* Generate Runtime[Merge]Append paths if needed */ @@ -416,7 +417,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * Skip if neither rel->baserestrictinfo nor * ppi->ppi_clauses reference partition attribute */ - if (!(rel_part_clauses || ppi_part_clauses)) + if (!(modify_append_nodes || ppi_part_clauses)) continue; if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) diff --git a/src/include/pathman.h b/src/include/pathman.h index 6e3fd104..80672fbb 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -141,10 +141,12 @@ typedef struct typedef struct { - Node *prel_expr; /* expression from PartRelationInfo */ - const PartRelationInfo *prel; /* main partitioning structure */ - ExprContext *econtext; 
/* for ExecEvalExpr() */ - bool for_insert; /* are we in PartitionFilter now? */ + Node *prel_expr; /* expression from PartRelationInfo */ + const PartRelationInfo *prel; /* main partitioning structure */ + ExprContext *econtext; /* for ExecEvalExpr() */ + bool for_insert; /* are we in PartitionFilter now? */ + bool found_params; /* mark if left or right argument + of clause is Param */ } WalkerContext; /* Usual initialization procedure for WalkerContext */ @@ -154,6 +156,7 @@ typedef struct (context)->prel = (prel_info); \ (context)->econtext = (ecxt); \ (context)->for_insert = (for_ins); \ + (context)->found_params = (false); \ } while (0) /* Check that WalkerContext contains ExprContext (plan execution stage) */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index b55a4250..57003bae 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -139,6 +139,7 @@ typedef struct const char *attname; /* original expression */ Node *expr; /* planned expression */ + List *expr_vars; /* vars from expression, lazy */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ bool attbyval; /* is partitioned column stored by value? */ diff --git a/src/include/utils.h b/src/include/utils.h index 8ecaf46c..a476d219 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -28,6 +28,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); +bool match_expr_to_operand(Node *operand, Node *expr); /* * Misc. 
diff --git a/src/nodes_common.c b/src/nodes_common.c index bf8bde3a..cb91c254 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -8,6 +8,7 @@ * ------------------------------------------------------------------------ */ +#include "init.h" #include "nodes_common.h" #include "runtimeappend.h" #include "utils.h" @@ -128,41 +129,83 @@ replace_tlist_varnos(List *tlist, Index old_varno, Index new_varno) return temp_tlist; } +/* Check that one of arguments of OpExpr is expression */ +static bool +extract_vars(Node *node, PartRelationInfo *prel) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + prel->expr_vars = lappend(prel->expr_vars, node); + return false; + } + + return expression_tree_walker(node, extract_vars, (void *) prel); +} + + +/* + * This function fills 'expr_vars' attribute in PartRelationInfo. + * For now it's static because there are no other places where we need it. + */ +static inline List * +extract_vars_from_expression(PartRelationInfo *prel) +{ + if (prel->expr_vars == NIL) + { + MemoryContext ctx; + + prel->expr_vars = NIL; + ctx = MemoryContextSwitchTo(PathmanRelationCacheContext); + extract_vars(prel->expr, prel); + MemoryContextSwitchTo(ctx); + } + + return prel->expr_vars; +} + /* Append partition attribute in case it's not present in target list */ static List * append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel) { - ListCell *lc; - bool part_attr_found = false; + ListCell *lc, + *lc_var; + List *vars = extract_vars_from_expression((PartRelationInfo *) prel); + List *vars_not_found = NIL; - foreach (lc, tlist) + foreach (lc_var, vars) { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; + bool part_attr_found = false; + Var *expr_var = (Var *) lfirst(lc_var); + + foreach (lc, tlist) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *var = (Var *) te->expr; + + if (IsA(var, Var) && var->varoattno == expr_var->varattno) + { + part_attr_found = true; + 
break; + } + } - /* FIX this - if (IsA(var, Var) && var->varoattno == prel->attnum) - part_attr_found = true; - */ + if (!part_attr_found) + vars_not_found = lappend(vars_not_found, expr_var); } - /* FIX this - if (!part_attr_found) + foreach(lc, vars_not_found) { - Var *newvar = makeVar(relno, - prel->attnum, - prel->atttype, - prel->atttypmod, - prel->attcollid, - 0); - + Var *var = (Var *) lfirst(lc); Index last_item = list_length(tlist) + 1; - - tlist = lappend(tlist, makeTargetEntry((Expr *) newvar, + tlist = lappend(tlist, makeTargetEntry((Expr *) var, last_item, NULL, false)); - } */ + } + list_free(vars_not_found); return tlist; } @@ -242,6 +285,37 @@ unpack_runtimeappend_private(RuntimeAppendState *scan_state, CustomScan *cscan) scan_state->enable_parent = (bool) linitial_int(lthird(runtimeappend_private)); } +struct check_clause_context +{ + Node *prel_expr; + int count; +}; + +/* Check that one of arguments of OpExpr is expression */ +static bool +check_clause_for_expression(Node *node, struct check_clause_context *ctx) +{ + if (node == NULL) + return false; + + if (IsA(node, OpExpr)) + { + OpExpr *expr = (OpExpr *) node; + Node *left = linitial(expr->args), + *right = lsecond(expr->args); + + if (match_expr_to_operand(left, ctx->prel_expr)) + ctx->count += 1; + + if (match_expr_to_operand(right, ctx->prel_expr)) + ctx->count += 1; + + return false; + } + + return expression_tree_walker(node, check_clause_for_expression, (void *) ctx); +} + /* * Filter all available clauses and extract relevant ones. 
*/ @@ -250,27 +324,22 @@ get_partitioned_attr_clauses(List *restrictinfo_list, const PartRelationInfo *prel, Index partitioned_rel) { -#define AdjustAttno(attno) \ - ( (AttrNumber) (attno + FirstLowInvalidHeapAttributeNumber) ) - List *result = NIL; ListCell *l; foreach(l, restrictinfo_list) { - RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - Bitmapset *varattnos = NULL; - int part_attno; + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + struct check_clause_context ctx; Assert(IsA(rinfo, RestrictInfo)); - pull_varattnos((Node *) rinfo->clause, partitioned_rel, &varattnos); - /* FIX this - if (bms_get_singleton_member(varattnos, &part_attno) && - AdjustAttno(part_attno) == prel->attnum) - { + ctx.count = 0; + ctx.prel_expr = prel->expr; + check_clause_for_expression((Node *) rinfo->clause, &ctx); + + if (ctx.count == 1) result = lappend(result, rinfo->clause); - } */ } return result; } @@ -554,14 +623,23 @@ rescan_append_common(CustomScanState *node) WalkerContext wcxt; Oid *parts; int nparts; + Node *prel_expr; prel = get_pathman_relation_info(scan_state->relid); Assert(prel); + /* Prepare expression */ + prel_expr = prel->expr; + if (INDEX_VAR != 1) + { + prel_expr = copyObject(prel_expr); + ChangeVarNodes(prel_expr, 1, INDEX_VAR, 0); + } + /* First we select all available partitions... 
*/ ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); - InitWalkerContext(&wcxt, INDEX_VAR, prel, econtext, false); + InitWalkerContext(&wcxt, prel_expr, prel, econtext, false); foreach (lc, scan_state->custom_exprs) { WrapperNode *wn; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9192c666..6dfce261 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -99,7 +99,6 @@ static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); -static bool match_expr_to_operand(Node *operand, Node *expr); /* We can transform Param into Const provided that 'econtext' is available */ #define IsConstValue(wcxt, node) \ ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) @@ -1052,6 +1051,9 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } else if (IsA(param, Param) || IsA(param, Var)) { + if (IsA(param, Param)) + context->found_params = true; + handle_binary_opexpr_param(prel, result, var); return result; } @@ -1153,23 +1155,6 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, } -/* - * Compare clause operand with our expression - */ -static bool -match_expr_to_operand(Node *operand, Node *expr) -{ - /* strip relabeling for both operand and expr */ - if (operand && IsA(operand, RelabelType)) - operand = (Node *) ((RelabelType *) operand)->arg; - - if (expr && IsA(expr, RelabelType)) - expr = (Node *) ((RelabelType *) expr)->arg; - - /* compare expressions and return result right away */ - return equal(expr, operand); -} - /* * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where KEY is * partition expression and PARAM is whatever. 
diff --git a/src/relation_info.c b/src/relation_info.c index f9665488..949da871 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -187,6 +187,7 @@ refresh_pathman_relation_info(Oid relid, prel->attname = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); prel->expr = (Node *) stringToNode(expr); + prel->expr_vars = NIL; fix_opfuncids(prel->expr); MemoryContextSwitchTo(oldcontext); diff --git a/src/utils.c b/src/utils.c index ec1b1dd9..219779c7 100644 --- a/src/utils.c +++ b/src/utils.c @@ -467,3 +467,22 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ return interval_binary; } + + +/* + * Compare clause operand with expression + */ +bool +match_expr_to_operand(Node *operand, Node *expr) +{ + /* strip relabeling for both operand and expr */ + if (operand && IsA(operand, RelabelType)) + operand = (Node *) ((RelabelType *) operand)->arg; + + if (expr && IsA(expr, RelabelType)) + expr = (Node *) ((RelabelType *) expr)->arg; + + /* compare expressions and return result right away */ + return equal(expr, operand); +} + From d33f4e3118697d35e7c18bbf3ebc263889f9c053 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 3 Apr 2017 20:08:02 +0300 Subject: [PATCH 0325/1124] update pg_pathman.control --- pg_pathman.control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_pathman.control b/pg_pathman.control index 280f2aa4..bace115b 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension -comment 'Partitioning tool' +comment = 'Partitioning tool for PostgreSQL' default_version = '1.3' -module_pathname='$libdir/pg_pathman' +module_pathname = '$libdir/pg_pathman' From 91506a3901c8714359be763f6bddbf99f416e39b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 12:15:48 +0300 Subject: [PATCH 0326/1124] Fix bug related with disabling hooks --- src/hooks.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/hooks.c b/src/hooks.c 
index 0e2ede4e..d06cc594 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -511,10 +511,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) else result = standard_planner(parse, cursorOptions, boundParams); - if (!pathman_hooks_enabled) - return result; - - if (pathman_ready) + if (pathman_ready && pathman_hooks_enabled) { /* Give rowmark-related attributes correct names */ ExecuteForPlanTree(result, postprocess_lock_rows); From 74778cee444fdf0cce91e217d93ceab7829de1c2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 12:50:37 +0300 Subject: [PATCH 0327/1124] Return null check if there is only column in expression --- sql/pathman_basic.sql | 2 +- src/partition_creation.c | 28 ++++++++++++++++++++++++++++ src/pl_funcs.c | 3 +-- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 52a47057..36dd7e8d 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -11,7 +11,7 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); -SELECT pathman.create_hash_partitions('test.hash_rel', 'value + 1', 3); +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; diff --git a/src/partition_creation.c b/src/partition_creation.c index 2ae12a3d..27e288f1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1766,6 +1766,34 @@ get_part_expression_info(Oid relid, const char *expr_string, /* Keep raw expression */ expr_info->raw_expr = get_raw_expression(relid, expr_string, &query_string, &parsetree); + + /* If expression is just column we check that is not null */ + if (IsA(expr_info->raw_expr, ColumnRef)) + { + ColumnRef *col = (ColumnRef *) 
expr_info->raw_expr; + if (list_length(col->fields) == 1) + { + HeapTuple tp; + bool result; + char *attname = strVal(linitial(col->fields)); + + /* check if attribute is nullable */ + tp = SearchSysCacheAttName(relid, attname); + if (HeapTupleIsValid(tp)) + { + Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); + result = !att_tup->attnotnull; + ReleaseSysCache(tp); + } + else + elog(ERROR, "Cannot find type name for attribute \"%s\" " + "of relation \"%s\"", + attname, get_rel_name_or_relid(relid)); + + if (result) + elog(ERROR, "partitioning key \"%s\" must be NOT NULL", attname); + } + } expr_info->expr_datum = (Datum) 0; /* We don't need pathman activity initialization for this relation yet */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1331947f..664b1b2a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -57,8 +57,6 @@ PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); -PG_FUNCTION_INFO_V1( is_attribute_nullable ); -//PG_FUNCTION_INFO_V1( is_expression_suitable ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); @@ -602,6 +600,7 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } + /* * ------------------------ * Useful string builders From f46bd2263aa62ad675058134924eaf19e9ba9145 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 14:11:07 +0300 Subject: [PATCH 0328/1124] Fix test cases related with NOT NULL check --- expected/pathman_basic.out | 4 ++-- sql/pathman_basic.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 8877d99e..c8e4e7a0 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -131,10 +131,10 @@ CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', 
'2015-04-30', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); ERROR: partitioning key "dt" must be NOT NULL ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: not enough partitions to fit all values of "dt" SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 36dd7e8d..fe09f9e4 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -40,8 +40,8 @@ CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM ONLY test.range_rel; From 44dce9d9121b1ab1e474ad5dc3d161cc77d33d32 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 18:28:18 +0300 Subject: [PATCH 0329/1124] Fix basic tests --- expected/pathman_basic.out | 18 +++++++++--------- src/partition_creation.c | 18 +++++++++++------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git 
a/expected/pathman_basic.out b/expected/pathman_basic.out index c8e4e7a0..e9ddd48d 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1288,7 +1288,7 @@ Triggers: Indexes: "hash_rel_extern_pkey" PRIMARY KEY, btree (id) Check constraints: - "pathman_hash_rel_extern_2_check" CHECK (pathman.get_hash_part_idx(hashint4(value), 3) = 0) + "pathman_hash_rel_extern_check" CHECK (pathman.get_hash_part_idx(hashint4(value), 3) = 0) Inherits: test.hash_rel INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; @@ -1424,16 +1424,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval -----------------+---------+----------+---------------- - test.range_rel | dt | 2 | @ 10 days + partrel | attname | parttype | range_interval | expression_p | atttype +----------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} | 1114 (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 20 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval ----------+---------+----------+---------------- + partrel | attname | parttype | range_interval | expression_p | atttype +---------+---------+----------+----------------+--------------+--------- (0 rows) /* Check overlaps */ @@ -1596,9 +1596,9 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 5 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval ---------------------+---------+----------+---------------- - 
test.num_range_rel | id | 2 | 1000 + partrel | attname | parttype | range_interval | expression_p | atttype +--------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------+--------- + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 23 (1 row) CREATE TABLE test."RangeRel" ( diff --git a/src/partition_creation.c b/src/partition_creation.c index 27e288f1..22f35f36 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -559,7 +559,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); last_partition = create_single_range_partition_internal(parent_relid, - value_type, + range_bound_type, &bounds[0], &bounds[1], NULL, NULL); @@ -1713,7 +1713,7 @@ get_raw_expression(Oid relid, const char *expr, char **query_string_out, SelectStmt *select_stmt; ResTarget *target; - char *fmt = "SELECT (%s) FROM ONLY %s.%s"; + char *fmt = "SELECT (%s) FROM ONLY %s.\"%s\""; char *relname = get_rel_name(relid), *namespace_name = get_namespace_name(get_rel_namespace(relid)); List *parsetree_list; @@ -1727,14 +1727,14 @@ get_raw_expression(Oid relid, const char *expr, char **query_string_out, *query_string_out = query_string; } - select_stmt = (SelectStmt *) lfirst(list_head(parsetree_list)); + select_stmt = (SelectStmt *) linitial(parsetree_list); if (parsetree) { *parsetree = (Node *) select_stmt; } - target = (ResTarget *) lfirst(list_head(select_stmt->targetList)); + target = (ResTarget *) linitial(select_stmt->targetList); result = (Node *) target->val; return result; } @@ -1810,10 +1810,10 @@ get_part_expression_info(Oid relid, const char *expr_string, * with more or less understable text */ querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 
0); - query = (Query *) lfirst(list_head(querytree_list)); + query = (Query *) linitial(querytree_list); /* expr_node is node that we need for further use */ - target_entry = lfirst(list_head(query->targetList)); + target_entry = linitial(query->targetList); expr_node = (Node *) target_entry->expr; /* Now we have node and can determine type of that node */ @@ -1833,7 +1833,11 @@ get_part_expression_info(Oid relid, const char *expr_string, /* Plan this query. We reuse 'expr_node' here */ plan = pg_plan_query(query, 0, NULL); - target_entry = lfirst(list_head(plan->planTree->targetlist)); + if (IsA(plan->planTree, IndexOnlyScan)) + target_entry = linitial(((IndexOnlyScan *) plan->planTree)->indextlist); + else + target_entry = linitial(plan->planTree->targetlist); + expr_node = (Node *) target_entry->expr; expr_node = eval_const_expressions(NULL, expr_node); validate_part_expression(expr_node, NULL); From 69028ca82bf10cccd844d8741b7ee9217e6626d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Apr 2017 18:46:25 +0300 Subject: [PATCH 0330/1124] small changes in validate_hash_constraint() --- src/include/init.h | 2 +- src/include/relation_info.h | 2 +- src/init.c | 68 ++++++++++++++++++++++++------------- src/relation_info.c | 4 +-- 4 files changed, 48 insertions(+), 28 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 039e31df..ee3d0158 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -218,7 +218,7 @@ bool validate_range_constraint(const Expr *expr, bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, const AttrNumber part_attno, - uint32 *part_hash); + uint32 *part_idx); #endif /* PATHMAN_INIT_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 710b5e40..43bcab08 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -176,7 +176,7 @@ typedef struct bool byval; /* For HASH partitions */ - uint32 hash; + uint32 part_idx; } PartBoundInfo; /* diff --git 
a/src/init.c b/src/init.c index b41b9d6e..7e983993 100644 --- a/src/init.c +++ b/src/init.c @@ -988,27 +988,27 @@ read_opexpr_const(const OpExpr *opexpr, /* * Validate hash constraint. It MUST have this exact format: * - * get_hash_part_idx(TYPE_HASH_PROC(VALUE), PARTITIONS_COUNT) = CUR_PARTITION_HASH + * get_hash_part_idx(TYPE_HASH_PROC(VALUE), PARTITIONS_COUNT) = CUR_PARTITION_IDX * - * Writes 'part_hash' hash value for this partition on success. + * Writes 'part_idx' hash value for this partition on success. */ bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, const AttrNumber part_attno, - uint32 *part_hash) + uint32 *part_idx) { const TypeCacheEntry *tce; const OpExpr *eq_expr; const FuncExpr *get_hash_expr, *type_hash_proc_expr; - const Var *var; /* partitioned column */ if (!expr) return false; if (!IsA(expr, OpExpr)) return false; + eq_expr = (const OpExpr *) expr; /* Check that left expression is a function call */ @@ -1027,31 +1027,51 @@ validate_hash_constraint(const Expr *expr, { Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(VALUE) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ - Const *cur_partition_hash; /* hash value for this partition */ + Const *cur_partition_idx; /* hash value for this partition */ + Node *hash_arg; if (!IsA(first, FuncExpr) || !IsA(second, Const)) return false; type_hash_proc_expr = (FuncExpr *) first; - /* Check that function is indeed TYPE_HASH_PROC */ - if (type_hash_proc_expr->funcid != prel->hash_proc || - !(IsA(linitial(type_hash_proc_expr->args), Var) || - IsA(linitial(type_hash_proc_expr->args), RelabelType))) - { + /* Check that function is indeed TYPE_HASH_PROC() */ + if (type_hash_proc_expr->funcid != prel->hash_proc) return false; - } - /* Extract argument into 'var' */ - if (IsA(linitial(type_hash_proc_expr->args), RelabelType)) - var = (Var *) ((RelabelType *) linitial(type_hash_proc_expr->args))->arg; - else - var = (Var *) 
linitial(type_hash_proc_expr->args); - - /* Check that 'var' is the partitioning key attribute */ - if (var->varoattno != part_attno) + /* There should be exactly 1 argument */ + if (list_length(type_hash_proc_expr->args) != 1) return false; + /* Extract arg of TYPE_HASH_PROC() */ + hash_arg = (Node *) linitial(type_hash_proc_expr->args); + + /* Check arg of TYPE_HASH_PROC() */ + switch (nodeTag(hash_arg)) + { + case T_RelabelType: + { + hash_arg = (Node *) ((RelabelType *) hash_arg)->arg; + } + /* FALL THROUGH (no break) */ + + case T_Var: + { + Var *var = (Var *) hash_arg; + + if (!IsA(var, Var)) + return false; + + /* Check that 'var' is the partitioning key attribute */ + if (var->varoattno != part_attno) + return false; + } + break; + + default: + return false; + } + /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) return false; @@ -1060,14 +1080,15 @@ validate_hash_constraint(const Expr *expr, if (!IsA(lsecond(eq_expr->args), Const)) return false; - cur_partition_hash = lsecond(eq_expr->args); + /* Fetch CUR_PARTITION_IDX */ + cur_partition_idx = lsecond(eq_expr->args); /* Check that CUR_PARTITION_HASH is NOT NULL */ - if (cur_partition_hash->constisnull) + if (cur_partition_idx->constisnull) return false; - *part_hash = DatumGetUInt32(cur_partition_hash->constvalue); - if (*part_hash >= PrelChildrenCount(prel)) + *part_idx = DatumGetUInt32(cur_partition_idx->constvalue); + if (*part_idx >= PrelChildrenCount(prel)) return false; return true; /* everything seems to be ok */ @@ -1075,7 +1096,6 @@ validate_hash_constraint(const Expr *expr, return false; } - /* needed for find_inheritance_children_array() function */ static int oid_cmp(const void *p1, const void *p2) diff --git a/src/relation_info.c b/src/relation_info.c index e2083bf7..1ff2fd4a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -479,7 +479,7 @@ 
fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: - prel->children[bound_info->hash] = bound_info->child_rel; + prel->children[bound_info->part_idx] = bound_info->child_rel; break; case PT_RANGE: @@ -1047,7 +1047,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, { if (!validate_hash_constraint(constraint_expr, prel, part_attno, - &pbin->hash)) + &pbin->part_idx)) { DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, From 6d9494d2d96b49281a9774314d59fac7b90bd6d8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 19:21:01 +0300 Subject: [PATCH 0331/1124] Fix some tests --- expected/pathman_calamity.out | 45 +++++++------------------------ expected/pathman_interval.out | 24 ++++++++--------- expected/pathman_permissions.out | 6 ++--- expected/pathman_utility_stmt.out | 10 +++---- sql/pathman_calamity.sql | 16 +++-------- 5 files changed, 34 insertions(+), 67 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index b698bed1..ffa25909 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -206,31 +206,6 @@ NOTICE: function calamity.part_test_upd_trig_func() does not exist, skipping (1 row) DELETE FROM calamity.part_test; -/* check function build_hash_condition() */ -SELECT build_hash_condition('int4', 'val', 10, 1); - build_hash_condition -------------------------------------------------- - public.get_hash_part_idx(hashint4(val), 10) = 1 -(1 row) - -SELECT build_hash_condition('text', 'val', 10, 1); - build_hash_condition -------------------------------------------------- - public.get_hash_part_idx(hashtext(val), 10) = 1 -(1 row) - -SELECT build_hash_condition('int4', 'val', 1, 1); -ERROR: 'partition_index' must be lower than 'partitions_count' -SELECT build_hash_condition('int4', 'val', 10, 20); -ERROR: 'partition_index' must be lower than 'partitions_count' -SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; - ?column? 
----------- - t -(1 row) - -SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); -ERROR: no hash function for type calamity.part_test /* check function build_range_condition() */ SELECT build_range_condition('calamity.part_test', 'val', 10, 20); build_range_condition @@ -318,7 +293,7 @@ SELECT get_partition_key_type(NULL) IS NULL; SELECT build_check_constraint_name('calamity.part_test', 1::int2); build_check_constraint_name ----------------------------- - pathman_part_test_1_check + pathman_part_test_check (1 row) SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; @@ -343,7 +318,7 @@ SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; SELECT build_check_constraint_name('calamity.part_test', 'val'); build_check_constraint_name ----------------------------- - pathman_part_test_1_check + pathman_part_test_check (1 row) SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; @@ -469,9 +444,9 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ -ERROR: 'attname' should not be NULL +ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ -ERROR: relation "part_test" has no column "V_A_L" +ERROR: Cannot find type name for attribute "v_a_l" of relation "part_test" SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ add_to_pathman_config ----------------------- @@ -510,7 +485,7 @@ SELECT create_hash_partitions('calamity.part_ok', 'val', 4); CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); -ERROR: constraint "pathman_wrong_partition_1_check" of partition 
"wrong_partition" does not exist +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- @@ -522,7 +497,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); -ERROR: constraint "pathman_wrong_partition_1_check" of partition "wrong_partition" does not exist +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ QUERY PLAN ----------------------------- @@ -534,7 +509,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena (5 rows) ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check +ADD CONSTRAINT pathman_wrong_partition_check CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" @@ -548,9 +523,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena -> Seq Scan on part_ok_3 (5 rows) -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check +ADD CONSTRAINT pathman_wrong_partition_check CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" @@ -564,7 +539,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is ena -> Seq Scan on part_ok_3 (5 rows) -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT 
pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; /* check GUC variable */ SHOW pg_pathman.enable; pg_pathman.enable diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index ff7340ea..73fad82d 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -38,9 +38,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval --------------------+---------+----------+---------------- - test_interval.abc | id | 2 | 1000 + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 21 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 21 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -81,9 +81,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval --------------------+---------+----------+---------------- - test_interval.abc | id | 2 | 1000 + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -124,9 +124,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM 
pathman_config; - partrel | attname | parttype | range_interval --------------------+---------+----------+---------------- - test_interval.abc | id | 2 | 1000 + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 20 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -157,9 +157,9 @@ SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); (1 row) SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval --------------------+---------+----------+---------------- - test_interval.abc | dt | 2 | @ 1 mon + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | dt | 2 | @ 1 mon | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1082 (1 row) DROP TABLE test_interval.abc CASCADE; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 55727aad..698b7c98 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -33,9 +33,9 @@ NOTICE: sequence "user1_table_seq" does not exist, skipping /* Should be able to see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval --------------------------+---------+----------+---------------- - permissions.user1_table | id | 2 | 10 + partrel | attname | parttype | range_interval | expression_p | atttype 
+-------------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 (1 row) SELECT * FROM pathman_config_params; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index f6642d1b..e7e09070 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -159,7 +159,7 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; /* COPY FROM (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; -ERROR: partitioned column's value should not be NULL +ERROR: partition expression's value should not be NULL /* COPY FROM (we don't support FREEZE) */ COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); ERROR: freeze is not supported for partitioned tables @@ -278,7 +278,7 @@ ALTER TABLE rename.test_0 RENAME TO test_one; a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | b | integer | | plain | | Check constraints: - "pathman_test_one_1_check" CHECK (get_hash_part_idx(hashint4(a), 3) = 0) + "pathman_test_one_check" CHECK (get_hash_part_idx(hashint4(a), 3) = 0) Inherits: rename.test /* Generates check constraint for relation */ @@ -313,7 +313,7 @@ ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | b | integer | | plain | | Check constraints: - "pathman_test_inh_1_1_check" CHECK (a < 100) + "pathman_test_inh_1_check" CHECK (a < 100) Inherits: rename.test_inh /* Check that plain tables are not affected too */ @@ -332,7 +332,7 @@ SELECT add_constraint('rename.plain_test_renamed', 'a'); a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | b | integer | | 
plain | | Check constraints: - "pathman_plain_test_renamed_1_check" CHECK (a < 100) + "pathman_plain_test_renamed_check" CHECK (a < 100) ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; \d+ rename.plain_test @@ -342,7 +342,7 @@ ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | b | integer | | plain | | Check constraints: - "pathman_plain_test_renamed_1_check" CHECK (a < 100) + "pathman_plain_test_renamed_check" CHECK (a < 100) DROP SCHEMA rename CASCADE; NOTICE: drop cascades to 7 other objects diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index cbeea2f9..c1278a07 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -90,14 +90,6 @@ SELECT drop_partitions('calamity.part_test', true); DELETE FROM calamity.part_test; -/* check function build_hash_condition() */ -SELECT build_hash_condition('int4', 'val', 10, 1); -SELECT build_hash_condition('text', 'val', 10, 1); -SELECT build_hash_condition('int4', 'val', 1, 1); -SELECT build_hash_condition('int4', 'val', 10, 20); -SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; -SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); - /* check function build_range_condition() */ SELECT build_range_condition('calamity.part_test', 'val', 10, 20); SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); @@ -204,18 +196,18 @@ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check +ADD CONSTRAINT pathman_wrong_partition_check CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT 
pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; ALTER TABLE calamity.wrong_partition -ADD CONSTRAINT pathman_wrong_partition_1_check +ADD CONSTRAINT pathman_wrong_partition_check CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ -ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_1_check; +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; /* check GUC variable */ SHOW pg_pathman.enable; From c7d8fd4491a6defe0735e7b314de57be5cb5146a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 4 Apr 2017 19:33:26 +0300 Subject: [PATCH 0332/1124] Fix permission tests --- expected/pathman_permissions.out | 3 +-- src/pl_funcs.c | 7 +++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 698b7c98..243397f8 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -19,8 +19,7 @@ GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" -ERROR: new row violates row-level security policy for table "pathman_config" +ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" /* Should be ok */ SET ROLE user1; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 664b1b2a..29bdb1be 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -723,6 +723,13 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!check_relation_exists(relid)) elog(ERROR, "Invalid relation %u", relid); + 
if (!check_security_policy_internal(relid, GetUserId())) + { + elog(ERROR, "only the owner or superuser can change " + "partitioning configuration of table \"%s\"", + get_rel_name_or_relid(relid)); + } + /* Select partitioning type using 'range_interval' */ parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; From 495e036e05faa69baa4162d26d56c3d02e1e7a50 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Apr 2017 19:33:32 +0300 Subject: [PATCH 0333/1124] sligtly fix regression tests --- expected/pathman_column_type.out | 2 -- 1 file changed, 2 deletions(-) diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 3a3a5055..389ba666 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -55,7 +55,6 @@ SELECT tableoid::regclass, * FROM test_column_type.test; (1 row) SELECT drop_partitions('test_column_type.test'); -NOTICE: function test_column_type.test_upd_trig_func() does not exist, skipping NOTICE: 1 rows copied from test_column_type.test_1 NOTICE: 0 rows copied from test_column_type.test_2 NOTICE: 0 rows copied from test_column_type.test_3 @@ -142,7 +141,6 @@ SELECT tableoid::regclass, * FROM test_column_type.test; (1 row) SELECT drop_partitions('test_column_type.test'); -NOTICE: function test_column_type.test_upd_trig_func() does not exist, skipping NOTICE: 1 rows copied from test_column_type.test_0 NOTICE: 0 rows copied from test_column_type.test_1 NOTICE: 0 rows copied from test_column_type.test_2 From b31d607cc20b8ca1267afc76cacd4ed1120d66e8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 5 Apr 2017 15:48:47 +0300 Subject: [PATCH 0334/1124] Fix rescan_append_common --- src/nodes_common.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index cb91c254..1d8985bf 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -612,6 +612,27 @@ end_append_common(CustomScanState *node) 
hash_destroy(scan_state->children_table); } +/* + * This function is similar to ChangeVarNodes, but changes only + * varno attributes, but doesn't change varnoold attribute + */ +static bool +change_only_varnos(Node *node, const int *idx) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + Assert(var->varno == 1); + var->varno = *idx; + return false; + } + + return expression_tree_walker(node, change_only_varnos, (void *) idx); +} + void rescan_append_common(CustomScanState *node) { @@ -625,16 +646,14 @@ rescan_append_common(CustomScanState *node) int nparts; Node *prel_expr; + const int index_var = INDEX_VAR; + prel = get_pathman_relation_info(scan_state->relid); Assert(prel); /* Prepare expression */ - prel_expr = prel->expr; - if (INDEX_VAR != 1) - { - prel_expr = copyObject(prel_expr); - ChangeVarNodes(prel_expr, 1, INDEX_VAR, 0); - } + prel_expr = copyObject(prel->expr); + change_only_varnos(prel_expr, &index_var); /* First we select all available partitions... 
*/ ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); From d18417109b4226359df09315c725162b411cf681 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 16:48:28 +0300 Subject: [PATCH 0335/1124] huge refactoring, fix code style and various mistakes --- expected/pathman_basic.out | 2 +- expected/pathman_calamity.out | 6 +- hash.sql | 2 +- init.sql | 116 ++++--- range.sql | 87 +++-- src/include/init.h | 6 +- src/include/partition_creation.h | 5 +- src/include/partition_filter.h | 1 + src/include/relation_info.h | 9 +- src/include/utils.h | 4 +- src/init.c | 47 ++- src/partition_creation.c | 100 +++--- src/partition_filter.c | 2 +- src/pathman_workers.c | 4 +- src/pl_funcs.c | 535 +++++++++++++++---------------- src/pl_range_funcs.c | 362 +++++++++------------ src/utils.c | 34 +- 17 files changed, 643 insertions(+), 679 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index df5dd77b..69c677d7 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1276,7 +1276,7 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') Indexes: "hash_rel_0_pkey" PRIMARY KEY, btree (id) Triggers: - hash_rel_upd_trig BEFORE UPDATE OF value ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.update_trigger_func() + hash_rel_upd_trig BEFORE UPDATE OF value ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.pathman_update_trigger_func() \d+ test.hash_rel_extern Table "test.hash_rel_extern" diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index dbc81828..1910da63 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -105,13 +105,13 @@ DELETE FROM calamity.part_test; /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ -ERROR: 'partition_names' and 'tablespaces' may not be empty +ERROR: array should not 
be empty SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ -ERROR: 'partition_names' and 'tablespaces' may not contain NULLs +ERROR: array should not contain NULLs SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ -ERROR: 'partition_names' and 'tablespaces' may contain only 1 dimension +ERROR: array should contain only 1 dimension SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ ERROR: size of 'partition_names' must be equal to 'partitions_count' diff --git a/hash.sql b/hash.sql index b1bd4545..9ec00791 100644 --- a/hash.sql +++ b/hash.sql @@ -110,7 +110,7 @@ BEGIN END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.tuple_format_is_convertable(parent_relid, new_partition) THEN + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; diff --git a/init.sql b/init.sql index c1542166..e4feeb19 100644 --- a/init.sql +++ b/init.sql @@ -508,15 +508,6 @@ END $$ LANGUAGE plpgsql STRICT; -/* - * Check that tuple from first relation could be converted to fit the second one - */ -CREATE OR REPLACE FUNCTION @extschema@.tuple_format_is_convertable( - relation1 OID, - relation2 OID) -RETURNS BOOL AS 'pg_pathman', 'tuple_format_is_convertable' -LANGUAGE C; - /* * DDL trigger that removes entry from pathman_config table. 
*/ @@ -545,30 +536,6 @@ END $$ LANGUAGE plpgsql; -/* - * Function for update triggers - */ -CREATE OR REPLACE FUNCTION @extschema@.update_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'update_trigger_func' -LANGUAGE C; - -/* - * Creates an update trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers(parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_update_triggers' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( - parent_relid REGCLASS, - partition_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.is_update_trigger_enabled(parent_relid REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'is_update_trigger_enabled' -LANGUAGE C STRICT; - /* * Drop triggers */ @@ -742,6 +709,49 @@ END $$ LANGUAGE plpgsql; +/* + * Check if tuple from first relation can be converted to fit the second one. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( + relation1 REGCLASS, + relation2 REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' +LANGUAGE C STRICT; + + +/* + * Function for UPDATE triggers. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_update_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_update_trigger_func' +LANGUAGE C STRICT; + +/* + * Creates UPDATE triggers. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_update_triggers' +LANGUAGE C STRICT; + +/* + * Creates single UPDATE trigger. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' +LANGUAGE C STRICT; + +/* + * Check if relation has pg_pathman's UPDATE trigger. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( + parent_relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' +LANGUAGE C STRICT; + + /* * Partitioning key */ @@ -845,6 +855,15 @@ CREATE OR REPLACE FUNCTION @extschema@.is_date_type( RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' LANGUAGE C STRICT; +/* + * Check if TYPE supports the specified operator. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( + type_oid OID, + opname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' +LANGUAGE C; + /* * Build check constraint name for a specified relation's column. @@ -862,13 +881,16 @@ RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' LANGUAGE C STRICT; /* - * Build update trigger and its underlying function's names. + * Build UPDATE trigger's name. */ CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' LANGUAGE C STRICT; +/* + * Buld UPDATE trigger function's name. + */ CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' @@ -904,18 +926,6 @@ RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' LANGUAGE C STRICT; -/* - * DEBUG: Place this inside some plpgsql fuction and set breakpoint. - */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() -RETURNS VOID AS 'pg_pathman', 'debug_capture' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' -LANGUAGE C STRICT; - - /* * Invoke init_callback on RANGE partition. */ @@ -938,12 +948,14 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; + /* - * + * DEBUG: Place this inside some plpgsql fuction and set breakpoint. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( - type_oid OID, - opname TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' -LANGUAGE C; +CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +RETURNS VOID AS 'pg_pathman', 'debug_capture' +LANGUAGE C STRICT; +CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 9cbaa336..d5a5262e 100644 --- a/range.sql +++ b/range.sql @@ -164,7 +164,7 @@ BEGIN IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, - @extschema@.generate_bounds(start_value, p_interval, p_count), + @extschema@.generate_range_bounds(start_value, p_interval, p_count), NULL, NULL); END IF; @@ -263,7 +263,7 @@ BEGIN IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, - @extschema@.generate_bounds(start_value, p_interval, p_count), + @extschema@.generate_range_bounds(start_value, p_interval, p_count), NULL, NULL); END IF; @@ -465,31 +465,6 @@ END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( - parent_relid REGCLASS, - bounds ANYARRAY, - relnames TEXT[], - tablespaces TEXT[]) -RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' -LANGUAGE C; - - -CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( - p_start ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER) -RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION @extschema@.generate_bounds( - p_start ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER) -RETURNS ANYARRAY AS 'pg_pathman', 'generate_bounds' -LANGUAGE C; - - /* * Split RANGE partition */ @@ -584,15 +559,6 @@ END $$ LANGUAGE plpgsql; -/* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' -LANGUAGE C STRICT; - /* * The special case of merging two partitions */ @@ -630,7 +596,7 @@ BEGIN IF NOT @extschema@.is_date_type(v_atttype) AND NOT @extschema@.is_operator_supported(v_atttype, '+') THEN - RAISE EXCEPTION 'Type % doesn''t support ''+'' operator', v_atttype::regtype; + RAISE EXCEPTION 'type % does not support ''+'' operator', v_atttype::REGTYPE; END IF; SELECT range_interval @@ -740,7 +706,7 @@ BEGIN IF NOT @extschema@.is_date_type(v_atttype) AND NOT @extschema@.is_operator_supported(v_atttype, '-') THEN - RAISE EXCEPTION 'Type % doesn''t support ''-'' operator', v_atttype::regtype; + RAISE EXCEPTION 'type % does not support ''-'' operator', v_atttype::REGTYPE; END IF; SELECT range_interval @@ -1004,7 +970,7 @@ BEGIN /* check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.tuple_format_is_convertable(parent_relid, partition_relid) THEN + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; @@ -1035,7 +1001,7 @@ BEGIN INTO v_init_callback; /* If update trigger is enabled then create one for this partition */ - if @extschema@.is_update_trigger_enabled(parent_relid) THEN + if @extschema@.has_update_trigger(parent_relid) THEN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; @@ -1102,9 +1068,18 @@ END $$ LANGUAGE plpgsql; + /* - * Drops partition and expands the next partition so that it cover dropped - * one + * Merge multiple partitions. All data will be copied to the first one. + * The rest of partitions will be dropped. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Drops partition and expands the next partition so that it cover dropped one * * This function was written in order to support Oracle-like ALTER TABLE ... * DROP PARTITION. In Oracle partitions only have upper bound and when @@ -1115,6 +1090,15 @@ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( + parent_relid REGCLASS, + bounds ANYARRAY, + relnames TEXT[], + tablespaces TEXT[]) +RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' +LANGUAGE C; + /* * Creates new RANGE partition. Returns partition name. * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). @@ -1145,6 +1129,7 @@ CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C; + /* * Returns N-th range (as an array of two elements). */ @@ -1176,10 +1161,18 @@ RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' LANGUAGE C; /* - * Needed for an UPDATE trigger. + * Generate range bounds starting with 'p_start' using 'p_interval'. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( - parent_relid REGCLASS, - value ANYELEMENT) -RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C; diff --git a/src/include/init.h b/src/include/init.h index ee3d0158..f03d7564 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -191,14 +191,18 @@ find_children_status find_inheritance_children_array(Oid parentrelId, uint32 *children_size, Oid **children); + char *build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno); - char *build_check_constraint_name_relname_internal(const char *relname, AttrNumber attno); char *build_sequence_name_internal(Oid relid); +char *build_update_trigger_name_internal(Oid relid); +char *build_update_trigger_func_name_internal(Oid relid); + + bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index faa70f7c..30ed2418 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -78,10 +78,11 @@ Node * build_raw_hash_check_tree(char *attname, void drop_check_constraint(Oid relid, AttrNumber attnum); /* Update triggers */ -void create_single_update_trigger_internal(Oid relid, +void create_single_update_trigger_internal(Oid partition_relid, const char *trigname, const char *attname); -bool is_update_trigger_enabled_internal(Oid parent); + +bool has_update_trigger_internal(Oid parent); /* Partitioning callback type */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 
af8d0993..893200af 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -28,6 +28,7 @@ #define ERR_PART_ATTR_NULL "partitioned column's value should not be NULL" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" +#define ERR_PART_DESC_CONVERT "could not convert row type for partition" /* diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 0fb046bf..9ee629a1 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -92,11 +92,10 @@ cmp_bounds(FmgrInfo *cmp_func, Oid collid, const Bound *b1, const Bound *b2) Assert(cmp_func); - return DatumGetInt32( - FunctionCall2Coll(cmp_func, - collid, - BoundGetValue(b1), - BoundGetValue(b2))); + return DatumGetInt32(FunctionCall2Coll(cmp_func, + collid, + BoundGetValue(b1), + BoundGetValue(b2))); } diff --git a/src/include/utils.h b/src/include/utils.h index 3ed05a2f..9655bace 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -28,7 +28,6 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); -char *build_update_trigger_name_internal(Oid relid); /* * Misc. @@ -43,7 +42,6 @@ Oid get_rel_owner(Oid relid); char * get_rel_name_or_relid(Oid relid); Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); RangeVar *makeRangeVarFromRelid(Oid relid); -bool check_relation_exists(Oid relid); /* * Operator-related stuff. 
@@ -63,7 +61,7 @@ Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); -char **deconstruct_text_array(Datum array, int *array_size); +char ** deconstruct_text_array(Datum array, int *array_size); RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); diff --git a/src/init.c b/src/init.c index 9222d016..fff5ecd2 100644 --- a/src/init.c +++ b/src/init.c @@ -542,27 +542,33 @@ find_inheritance_children_array(Oid parentrelId, return nresult > 0 ? FCS_FOUND : FCS_NO_CHILDREN; } + + /* * Generate check constraint name for a partition. - * - * These functions does not perform sanity checks at all. + * NOTE: this function does not perform sanity checks at all. */ char * -build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno) +build_check_constraint_name_relid_internal(Oid relid, + AttrNumber attno) { return build_check_constraint_name_relname_internal(get_rel_name(relid), attno); } +/* + * Generate check constraint name for a partition. + * NOTE: this function does not perform sanity checks at all. + */ char * -build_check_constraint_name_relname_internal(const char *relname, AttrNumber attno) +build_check_constraint_name_relname_internal(const char *relname, + AttrNumber attno) { return psprintf("pathman_%s_%u_check", relname, attno); } /* * Generate part sequence name for a parent. - * - * This function does not perform sanity checks at all. + * NOTE: this function does not perform sanity checks at all. */ char * build_sequence_name_internal(Oid relid) @@ -570,6 +576,33 @@ build_sequence_name_internal(Oid relid) return psprintf("%s_seq", get_rel_name(relid)); } +/* + * Generate name for update trigger. + * NOTE: this function does not perform sanity checks at all. 
+ */ +char * +build_update_trigger_name_internal(Oid relid) +{ + return psprintf("%s_upd_trig", get_rel_name(relid)); +} + +/* + * Generate name for update trigger's function. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_update_trigger_func_name_internal(Oid relid) +{ + Oid nspid = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(psprintf("%s_upd_trig_func", + get_rel_name(relid)))); +} + + + /* * Check that relation 'relid' is partitioned by pg_pathman. * @@ -975,7 +1008,7 @@ read_opexpr_const(const OpExpr *opexpr, if (!cast_success) { - elog(WARNING, "Constant type in some check constraint " + elog(WARNING, "constant type in some check constraint " "does not match the partitioned column's type"); return false; diff --git a/src/partition_creation.c b/src/partition_creation.c index 68326095..1c2be12c 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -59,7 +59,7 @@ static void create_single_partition_common(Oid parent_relid, Oid partition_relid, Constraint *check_constraint, init_callback_params *callback_params, - const char *attname); + const char *partitioned_column); static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, @@ -82,8 +82,6 @@ static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); -static bool update_trigger_exists(Oid relid, char *trigname); - /* * --------------------------------------- @@ -208,7 +206,7 @@ create_single_partition_common(Oid parent_relid, Oid partition_relid, Constraint *check_constraint, init_callback_params *callback_params, - const char *attname) + const char *partitioned_column) { Relation child_relation; @@ -222,17 +220,17 @@ create_single_partition_common(Oid parent_relid, /* Make constraint visible */ CommandCounterIncrement(); - /* Create trigger */ - 
if (is_update_trigger_enabled_internal(parent_relid)) + /* Create trigger if needed */ + if (has_update_trigger_internal(parent_relid)) { - char *trigname; + const char *trigname; trigname = build_update_trigger_name_internal(parent_relid); create_single_update_trigger_internal(partition_relid, trigname, - attname); + partitioned_column); } - + /* Make trigger visible */ CommandCounterIncrement(); @@ -1698,11 +1696,16 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } + /* - * Create trigger for partition + * ------------------------- + * Update trigger creation + * ------------------------- */ + +/* Create trigger for partition */ void -create_single_update_trigger_internal(Oid relid, +create_single_update_trigger_internal(Oid partition_relid, const char *trigname, const char *attname) { @@ -1710,68 +1713,63 @@ create_single_update_trigger_internal(Oid relid, List *func; func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString("update_trigger_func")); + makeString(CppAsString(pathman_update_trigger_func))); stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = list_make1(makeString((char *) attname)); - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; + stmt->trigname = (char *) trigname; + stmt->relation = makeRangeVarFromRelid(partition_relid); + stmt->funcname = func; + stmt->args = NIL; + stmt->row = true; + stmt->timing = TRIGGER_TYPE_BEFORE; + stmt->events = TRIGGER_TYPE_UPDATE; + stmt->columns = list_make1(makeString((char *) attname)); + stmt->whenClause = NULL; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = false; + stmt->constrrel = NULL; (void) 
CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false); } -/* - * Check if update trigger is enabled. Basicly it returns true if update - * trigger exists for parent table - */ +/* Check if relation has pg_pathman's update trigger */ bool -is_update_trigger_enabled_internal(Oid parent) +has_update_trigger_internal(Oid parent_relid) { - char *trigname; - - trigname = build_update_trigger_name_internal(parent); - return update_trigger_exists(parent, trigname); -} + bool res = false; + Relation tgrel; + SysScanDesc scan; + ScanKeyData key[1]; + HeapTuple tuple; + const char *trigname; -static bool -update_trigger_exists(Oid relid, char *trigname) -{ - bool res = false; - Relation tgrel; - SysScanDesc tgscan; - ScanKeyData key; - HeapTuple tuple; + /* Build update trigger's name */ + trigname = build_update_trigger_name_internal(parent_relid); tgrel = heap_open(TriggerRelationId, RowExclusiveLock); - ScanKeyInit(&key, + ScanKeyInit(&key[0], Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relid)); - tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, - NULL, 1, &key); - while (HeapTupleIsValid(tuple = systable_getnext(tgscan))) + ObjectIdGetDatum(parent_relid)); + + scan = systable_beginscan(tgrel, TriggerRelidNameIndexId, + true, NULL, lengthof(key), key); + + while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); + Form_pg_trigger trigger = (Form_pg_trigger) GETSTRUCT(tuple); - if (namestrcmp(&(pg_trigger->tgname), trigname) == 0) + if (namestrcmp(&(trigger->tgname), trigname) == 0) { res = true; break; } } - systable_endscan(tgscan); + + systable_endscan(scan); heap_close(tgrel, RowExclusiveLock); return res; diff --git a/src/partition_filter.c b/src/partition_filter.c index a5e80e6f..c5df63bc 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -354,7 +354,7 @@ build_part_tuple_map(Relation parent_rel, Relation 
child_rel) /* Generate tuple transformation map and some other stuff */ tuple_map = convert_tuples_by_name(parent_tupdesc, child_tupdesc, - "could not convert row type for partition"); + ERR_PART_DESC_CONVERT); /* If map is one-to-one, free unused TupleDescs */ if (!tuple_map) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 627b3210..260920a5 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -466,7 +466,9 @@ bgw_main_concurrent_part(Datum main_arg) /* We'll need this to recover from errors */ old_mcxt = CurrentMemoryContext; - SPI_connect(); + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + PushActiveSnapshot(GetTransactionSnapshot()); /* Prepare the query if needed */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 5d22b687..a4aa61c7 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -11,7 +11,6 @@ #include "compat/pg_compat.h" #include "init.h" -#include "utils.h" #include "pathman.h" #include "partition_creation.h" #include "partition_filter.h" @@ -21,8 +20,6 @@ #include "access/tupconvert.h" #include "access/nbtree.h" #include "access/htup_details.h" -#include "access/xact.h" -#include "access/sysattr.h" #include "catalog/indexing.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" @@ -34,15 +31,10 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/inval.h" -#include "utils/jsonb.h" -#include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" -static Oid get_partition_for_key(const PartRelationInfo *prel, Datum key); - - /* Function declarations */ PG_FUNCTION_INFO_V1( on_partitions_created ); @@ -58,15 +50,15 @@ PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_cache_stats_internal ); PG_FUNCTION_INFO_V1( show_partition_list_internal ); -PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); PG_FUNCTION_INFO_V1( build_update_trigger_name ); +PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); 
PG_FUNCTION_INFO_V1( build_check_constraint_name_attnum ); PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); -PG_FUNCTION_INFO_V1( tuple_format_is_convertable ); +PG_FUNCTION_INFO_V1( is_tuple_convertible ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); @@ -79,14 +71,14 @@ PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); -PG_FUNCTION_INFO_V1( debug_capture ); -PG_FUNCTION_INFO_V1( get_pathman_lib_version ); - PG_FUNCTION_INFO_V1( is_operator_supported ); PG_FUNCTION_INFO_V1( create_update_triggers ); -PG_FUNCTION_INFO_V1( update_trigger_func ); +PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); -PG_FUNCTION_INFO_V1( is_update_trigger_enabled ); +PG_FUNCTION_INFO_V1( has_update_trigger ); + +PG_FUNCTION_INFO_V1( debug_capture ); +PG_FUNCTION_INFO_V1( get_pathman_lib_version ); /* User context for function show_partition_list_internal() */ @@ -113,10 +105,19 @@ typedef struct static void on_partitions_created_internal(Oid partitioned_table, bool add_callbacks); static void on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks); static void on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks); -static void delete_tuple(Relation rel, Datum ctid); -static void insert_tuple(Relation rel, HeapTuple tup); -static void make_arg_list(StringInfoData *buf, HeapTuple tup, TupleDesc tupdesc, - int *nargs, Oid **argtypes, Datum **args, char **nulls); + +static void pathman_update_trigger_func_move_tuple(Relation source_rel, + Relation target_rel, + HeapTuple old_tuple, + HeapTuple new_tuple); + +/* Extracted common check */ +static bool +check_relation_exists(Oid relid) +{ + return get_rel_type_id(relid) != InvalidOid; +} + /* * 
---------------------------- @@ -589,7 +590,10 @@ show_partition_list_internal(PG_FUNCTION_ARGS) * -------- */ -/* Check that relation exists. Usually we pass regclass as text, hence the name */ +/* + * Check that relation exists. + * NOTE: we pass REGCLASS as text, hence the function's name. + */ Datum validate_relname(PG_FUNCTION_ARGS) { @@ -642,24 +646,25 @@ is_attribute_nullable(PG_FUNCTION_ARGS) } Datum -tuple_format_is_convertable(PG_FUNCTION_ARGS) +is_tuple_convertible(PG_FUNCTION_ARGS) { - Oid relid1 = PG_GETARG_OID(0), - relid2 = PG_GETARG_OID(1); Relation rel1, rel2; bool res = true; - /* Relations should be already locked */ - rel1 = heap_open(relid1, NoLock); - rel2 = heap_open(relid2, NoLock); + rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); + rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); PG_TRY(); { + void *map; /* we don't actually need it */ + /* Try to build a conversion map */ - (void) convert_tuples_by_name_map(rel1->rd_att, - rel2->rd_att, - "doesn't matter"); + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); + /* Now free map */ + pfree(map); } PG_CATCH(); { @@ -667,8 +672,8 @@ tuple_format_is_convertable(PG_FUNCTION_ARGS) } PG_END_TRY(); - heap_close(rel1, NoLock); - heap_close(rel2, NoLock); + heap_close(rel1, AccessShareLock); + heap_close(rel2, AccessShareLock); PG_RETURN_BOOL(res); } @@ -681,32 +686,31 @@ tuple_format_is_convertable(PG_FUNCTION_ARGS) */ Datum -build_update_trigger_func_name(PG_FUNCTION_ARGS) +build_update_trigger_name(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0), - nspid; + Oid relid = PG_GETARG_OID(0); const char *result; /* Check that relation exists */ if (!check_relation_exists(relid)) elog(ERROR, "Invalid relation %u", relid); - nspid = get_rel_namespace(relid); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(psprintf("%s_upd_trig_func", - get_rel_name(relid)))); + result = 
quote_identifier(build_update_trigger_name_internal(relid)); PG_RETURN_TEXT_P(cstring_to_text(result)); } Datum -build_update_trigger_name(PG_FUNCTION_ARGS) +build_update_trigger_func_name(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - const char *result; /* trigger's name can't be qualified */ + const char *result; - result = quote_identifier(build_update_trigger_name_internal(relid)); + /* Check that relation exists */ + if (!check_relation_exists(relid)) + elog(ERROR, "Invalid relation %u", relid); + + result = build_update_trigger_func_name_internal(relid); PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -1062,298 +1066,252 @@ check_security_policy(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +/* + * Check if type supports the specified operator ( + | - | etc ). + */ Datum is_operator_supported(PG_FUNCTION_ARGS) { - Oid tp = PG_GETARG_OID(0); - char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - Oid opid; + Oid opid, + typid = PG_GETARG_OID(0); + char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - opid = compatible_oper_opid(list_make1(makeString(opname)), tp, tp, true); - if (!OidIsValid(opid)) - PG_RETURN_BOOL(false); + opid = compatible_oper_opid(list_make1(makeString(opname)), + typid, typid, true); - PG_RETURN_BOOL(true); + PG_RETURN_BOOL(OidIsValid(opid)); } /* - * ------- - * DEBUG - * ------- + * -------------------------- + * Update trigger machinery + * -------------------------- */ -/* - * NOTE: used for DEBUG, set breakpoint here. - */ +/* Behold: the update trigger itself */ Datum -debug_capture(PG_FUNCTION_ARGS) +pathman_update_trigger_func(PG_FUNCTION_ARGS) { - static float8 sleep_time = 0; - DirectFunctionCall1(pg_sleep, Float8GetDatum(sleep_time)); + TriggerData *trigdata = (TriggerData *) fcinfo->context; - /* Write something (doesn't really matter) */ - elog(WARNING, "debug_capture [%u]", MyProcPid); + Relation source_rel; - PG_RETURN_VOID(); -} + Oid parent_relid, + source_relid, + target_relid; -/* - * NOTE: just in case. 
- */ -Datum -get_pathman_lib_version(PG_FUNCTION_ARGS) -{ - PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); -} + HeapTuple old_tuple, + new_tuple; -/* - * Update trigger - */ -Datum -update_trigger_func(PG_FUNCTION_ARGS) -{ + AttrNumber value_attnum; + Datum value; + Oid value_type; + bool isnull; + + Oid *parts; + int nparts; + + PartParentSearch parent_search; const PartRelationInfo *prel; - PartParentSearch parent_search; - Oid parent; - TriggerData *trigdata = (TriggerData *) fcinfo->context; - char *key_name; - Datum key; - bool isnull; - TupleConversionMap *conversion_map; - Datum ctid; - - Relation source_rel; - TupleDesc source_tupdesc; - HeapTuple old_tuple; - HeapTuple new_tuple; - Oid source_relid; - AttrNumber source_key; - - Relation target_rel; - TupleDesc target_tupdesc; - Oid target_relid; - HeapTuple target_tuple; - - /* This function can only be invoked as a trigger */ + + /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "Function invoked not in a trigger context"); + elog(ERROR, "this function should not be called directly"); + + /* Handle wrong fire mode */ + if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) + elog(ERROR, "%s: must be fired for row", + trigdata->tg_trigger->tgname); /* Make sure that trigger was fired during UPDATE command */ if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - elog(ERROR, "This function must only be used as UPDATE trigger"); + elog(ERROR, "this function should only be used as UPDATE trigger"); - source_rel = trigdata->tg_relation; - source_relid = trigdata->tg_relation->rd_id; + /* Get source relation and its Oid */ + source_rel = trigdata->tg_relation; + source_relid = RelationGetRelid(trigdata->tg_relation); + + /* Fetch old & new tuples */ old_tuple = trigdata->tg_trigtuple; new_tuple = trigdata->tg_newtuple; - source_tupdesc = trigdata->tg_relation->rd_att; /* Find parent relation and partitioning info */ - parent = get_parent_of_partition(source_relid, &parent_search); + 
parent_relid = get_parent_of_partition(source_relid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) - parent = source_relid; + elog(ERROR, "relation \"%s\" is not a partition", + RelationGetRelationName(source_rel)); - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); + /* Fetch partition dispatch info */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - /* - * Find partitioning key attribute of source partition. Keep in mind that - * there could be dropped columns in parent relation or partition and so - * key attribute may have different number - */ - key_name = get_attname(parent, prel->attnum); - source_key = get_attnum(source_relid, key_name); - key = heap_getattr(new_tuple, source_key, source_tupdesc, &isnull); + /* Get attribute number of partitioning key (may differ from 'prel->attnum') */ + value_attnum = get_attnum(source_relid, get_attname(parent_relid, prel->attnum)); - /* Find partition it should go into */ - target_relid = get_partition_for_key(prel, key); + /* Extract partitioning key from NEW tuple */ + value = heap_getattr(new_tuple, + value_attnum, + RelationGetDescr(source_rel), + &isnull); - /* If target partition is the same then do nothing */ - if (target_relid == source_relid) - PG_RETURN_POINTER(new_tuple); + /* Extract value's type */ + value_type = RelationGetDescr(source_rel)->attrs[value_attnum - 1]->atttypid; - /* Read tuple id */ - ctid = heap_getsysattr(old_tuple, - SelfItemPointerAttributeNumber, - source_tupdesc, - &isnull); + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); - /* Open partition table */ - target_rel = heap_open(target_relid, RowExclusiveLock); - target_tupdesc = target_rel->rd_att; + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + else if (nparts == 0) + { + target_relid = create_partitions_for_value(PrelParentRelid(prel), + value, 
prel->atttype); - /* - * As it is different partition we need to build a TupleConversionMap - * between original partition and new one. And then do a convertation - */ - conversion_map = convert_tuples_by_name(source_tupdesc, - target_tupdesc, - "Failed to convert tuple"); - target_tuple = do_convert_tuple(new_tuple, conversion_map); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + } + else target_relid = parts[0]; + pfree(parts); - if (SPI_connect() != SPI_OK_CONNECT) - elog(ERROR, "SPI_connect failed"); + /* Convert tuple if target partition has changed */ + if (target_relid != source_relid) + { + Relation target_rel; + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ - /* - * To make an UPDATE on a tuple in case when the tuple should be moved from - * one partition to another we need to perform two actions. First, remove - * old tuple from original partition and then insert updated version - * of tuple to the target partition - */ - delete_tuple(source_rel, ctid); - insert_tuple(target_rel, target_tuple); + /* Lock partition and check if it exists */ + LockRelationOid(target_relid, lockmode); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) + elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); - if (SPI_finish() != SPI_OK_FINISH) - elog(ERROR, "SPI_finish failed"); + /* Open partition */ + target_rel = heap_open(target_relid, lockmode); - heap_close(target_rel, RowExclusiveLock); + /* Move tuple from source relation to the selected partition */ + pathman_update_trigger_func_move_tuple(source_rel, target_rel, + old_tuple, new_tuple); - PG_RETURN_VOID(); -} + /* Close partition */ + heap_close(target_rel, lockmode); -/* - * Delete record from rel. 
Caller is responsible for SPI environment setup - */ -static void -delete_tuple(Relation rel, Datum ctid) -{ - char *query; - Datum args[1] = {ctid}; - Oid argtypes[1] = {TIDOID}; - char nulls[1] = {' '}; - int spi_result; - - query = psprintf("DELETE FROM %s.%s WHERE ctid = $1", - quote_identifier(get_namespace_name(RelationGetNamespace(rel))), - quote_identifier(RelationGetRelationName(rel))); - spi_result = SPI_execute_with_args(query, 1, argtypes, args, nulls, false, 1); - - /* Check result */ - if (spi_result != SPI_OK_DELETE) - elog(ERROR, "SPI_execute_with_args returned %d", spi_result); -} + /* We've made some changes */ + PG_RETURN_VOID(); + } -/* - * Insert a new tuple to the rel. Caller is responsible for SPI environment - * setup - */ -static void -insert_tuple(Relation rel, HeapTuple tup) -{ - TupleDesc tupdesc = rel->rd_att; - StringInfoData querybuf; - Datum *args; - Oid *argtypes; - char *nulls; - int nargs; - const char *namespace; - const char *relname; - int spi_result; - - namespace = quote_identifier(get_namespace_name(RelationGetNamespace(rel))); - relname = quote_identifier(RelationGetRelationName(rel)); - - initStringInfo(&querybuf); - appendStringInfo(&querybuf, "INSERT INTO "); - appendStringInfo(&querybuf, "%s.%s", namespace, relname); - appendStringInfo(&querybuf, " VALUES ("); - make_arg_list(&querybuf, tup, tupdesc, &nargs, &argtypes, &args, &nulls); - appendStringInfo(&querybuf, ")"); - - spi_result = SPI_execute_with_args(querybuf.data, nargs, argtypes, - args, nulls, false, 0); - - /* Check result */ - if (spi_result != SPI_OK_INSERT) - elog(ERROR, "SPI_execute_with_args returned %d", spi_result); + /* Just return NEW tuple */ + PG_RETURN_POINTER(new_tuple); } +/* Move tuple to new partition (delete 'old_tuple' + insert 'new_tuple') */ static void -make_arg_list(StringInfoData *buf, HeapTuple tup, TupleDesc tupdesc, - int *nargs, Oid **argtypes, Datum **args, char **nulls) +pathman_update_trigger_func_move_tuple(Relation source_rel, 
+ Relation target_rel, + HeapTuple old_tuple, + HeapTuple new_tuple) { - int i; - bool isnull; + TupleDesc source_tupdesc = RelationGetDescr(source_rel), + target_tupdesc = RelationGetDescr(target_rel); + HeapTuple target_tuple; + TupleConversionMap *conversion_map; - *nargs = tupdesc->natts; - *args = (Datum *) palloc(sizeof(Datum) * tupdesc->natts); - *argtypes = (Oid *) palloc(sizeof(Oid) * tupdesc->natts); - *nulls = (char *) palloc(sizeof(char) * tupdesc->natts); + /* Build tuple conversion map */ + conversion_map = convert_tuples_by_name(source_tupdesc, + target_tupdesc, + ERR_PART_DESC_CONVERT); - for (i = 0; i < *nargs; i++) + if (conversion_map) { - /* Skip dropped columns */ - if (tupdesc->attrs[i]->attisdropped) - continue; - - (*args)[i] = heap_getattr(tup, i + 1, tupdesc, &isnull); - (*nulls)[i] = isnull ? 'n' : ' '; - (*argtypes)[i] = tupdesc->attrs[i]->atttypid; - - /* Add comma separator (except the first time) */ - if (i != 0) - appendStringInfo(buf, ","); + /* Convert tuple */ + target_tuple = do_convert_tuple(new_tuple, conversion_map); - /* Add parameter */ - appendStringInfo(buf, "$%i", i+1); + /* Free tuple conversion map */ + free_conversion_map(conversion_map); } -} + else target_tuple = new_tuple; -/* - * Returns Oid of partition corresponding to partitioning key value. 
Throws - * an error if no partition found - */ -static Oid -get_partition_for_key(const PartRelationInfo *prel, Datum key) -{ - Oid *parts; - int nparts; + /* Connect using SPI and execute a few queries */ + if (SPI_connect() == SPI_OK_CONNECT) + { + int nvalues = RelationGetDescr(target_rel)->natts; + Oid *types = palloc(nvalues * sizeof(Oid)); + Datum *values = palloc(nvalues * sizeof(Datum)); + char *nulls = palloc(nvalues * sizeof(char)); + StringInfo query = makeStringInfo(); + int i; + + /* Prepare query string */ + appendStringInfo(query, "DELETE FROM %s.%s WHERE ctid = $1", + quote_identifier(get_namespace_name( + RelationGetNamespace(source_rel))), + quote_identifier(RelationGetRelationName(source_rel))); + + /* Build singe argument */ + types[0] = TIDOID; + values[0] = PointerGetDatum(&old_tuple->t_self); + nulls[0] = ' '; + + /* DELETE FROM source_rel WHERE ctid = $1 */ + SPI_execute_with_args(query->data, 1, types, values, nulls, false, 0); + + resetStringInfo(query); + + /* Prepare query string */ + appendStringInfo(query, "INSERT INTO %s.%s VALUES (", + quote_identifier(get_namespace_name( + RelationGetNamespace(target_rel))), + quote_identifier(RelationGetRelationName(target_rel))); + for (i = 0; i < target_tupdesc->natts; i++) + { + AttrNumber attnum = i + 1; + bool isnull; - /* Search for matching partitions */ - parts = find_partitions_for_value(key, prel->atttype, prel, &nparts); + /* Build singe argument */ + types[i] = target_tupdesc->attrs[i]->atttypid; + values[i] = heap_getattr(target_tuple, attnum, target_tupdesc, &isnull); + nulls[i] = isnull ? 'n' : ' '; - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - else if (nparts == 0) - elog(ERROR, - "There is not partition to fit partition key \"%s\"", - datum_to_cstring(key, prel->atttype)); - else - return parts[0]; -} + /* Append "$N [,]" */ + appendStringInfo(query, (i != 0 ? 
", $%i" : "$%i"), attnum); + } + appendStringInfoChar(query, ')'); + /* INSERT INTO target_rel VALUES($1, $2, $3 ...) */ + SPI_execute_with_args(query->data, nvalues, types, values, nulls, false, 0); -/* - * ------------------------ - * Trigger functions - * ------------------------ - */ + /* Finally close SPI connection */ + SPI_finish(); + } + /* Else emit error */ + else elog(ERROR, "could not connect using SPI"); +} -/* - * Create UPDATE triggers for all partitions - */ +/* Create UPDATE triggers for all partitions */ Datum create_update_triggers(PG_FUNCTION_ARGS) { + Oid parent = PG_GETARG_OID(0); + Oid *children; + const char *attname, + *trigname; const PartRelationInfo *prel; - Oid parent = PG_GETARG_OID(0); - Oid *children; - char *attname, - *trigname; - int i; + uint32 i; + /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_ANY); - attname = get_attname(prel->key, prel->attnum); - children = PrelGetChildrenArray(prel); + /* Acquire trigger and attribute names */ trigname = build_update_trigger_name_internal(parent); + attname = get_attname(parent, prel->attnum); - /* Create triggers for parent */ + /* Create trigger for parent */ create_single_update_trigger_internal(parent, trigname, attname); + /* Fetch children array */ + children = PrelGetChildrenArray(prel); + /* Create triggers for each partition */ for (i = 0; i < PrelChildrenCount(prel); i++) create_single_update_trigger_internal(children[i], trigname, attname); @@ -1361,32 +1319,59 @@ create_update_triggers(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* - * Create an UPDATE trigger for partition - */ +/* Create an UPDATE trigger for partition */ Datum create_single_update_trigger(PG_FUNCTION_ARGS) { + Oid parent = PG_GETARG_OID(0); + Oid child = PG_GETARG_OID(1); + const char *trigname, + *attname; const PartRelationInfo *prel; - Oid parent = PG_GETARG_OID(0); - Oid partition = PG_GETARG_OID(1); - char *trigname, - *attname; - /* 
Determine partitioning key name */ + /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_ANY); + /* Acquire trigger and attribute names */ trigname = build_update_trigger_name_internal(parent); attname = get_attname(prel->key, prel->attnum); - create_single_update_trigger_internal(partition, trigname, attname); + create_single_update_trigger_internal(child, trigname, attname); PG_RETURN_VOID(); } +/* Check if relation has pg_pathman's update trigger */ Datum -is_update_trigger_enabled(PG_FUNCTION_ARGS) +has_update_trigger(PG_FUNCTION_ARGS) { - PG_RETURN_BOOL(is_update_trigger_enabled_internal(PG_GETARG_OID(0))); + PG_RETURN_BOOL(has_update_trigger_internal(PG_GETARG_OID(0))); +} + + +/* + * ------- + * DEBUG + * ------- + */ + +/* NOTE: used for DEBUG, set breakpoint here */ +Datum +debug_capture(PG_FUNCTION_ARGS) +{ + static float8 sleep_time = 0; + DirectFunctionCall1(pg_sleep, Float8GetDatum(sleep_time)); + + /* Write something (doesn't really matter) */ + elog(WARNING, "debug_capture [%u]", MyProcPid); + + PG_RETURN_VOID(); +} + +/* NOTE: just in case */ +Datum +get_pathman_lib_version(PG_FUNCTION_ARGS) +{ + PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index d6570ed3..7f5fcd45 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -57,8 +57,9 @@ static bool interval_is_trivial(Oid atttype, /* Function declarations */ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); -PG_FUNCTION_INFO_V1( find_or_create_range_partition ); +PG_FUNCTION_INFO_V1( create_range_partitions_internal ); PG_FUNCTION_INFO_V1( check_range_available_pl ); +PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); PG_FUNCTION_INFO_V1( get_part_range_by_idx ); @@ -69,9 +70,6 @@ PG_FUNCTION_INFO_V1( merge_range_partitions ); PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( 
validate_interval_value ); -PG_FUNCTION_INFO_V1( create_range_partitions_internal ); -PG_FUNCTION_INFO_V1( generate_bounds ); - /* * ----------------------------- @@ -79,9 +77,7 @@ PG_FUNCTION_INFO_V1( generate_bounds ); * ----------------------------- */ -/* - * pl/PgSQL wrapper for the create_single_range_partition(). - */ +/* pl/PgSQL wrapper for the create_single_range_partition(). */ Datum create_single_range_partition_pl(PG_FUNCTION_ARGS) { @@ -146,57 +142,107 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(partition_relid); } -/* - * Returns partition oid for specified parent relid and value. - * In case when partition doesn't exist try to create one. - */ Datum -find_or_create_range_partition(PG_FUNCTION_ARGS) +create_range_partitions_internal(PG_FUNCTION_ARGS) { - Oid parent_relid = PG_GETARG_OID(0); - Datum value = PG_GETARG_DATUM(1); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - const PartRelationInfo *prel; - FmgrInfo cmp_func; - RangeEntry found_rentry; - search_rangerel_result search_state; + Oid relid = PG_GETARG_OID(0); + int16 typlen; + bool typbyval; + char typalign; + FmgrInfo cmp_func; - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Partition names and tablespaces */ + char **partnames = NULL; + RangeVar **rangevars = NULL; + char **tablespaces = NULL; + int npartnames = 0; + int ntablespaces = 0; + + /* Bounds */ + ArrayType *array = PG_GETARG_ARRAYTYPE_P(1); + Oid elemtype = ARR_ELEMTYPE(array); + Datum *datums; + bool *nulls; + int ndatums; + int i; + /* Extract partition names */ + if (!PG_ARGISNULL(2)) + { + partnames = deconstruct_text_array(PG_GETARG_DATUM(2), &npartnames); + rangevars = qualified_relnames_to_rangevars(partnames, npartnames); + } + + /* Extract partition tablespaces */ + if (!PG_ARGISNULL(3)) + tablespaces = deconstruct_text_array(PG_GETARG_DATUM(3), &ntablespaces); + + /* Extract bounds */ + get_typlenbyvalalign(elemtype, 
&typlen, &typbyval, &typalign); + deconstruct_array(array, elemtype, + typlen, typbyval, typalign, + &datums, &nulls, &ndatums); + + if (partnames && npartnames != ndatums - 1) + ereport(ERROR, (errmsg("wrong length of relnames array"), + errdetail("relnames number must be less than " + "bounds array length by one"))); + + if (tablespaces && ntablespaces != ndatums - 1) + ereport(ERROR, (errmsg("wrong length of tablespaces array"), + errdetail("tablespaces number must be less than " + "bounds array length by one"))); + + /* Check if bounds array is ascending */ fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(value_type), - getBaseType(prel->atttype)); + getBaseType(elemtype), + getBaseType(elemtype)); - /* Use available PartRelationInfo to find partition */ - search_state = search_range_partition_eq(value, &cmp_func, prel, - &found_rentry); + /* Validate bounds */ + for (i = 0; i < ndatums - 1; i++) + { + /* Disregard 1st bound */ + if (i == 0) continue; - /* - * If found then just return oid, else create new partitions - */ - if (search_state == SEARCH_RANGEREL_FOUND) - PG_RETURN_OID(found_rentry.child_oid); - /* - * If not found and value is between first and last partitions - */ - else if (search_state == SEARCH_RANGEREL_GAP) - PG_RETURN_NULL(); - else + /* Check that bound is valid */ + if (nulls[i]) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("only first bound can be NULL"))); + + /* Check that bounds are ascending */ + if (!nulls[i - 1] && !check_le(&cmp_func, datums[i - 1], datums[i])) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("bounds array must be ascending"))); + } + + /* Create partitions using provided bounds */ + for (i = 0; i < ndatums - 1; i++) { - Oid child_oid = create_partitions_for_value(parent_relid, value, value_type); + Bound start = nulls[i] ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datums[i]), + + end = nulls[i + 1] ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(datums[i + 1]); + + RangeVar *name = rangevars ? rangevars[i] : NULL; - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + char *tablespace = tablespaces ? tablespaces[i] : NULL; - PG_RETURN_OID(child_oid); + (void) create_single_range_partition_internal(relid, + &start, + &end, + elemtype, + name, + tablespace); } + + /* Return number of partitions */ + PG_RETURN_INT32(ndatums - 1); } -/* - * Checks if range overlaps with existing partitions. - * Returns TRUE if overlaps and FALSE otherwise. - */ +/* Checks if range overlaps with existing partitions. */ Datum check_range_available_pl(PG_FUNCTION_ARGS) { @@ -223,6 +269,72 @@ check_range_available_pl(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* Generate range bounds starting with 'value' using 'interval'. */ +Datum +generate_range_bounds_pl(PG_FUNCTION_ARGS) +{ + /* Input params */ + Datum value = PG_GETARG_DATUM(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 0); + Datum interval = PG_GETARG_DATUM(1); + Oid interval_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + int count = PG_GETARG_INT32(2); + int i; + + /* Operator */ + Oid plus_op_func; + Datum plus_op_result; + Oid plus_op_result_type; + + /* Array */ + ArrayType *array; + int16 elemlen; + bool elembyval; + char elemalign; + Datum *datums; + + if (count < 1) + elog(ERROR, "'p_count' must be greater than zero"); + + /* We must provide count+1 bounds */ + count += 1; + + /* Find suitable addition operator for given value and interval */ + extract_op_func_and_ret_type("+", value_type, interval_type, + &plus_op_func, + &plus_op_result_type); + + /* Fetch type's information for array */ + get_typlenbyvalalign(value_type, &elemlen, &elembyval, &elemalign); + + datums = palloc(sizeof(Datum) * count); + datums[0] = value; + + /* Calculate bounds */ + for (i = 1; i < count; i++) + { + /* Invoke addition operator and get a result */ + plus_op_result = 
OidFunctionCall2(plus_op_func, value, interval); + + /* Cast result to 'value_type' if needed */ + if (plus_op_result_type != value_type) + plus_op_result = perform_type_cast(plus_op_result, + plus_op_result_type, + value_type, NULL); + + /* Update 'value' and store current bound */ + value = datums[i] = plus_op_result; + } + + /* build an array based on calculated datums */ + array = construct_array(datums, count, value_type, + elemlen, elembyval, elemalign); + + pfree(datums); + + PG_RETURN_ARRAYTYPE_P(array); +} + /* * ------------------------ @@ -1010,163 +1122,3 @@ drop_table_by_oid(Oid relid) RemoveRelations(n); } - - -Datum -create_range_partitions_internal(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - int16 typlen; - bool typbyval; - char typalign; - FmgrInfo cmp_func; - - /* partition names and tablespaces */ - char **partnames = NULL; - RangeVar **rangevars = NULL; - char **tablespaces = NULL; - int npartnames = 0; - int ntablespaces = 0; - - /* bounds */ - ArrayType *arr = PG_GETARG_ARRAYTYPE_P(1); - Oid elemtype = ARR_ELEMTYPE(arr); - Datum *datums; - bool *nulls; - int ndatums; - int i; - - /* Extract partition names */ - if (!PG_ARGISNULL(2)) - { - partnames = deconstruct_text_array(PG_GETARG_DATUM(2), &npartnames); - rangevars = qualified_relnames_to_rangevars(partnames, npartnames); - } - - /* Extract partition tablespaces */ - if (!PG_ARGISNULL(3)) - tablespaces = deconstruct_text_array(PG_GETARG_DATUM(3), &ntablespaces); - - /* Extract bounds */ - get_typlenbyvalalign(elemtype, &typlen, &typbyval, &typalign); - deconstruct_array(arr, elemtype, - typlen, typbyval, typalign, - &datums, &nulls, &ndatums); - - if (partnames && npartnames != ndatums-1) - ereport(ERROR, (errmsg("wrong length of relnames array"), - errdetail("relnames number must be less than " - "bounds array length by one"))); - - if (tablespaces && ntablespaces != ndatums-1) - ereport(ERROR, (errmsg("wrong length of tablespaces array"), - errdetail("tablespaces number must be 
less than " - "bounds array length by one"))); - - /* Check if bounds array is ascending */ - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(elemtype), - getBaseType(elemtype)); - for (i = 0; i < ndatums-1; i++) - { - /* - * Only first bound can be NULL - * - * XXX Probably the last one too... - */ - if (nulls[i]) - { - if (i == 0) - continue; - else - elog(ERROR, - "Only first bound can be NULL"); - } - - if (DatumGetInt32(FunctionCall2(&cmp_func, datums[i], datums[i+1])) >= 0) - elog(ERROR, - "Bounds array must be ascending"); - } - - /* Create partitions */ - for (i = 0; i < ndatums-1; i++) - { - Bound start = nulls[i] ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(datums[i]); - Bound end = nulls[i+1] ? - MakeBoundInf(PLUS_INFINITY) : - MakeBound(datums[i+1]); - RangeVar *rv = npartnames > 0 ? rangevars[i] : NULL; - char *tablespace = tablespaces ? tablespaces[i] : NULL; - - (void) create_single_range_partition_internal(relid, - &start, - &end, - elemtype, - rv, - tablespace); - } - - PG_RETURN_INT32(ndatums-1); -} - - -Datum -generate_bounds(PG_FUNCTION_ARGS) -{ - /* input params */ - Datum value = PG_GETARG_DATUM(0); - Oid v_type = get_fn_expr_argtype(fcinfo->flinfo, 0); - Datum interval = PG_GETARG_DATUM(1); - Oid i_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - int count = PG_GETARG_INT32(2); - int i; - - /* operator */ - Oid plus_op_func; - Datum plus_op_result; - Oid plus_op_result_type; - - /* array */ - ArrayType *arr; - int16 elemlen; - bool elembyval; - char elemalign; - Datum *datums; - - if (count < 1) - elog(ERROR, "Partitions count must be greater than zero"); - - /* Find suitable addition operator for given value and interval */ - extract_op_func_and_ret_type("+", v_type, i_type, - &plus_op_func, - &plus_op_result_type); - - get_typlenbyvalalign(v_type, &elemlen, &elembyval, &elemalign); - - datums = palloc(sizeof(Datum) * (count + 1)); - datums[0] = value; - - /* calculate bounds */ - for (i = 1; i <= count; i++) - { - /* Invoke addition 
operator and get a result */ - plus_op_result = OidFunctionCall2(plus_op_func, value, interval); - - if (plus_op_result_type != v_type) - plus_op_result = perform_type_cast(plus_op_result, - plus_op_result_type, - v_type, NULL); - - value = datums[i] = plus_op_result; - } - - /* build an array based on calculated datums */ - arr = construct_array(datums, count + 1, v_type, - elemlen, elembyval, elemalign); - - pfree(datums); - - PG_RETURN_ARRAYTYPE_P(arr); -} diff --git a/src/utils.c b/src/utils.c index 50019883..dcc7f328 100644 --- a/src/utils.c +++ b/src/utils.c @@ -105,18 +105,7 @@ check_security_policy_internal(Oid relid, Oid role) return true; } -/* - * Create an update trigger name - */ -char * -build_update_trigger_name_internal(Oid relid) -{ - /* Check that relation exists */ - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); - return (char *) psprintf("%s_upd_trig", get_rel_name(relid)); -} /* * Return pg_pathman schema's Oid or InvalidOid if that's not possible. @@ -177,6 +166,7 @@ list_reverse(List *l) } + /* * Get relation owner. */ @@ -246,19 +236,12 @@ RangeVar * makeRangeVarFromRelid(Oid relid) { char *relname = get_rel_name(relid); - char *namespace = get_namespace_name(get_rel_namespace(relid)); + char *nspname = get_namespace_name(get_rel_namespace(relid)); - return makeRangeVar(namespace, relname, -1); + return makeRangeVar(nspname, relname, -1); } -/* - * Extracted common check. - */ -bool -check_relation_exists(Oid relid) -{ - return get_rel_type_id(relid) != InvalidOid; -} + /* * Try to find binary operator. 
@@ -516,7 +499,8 @@ deconstruct_text_array(Datum array, int *array_size) /* Check number of dimensions */ if (ARR_NDIM(array_ptr) > 1) - elog(ERROR, "'partition_names' and 'tablespaces' may contain only 1 dimension"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should contain only 1 dimension"))); get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), &elemlen, &elembyval, &elemalign); @@ -535,7 +519,8 @@ deconstruct_text_array(Datum array, int *array_size) for (i = 0; i < arr_size; i++) { if (elem_nulls[i]) - elog(ERROR, "'partition_names' and 'tablespaces' may not contain NULLs"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should not contain NULLs"))); strings[i] = TextDatumGetCString(elem_values[i]); } @@ -545,7 +530,8 @@ deconstruct_text_array(Datum array, int *array_size) return strings; } /* Else emit ERROR */ - else elog(ERROR, "'partition_names' and 'tablespaces' may not be empty"); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should not be empty"))); /* Keep compiler happy */ return NULL; From d7cabff879de332e1eea122bb0771edbd3d00d6b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 16:59:42 +0300 Subject: [PATCH 0336/1124] add missing include (pg_funcs.c) --- src/pl_funcs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index a4aa61c7..94ad2fc8 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -31,6 +31,7 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/inval.h" +#include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" From 38c0135cd5b83c5f9ed9b6c85554797cbbbab118 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 17:04:50 +0300 Subject: [PATCH 0337/1124] fix comment regarding collation --- src/pg_pathman.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 
79cdc834..f2a786f7 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1133,16 +1133,17 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, Oid collid; /* - * If operator collation is different from default attribute - * collation then we cannot guarantee that we return correct - * partitions. So in this case we just return all of them + * We cannot guarantee that we'll return correct partitions set + * if operator collation is different from default attribute collation. + * In this case we just return all of them. */ - if (expr->opcollid != prel->attcollid && strategy != BTEqualStrategyNumber) + if (expr->opcollid != prel->attcollid && + strategy != BTEqualStrategyNumber) goto binary_opexpr_return; collid = OidIsValid(expr->opcollid) ? - expr->opcollid : - prel->attcollid; + expr->opcollid : + prel->attcollid; fill_type_cmp_fmgr_info(&cmp_func, getBaseType(c->consttype), From eafc6dfb960cdf8c7e9c6af927d8e6b1003b4e84 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 17:08:58 +0300 Subject: [PATCH 0338/1124] shorten comment for cmp_func_info --- src/relation_info.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index d3065001..38a0bff5 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -37,10 +37,7 @@ #endif -/* - * Comparison function info. 
This structure is only needed to pass FmgrInfo and - * collation to qsort - */ +/* Comparison function info */ typedef struct cmp_func_info { FmgrInfo flinfo; From 44c3e0db7de1a66e8926334b6ade500c2a8ba215 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 5 Apr 2017 17:47:57 +0300 Subject: [PATCH 0339/1124] Fix runtime nodes tests --- src/nodes_common.c | 54 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 45 insertions(+), 9 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 1d8985bf..c85af743 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -198,8 +198,12 @@ append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel foreach(lc, vars_not_found) { - Var *var = (Var *) lfirst(lc); Index last_item = list_length(tlist) + 1; + Var *var = (Var *) copyObject((Node *) lfirst(lc)); + + /* other fields except 'varno' should be correct */ + var->varno = relno; + tlist = lappend(tlist, makeTargetEntry((Expr *) var, last_item, NULL, false)); @@ -612,12 +616,35 @@ end_append_common(CustomScanState *node) hash_destroy(scan_state->children_table); } +/* Find first Var with varno == INDEX_VAR, and returns its varnoold */ +static bool +find_varnoold(Node *node, int *varnoold) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + if (var->varno == INDEX_VAR) + { + /* we found it */ + *varnoold = var->varnoold; + return true; + } + return false; + } + + return expression_tree_walker(node, find_varnoold, (void *) varnoold); +} + /* - * This function is similar to ChangeVarNodes, but changes only - * varno attributes, but doesn't change varnoold attribute + * To check equality we need to modify partitioning expression's Vars like + * they appear in custom_exprs, it means that varno should be equal to + * INDEX_VAR and varnoold should be changed according to query */ static bool -change_only_varnos(Node *node, const int *idx) +prepare_vars(Node *node, const int *varnoold) { if 
(node == NULL) return false; @@ -626,11 +653,12 @@ change_only_varnos(Node *node, const int *idx) { Var *var = (Var *) node; Assert(var->varno == 1); - var->varno = *idx; + var->varno = INDEX_VAR; + var->varnoold = *varnoold; return false; } - return expression_tree_walker(node, change_only_varnos, (void *) idx); + return expression_tree_walker(node, prepare_vars, (void *) varnoold); } void @@ -646,14 +674,22 @@ rescan_append_common(CustomScanState *node) int nparts; Node *prel_expr; - const int index_var = INDEX_VAR; + int varnoold = -100; /* not possible number */ prel = get_pathman_relation_info(scan_state->relid); Assert(prel); - /* Prepare expression */ + /* Prepare expression. Copy and modify 'varno' and 'varnoold' attributes */ prel_expr = copyObject(prel->expr); - change_only_varnos(prel_expr, &index_var); + foreach(lc, scan_state->custom_exprs) + { + find_varnoold((Node *) lfirst(lc), &varnoold); + if (varnoold != -100) + break; + } + + if (varnoold != -100) + prepare_vars(prel_expr, &varnoold); /* First we select all available partitions... 
*/ ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); From bf93b99fe7be8aa5e42f58924a539bf1ac5db3b5 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 5 Apr 2017 17:59:15 +0300 Subject: [PATCH 0340/1124] Remove unused code --- src/partition_filter.c | 38 -------------------------------------- src/pg_pathman.c | 3 --- 2 files changed, 41 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 2edd64f0..fe8ba3e8 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -529,44 +529,6 @@ partition_filter_create_scan_state(CustomScan *node) return (Node *) state; } -struct expr_walker_context -{ - const PartRelationInfo *prel; - TupleTableSlot *slot; - HeapTuple tup; -}; - -/* Fills CustomConst nodes with values from slot */ -static bool -adapt_values (Node *node, struct expr_walker_context *context) -{ - if (node == NULL) - return false; - - /* location == -2 means that it's our CustomConst node */ - if (IsA(node, Const) && ((Const *)node)->location == -2) - { - AttrNumber attnum; - Const *cst; - bool isNull; - - cst = (Const *)node; - - attnum = ((CustomConst *)node)->varattno; - Assert(attnum != InvalidAttrNumber); - - /* check that type is still same */ - Assert(context->slot->tts_tupleDescriptor-> - attrs[attnum - 1]->atttypid == cst->consttype); - cst->constvalue = heap_getattr(context->tup, attnum, - context->slot->tts_tupleDescriptor, &isNull); - cst->constisnull = isNull; - return false; - } - - return expression_tree_walker(node, adapt_values, (void *) context); -} - void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 6dfce261..56aaed0b 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -907,9 +907,6 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) if (!match_expr_to_operand(context->prel_expr, exprnode)) goto handle_arrexpr_return; - if (exprnode && IsA(exprnode, RelabelType)) - exprnode 
= (Node *) ((RelabelType *) exprnode)->arg; - if (arraynode && IsA(arraynode, Const) && !((Const *) arraynode)->constisnull) { From 301aa7896f89156bc6727cca90a53d8ca7eff7d8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 18:07:51 +0300 Subject: [PATCH 0341/1124] add tests for generate_range_bounds() --- Makefile | 17 +--------------- expected/pathman_calamity.out | 37 +++++++++++++++++++++++++++++++++++ range.sql | 4 ++-- sql/pathman_calamity.sql | 12 +++++++++++- src/pl_range_funcs.c | 15 ++++++++++++-- src/utils.c | 9 +++++---- 6 files changed, 69 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index a2453679..1f949aba 100644 --- a/Makefile +++ b/Makefile @@ -24,22 +24,7 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool" -REGRESS = pathman_basic \ - pathman_only \ - pathman_cte \ - pathman_bgw \ - pathman_inserts \ - pathman_updates \ - pathman_domains \ - pathman_interval \ - pathman_callbacks \ - pathman_foreign_keys \ - pathman_permissions \ - pathman_rowmarks \ - pathman_runtime_nodes \ - pathman_utility_stmt \ - pathman_column_type \ - pathman_calamity +REGRESS = pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 1910da63..ad81d679 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -396,6 +396,43 @@ SELECT drop_range_partition_expand_next(NULL) IS NULL; t (1 row) +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin diff --git a/range.sql b/range.sql index d5a5262e..2b56194a 100644 --- a/range.sql +++ b/range.sql @@ -1168,11 +1168,11 @@ CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( p_interval INTERVAL, p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' -LANGUAGE C; +LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' -LANGUAGE C; +LANGUAGE C STRICT; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index cbeea2f9..d6b9208d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -89,7 +89,6 @@ SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ SELECT drop_partitions('calamity.part_test', true); DELETE FROM calamity.part_test; - /* check function build_hash_condition() */ SELECT build_hash_condition('int4', 'val', 10, 1); SELECT build_hash_condition('text', 'val', 10, 1); @@ -154,6 +153,17 @@ SELECT stop_concurrent_part_task(1::regclass); SELECT drop_range_partition_expand_next('pg_class'); SELECT drop_range_partition_expand_next(NULL) IS NULL; +/* check function 
generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; +SELECT generate_range_bounds(0, 100, NULL) IS NULL; +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7f5fcd45..f8765658 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -246,11 +246,17 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) Datum check_range_available_pl(PG_FUNCTION_ARGS) { - Oid parent_relid = PG_GETARG_OID(0); + Oid parent_relid; Bound start, end; Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + if (PG_ARGISNULL(0)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + parent_relid = PG_GETARG_OID(0); + start = PG_ARGISNULL(1) ? 
MakeBoundInf(MINUS_INFINITY) : MakeBound(PG_GETARG_DATUM(1)); @@ -293,8 +299,13 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) char elemalign; Datum *datums; + Assert(!PG_ARGISNULL(0)); + Assert(!PG_ARGISNULL(1)); + Assert(!PG_ARGISNULL(2)); + if (count < 1) - elog(ERROR, "'p_count' must be greater than zero"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'p_count' must be greater than zero"))); /* We must provide count+1 bounds */ count += 1; diff --git a/src/utils.c b/src/utils.c index dcc7f328..099f5a74 100644 --- a/src/utils.c +++ b/src/utils.c @@ -257,7 +257,10 @@ get_binary_operator(char *oprname, Oid arg1, Oid arg2) arg1, arg2, true, -1); if (!op) - elog(ERROR, "Cannot find operator \"%s\"(%u, %u)", oprname, arg1, arg2); + elog(ERROR, "cannot find operator %s(%s, %s)", + oprname, + format_type_be(arg1), + format_type_be(arg2)); return op; } @@ -319,9 +322,7 @@ extract_op_func_and_ret_type(char *opname, /* Get "move bound operator" descriptor */ op = get_binary_operator(opname, type1, type2); - if (!op) - elog(ERROR, "missing %s operator for types %s and %s", - opname, format_type_be(type1), format_type_be(type2)); + Assert(op); *op_func = oprfuncid(op); *op_ret_type = ((Form_pg_operator) GETSTRUCT(op))->oprresult; From b111fd2330ab48cb5db1f4cf7e0dfcbbc4319aa6 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 5 Apr 2017 18:14:49 +0300 Subject: [PATCH 0342/1124] Fix includes for different versions of postgres --- src/partition_creation.c | 1 + src/relation_info.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 22f35f36..954936ff 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -14,6 +14,7 @@ #include "pathman.h" #include "pathman_workers.h" #include "xact_handling.h" +#include "compat/pg_compat.h" #include "access/htup_details.h" #include "access/reloptions.h" diff --git a/src/relation_info.c b/src/relation_info.c index 949da871..6632291e 100644 --- 
a/src/relation_info.c +++ b/src/relation_info.c @@ -35,8 +35,13 @@ #include "utils/lsyscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 90600 +#include "optimizer/planmain.h" +#endif + #if PG_VERSION_NUM >= 90600 #include "catalog/pg_constraint_fn.h" +#include "nodes/nodeFuncs.h" #endif From 9e6f32b1a73d84f238ebb4b3beeea72096d36989 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 18:37:26 +0300 Subject: [PATCH 0343/1124] fix Makefile --- Makefile | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1f949aba..a2453679 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,22 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool" -REGRESS = pathman_calamity +REGRESS = pathman_basic \ + pathman_only \ + pathman_cte \ + pathman_bgw \ + pathman_inserts \ + pathman_updates \ + pathman_domains \ + pathman_interval \ + pathman_callbacks \ + pathman_foreign_keys \ + pathman_permissions \ + pathman_rowmarks \ + pathman_runtime_nodes \ + pathman_utility_stmt \ + pathman_column_type \ + pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add From 2604a1ccc8ccf5afefdfc8dd9266528fc89e4597 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 19:12:36 +0300 Subject: [PATCH 0344/1124] introduce new regression test (pathman_update_trigger) --- Makefile | 3 +- expected/pathman_basic.out | 47 +----- expected/pathman_update_trigger.out | 239 ++++++++++++++++++++++++++++ sql/pathman_basic.sql | 11 -- sql/pathman_update_trigger.sql | 135 ++++++++++++++++ 5 files changed, 377 insertions(+), 58 deletions(-) create mode 100644 expected/pathman_update_trigger.out create mode 100644 sql/pathman_update_trigger.sql diff --git a/Makefile b/Makefile index a2453679..da14ebd4 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql \ pg_pathman--1.2--1.3.sql -PGFILEDESC = "pg_pathman - 
partitioning tool" +PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" REGRESS = pathman_basic \ pathman_only \ @@ -39,6 +39,7 @@ REGRESS = pathman_basic \ pathman_runtime_nodes \ pathman_utility_stmt \ pathman_column_type \ + pathman_update_trigger \ pathman_calamity EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 69c677d7..ea5801f9 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -367,49 +367,6 @@ NOTICE: drop cascades to 8 other objects SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; -/* update triggers test */ -SELECT pathman.create_update_triggers('test.hash_rel'); - create_update_triggers ------------------------- - -(1 row) - -UPDATE test.hash_rel SET value = 7 WHERE value = 6; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 7; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = 7) -(3 rows) - -SELECT * FROM test.hash_rel WHERE value = 7; - id | value -----+------- - 6 | 7 -(1 row) - -SELECT pathman.create_update_triggers('test.num_range_rel'); - create_update_triggers ------------------------- - -(1 row) - -UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = 3001; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_4 - Filter: (id = 3001) -(3 rows) - -SELECT * FROM test.num_range_rel WHERE id = 3001; - id | txt -------+---------------------------------- - 3001 | c4ca4238a0b923820dcc509a6f75849b -(1 row) - SET enable_indexscan = OFF; SET enable_bitmapscan = OFF; SET enable_seqscan = ON; @@ -1275,8 +1232,6 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') abc | integer | | plain | | Indexes: "hash_rel_0_pkey" PRIMARY KEY, btree (id) -Triggers: - hash_rel_upd_trig BEFORE UPDATE OF 
value ON test.hash_rel_0 FOR EACH ROW EXECUTE PROCEDURE pathman.pathman_update_trigger_func() \d+ test.hash_rel_extern Table "test.hash_rel_extern" @@ -1346,7 +1301,7 @@ SELECT COUNT(*) FROM ONLY test.hash_rel; DROP TABLE test.hash_rel CASCADE; SELECT pathman.drop_partitions('test.num_range_rel'); -NOTICE: 998 rows copied from test.num_range_rel_1 +NOTICE: 999 rows copied from test.num_range_rel_1 NOTICE: 1000 rows copied from test.num_range_rel_2 NOTICE: 1000 rows copied from test.num_range_rel_3 drop_partitions diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out new file mode 100644 index 00000000..41a3314b --- /dev/null +++ b/expected/pathman_update_trigger.out @@ -0,0 +1,239 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +NOTICE: sequence "test_range_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT create_update_triggers('test_update_trigger.test_range'); + create_update_triggers +------------------------ + +(1 row) + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_1 | 5 | 1 + test_update_trigger.test_range_1 | 5 | 10 + test_update_trigger.test_range_1 | 5 | 2 + test_update_trigger.test_range_1 | 5 | 3 + test_update_trigger.test_range_1 | 5 | 4 + test_update_trigger.test_range_1 | 5 | 5 + 
test_update_trigger.test_range_1 | 5 | 6 + test_update_trigger.test_range_1 | 5 | 7 + test_update_trigger.test_range_1 | 5 | 8 + test_update_trigger.test_range_1 | 5 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_3 | 21 | 11 + test_update_trigger.test_range_3 | 22 | 12 + test_update_trigger.test_range_3 | 23 | 13 + test_update_trigger.test_range_3 | 24 | 14 + test_update_trigger.test_range_3 | 25 | 15 + test_update_trigger.test_range_3 | 26 | 16 + test_update_trigger.test_range_3 | 27 | 17 + test_update_trigger.test_range_3 | 28 | 18 + test_update_trigger.test_range_3 | 29 | 19 + test_update_trigger.test_range_3 | 30 | 20 + test_update_trigger.test_range_3 | 21 | 21 + test_update_trigger.test_range_3 | 22 | 22 + test_update_trigger.test_range_3 | 23 | 23 + test_update_trigger.test_range_3 | 24 | 24 + test_update_trigger.test_range_3 | 25 | 25 + test_update_trigger.test_range_3 | 26 | 26 + test_update_trigger.test_range_3 | 27 | 27 + test_update_trigger.test_range_3 | 28 | 28 + test_update_trigger.test_range_3 | 29 | 29 + test_update_trigger.test_range_3 | 30 | 30 +(20 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_9 | 90 | 80 + 
test_update_trigger.test_range_9 | 90 | 90 +(2 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = -1 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_11 | -1 | 50 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_10 | 100 | test! +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +ERROR: cannot spawn a partition +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 70 | 70 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 65 | 65 +(1 row) + +SELECT count(*) FROM 
test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_update_triggers('test_update_trigger.test_hash'); + create_update_triggers +------------------------ + +(1 row) + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + tableoid | val | comment +---------------------------------+-----+--------- + test_update_trigger.test_hash_2 | 1 | 1 + test_update_trigger.test_hash_2 | 1 | 10 + test_update_trigger.test_hash_2 | 1 | 2 + test_update_trigger.test_hash_2 | 1 | 3 + test_update_trigger.test_hash_2 | 1 | 4 + test_update_trigger.test_hash_2 | 1 | 5 + test_update_trigger.test_hash_2 | 1 | 6 + test_update_trigger.test_hash_2 | 1 | 7 + test_update_trigger.test_hash_2 | 1 | 8 + test_update_trigger.test_hash_2 | 1 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + tableoid | val | comment +----------+-----+--------- +(0 rows) + +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +DROP SCHEMA test_update_trigger CASCADE; +NOTICE: drop cascades to 16 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index bd12c8cb..b9d638bb 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -126,17 +126,6 
@@ SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; -/* update triggers test */ -SELECT pathman.create_update_triggers('test.hash_rel'); -UPDATE test.hash_rel SET value = 7 WHERE value = 6; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 7; -SELECT * FROM test.hash_rel WHERE value = 7; - -SELECT pathman.create_update_triggers('test.num_range_rel'); -UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = 3001; -SELECT * FROM test.num_range_rel WHERE id = 3001; - SET enable_indexscan = OFF; SET enable_bitmapscan = OFF; SET enable_seqscan = ON; diff --git a/sql/pathman_update_trigger.sql b/sql/pathman_update_trigger.sql new file mode 100644 index 00000000..dd3f82ad --- /dev/null +++ b/sql/pathman_update_trigger.sql @@ -0,0 +1,135 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; + + + +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +SELECT create_update_triggers('test_update_trigger.test_range'); + + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single 
row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; + +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; + +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = -1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; + +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; + +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; + +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); +SELECT create_update_triggers('test_update_trigger.test_hash'); + + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM 
test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + + +DROP SCHEMA test_update_trigger CASCADE; +DROP EXTENSION pg_pathman; From b1d13fd21046cdde67a28a3cb89166c635b5634b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 5 Apr 2017 19:21:01 +0300 Subject: [PATCH 0345/1124] Add basic tests for expressions --- expected/pathman_expressions.out | 100 +++++++++++++++++++++++++++++++ sql/pathman_expressions.sql | 43 +++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 expected/pathman_expressions.out create mode 100644 sql/pathman_expressions.sql diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out new file mode 100644 index 00000000..7ddbf216 --- /dev/null +++ b/expected/pathman_expressions.out @@ -0,0 +1,100 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER, + value2 INTEGER +); +INSERT INTO test.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 5 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM 
test.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +NOTICE: sequence "range_rel_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT * FROM test.range_rel_6; + id | dt | txt +----+--------------------------+---------------------------------- + 61 | Wed Jan 01 00:00:00 2020 | 339e0b1f73322ffca5ec77523ff1adfa + 62 | Sat Feb 01 00:00:00 2020 | 3c09dde93bf2730744668c266845a828 + 63 | Sun Mar 01 00:00:00 2020 | e6c8aaac1e4a1eb6594309a2fd24a5e5 + 64 | Wed Apr 01 00:00:00 2020 | 8cea991c596b35cc412ad489af424341 +(4 rows) + +INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql new file mode 100644 index 00000000..95fbea82 --- /dev/null +++ b/sql/pathman_expressions.sql @@ -0,0 +1,43 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE 
SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +/* hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER, + value2 INTEGER +); +INSERT INTO test.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; + +SELECT COUNT(*) FROM test.hash_rel; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value * value2', 4); +SELECT COUNT(*) FROM ONLY test.hash_rel; +SELECT COUNT(*) FROM test.hash_rel; + +INSERT INTO test.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test.hash_rel; +SELECT COUNT(*) FROM test.hash_rel; + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; + +/* range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); + +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT * FROM test.range_rel_6; +INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); + From 992a7ad6f4cff5163c6c62c5b027ad84229a4094 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 19:39:40 +0300 Subject: [PATCH 0346/1124] improve function pathman_update_trigger_func_move_tuple() --- src/partition_filter.c | 2 +- src/pl_funcs.c | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index c5df63bc..8fa09d88 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -344,7 +344,7 @@ build_part_tuple_map(Relation parent_rel, Relation child_rel) TupleDesc child_tupdesc, parent_tupdesc; - /* 
Use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ + /* HACK: use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); child_tupdesc->tdtypeid = InvalidOid; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 94ad2fc8..ecc10c94 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1212,11 +1212,18 @@ pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple) { - TupleDesc source_tupdesc = RelationGetDescr(source_rel), - target_tupdesc = RelationGetDescr(target_rel); + TupleDesc source_tupdesc, + target_tupdesc; HeapTuple target_tuple; TupleConversionMap *conversion_map; + /* HACK: use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ + source_tupdesc = CreateTupleDescCopy(RelationGetDescr(source_rel)); + source_tupdesc->tdtypeid = InvalidOid; + + target_tupdesc = CreateTupleDescCopy(RelationGetDescr(target_rel)); + target_tupdesc->tdtypeid = InvalidOid; + /* Build tuple conversion map */ conversion_map = convert_tuples_by_name(source_tupdesc, target_tupdesc, @@ -1286,6 +1293,10 @@ pathman_update_trigger_func_move_tuple(Relation source_rel, } /* Else emit error */ else elog(ERROR, "could not connect using SPI"); + + /* At last, free these temporary tuple descs */ + FreeTupleDesc(source_tupdesc); + FreeTupleDesc(target_tupdesc); } /* Create UPDATE triggers for all partitions */ From eb8bcec975e77e82944ebff99781b8ab82aff7c9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 20:02:04 +0300 Subject: [PATCH 0347/1124] improve regression test 'pathman_update_trigger' --- expected/pathman_update_trigger.out | 53 ++++++++++++++++++++++++++++- sql/pathman_update_trigger.sql | 29 ++++++++++++++++ 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out index 41a3314b..ae60c733 100644 --- a/expected/pathman_update_trigger.out +++ 
b/expected/pathman_update_trigger.out @@ -175,6 +175,57 @@ SELECT count(*) FROM test_update_trigger.test_range; 90 (1 row) +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); + attach_range_partition +------------------------------------ + test_update_trigger.test_range_inv +(1 row) + +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + tableoid | val | comment +------------------------------------+-----+--------- + test_update_trigger.test_range_inv | 105 | 60 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); + append_range_partition +----------------------------------- + test_update_trigger.test_range_12 +(1 row) + +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + tableoid | val +-----------------------------------+----- + test_update_trigger.test_range_12 | 115 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + /* Partition table by HASH (INT4) */ CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; @@ -235,5 +286,5 @@ SELECT count(*) FROM test_update_trigger.test_hash; (1 row) DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 16 other objects +NOTICE: drop cascades to 18 other 
objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_trigger.sql b/sql/pathman_update_trigger.sql index dd3f82ad..a5f5b10e 100644 --- a/sql/pathman_update_trigger.sql +++ b/sql/pathman_update_trigger.sql @@ -98,6 +98,35 @@ ORDER BY comment; SELECT count(*) FROM test_update_trigger.test_range; +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; + +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; + +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + +SELECT count(*) FROM test_update_trigger.test_range; + + /* Partition table by HASH (INT4) */ CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); From e9f2f326d2ec2c0a7a571786ae204b407e248748 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 20:18:12 +0300 Subject: [PATCH 0348/1124] remove function search_range_partition_eq() --- src/include/pathman.h | 13 ------------ src/pg_pathman.c | 48 ------------------------------------------- 2 files changed, 61 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index 9ed0c21f..26d83679 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -165,19 +165,6 @@ void select_range_partitions(const Datum value, Oid collid, WrapperNode *result); -/* Result of 
search_range_partition_eq() */ -typedef enum -{ - SEARCH_RANGEREL_OUT_OF_RANGE = 0, - SEARCH_RANGEREL_GAP, - SEARCH_RANGEREL_FOUND -} search_rangerel_result; - -search_rangerel_result search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re); - /* Convert hash value to the partition index */ static inline uint32 diff --git a/src/pg_pathman.c b/src/pg_pathman.c index f2a786f7..0e2e3889 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -572,54 +572,6 @@ select_range_partitions(const Datum value, } } -/* Fetch RangeEntry of RANGE partition which suits 'value' */ -search_rangerel_result -search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re) /* returned RangeEntry */ -{ - RangeEntry *ranges; - int nranges; - WrapperNode result; - - ranges = PrelGetRangesArray(prel); - nranges = PrelChildrenCount(prel); - - select_range_partitions(value, - cmp_func, - ranges, - nranges, - BTEqualStrategyNumber, - prel->attcollid, - &result); /* output */ - - if (result.found_gap) - { - return SEARCH_RANGEREL_GAP; - } - else if (result.rangeset == NIL) - { - return SEARCH_RANGEREL_OUT_OF_RANGE; - } - else - { - IndexRange irange = linitial_irange(result.rangeset); - - Assert(list_length(result.rangeset) == 1); - Assert(irange_lower(irange) == irange_upper(irange)); - Assert(is_irange_valid(irange)); - - /* Write result to the 'out_rentry' if necessary */ - if (out_re) - memcpy((void *) out_re, - (const void *) &ranges[irange_lower(irange)], - sizeof(RangeEntry)); - - return SEARCH_RANGEREL_FOUND; - } -} - /* From 068f0da43d2f31fca08c5a474d3e4eabd9d9bb3d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Apr 2017 20:26:07 +0300 Subject: [PATCH 0349/1124] cosmetic fixes --- src/include/pathman.h | 2 +- src/include/relation_info.h | 2 +- src/pg_pathman.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/include/pathman.h 
b/src/include/pathman.h index 26d83679..23860924 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -162,7 +162,7 @@ void select_range_partitions(const Datum value, const RangeEntry *ranges, const int nranges, const int strategy, - Oid collid, + const Oid collid, WrapperNode *result); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 9ee629a1..2c379049 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -82,7 +82,7 @@ FreeBound(Bound *bound, bool byval) } inline static int -cmp_bounds(FmgrInfo *cmp_func, Oid collid, const Bound *b1, const Bound *b2) +cmp_bounds(FmgrInfo *cmp_func, const Oid collid, const Bound *b1, const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) return -1; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0e2e3889..fdf00aa2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -395,7 +395,7 @@ select_range_partitions(const Datum value, const RangeEntry *ranges, const int nranges, const int strategy, - Oid collid, + const Oid collid, WrapperNode *result) /* returned partitions */ { bool lossy = false, From e0ea7b93f4eb856e498a5f06a477ebf4ce872145 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 6 Apr 2017 11:23:57 +0300 Subject: [PATCH 0350/1124] Modify gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index f0d2c2c4..2bcd4998 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,6 @@ regression.out *.gcno *.gcov pg_pathman--*.sql + +/cscope +/tags From d464d973d13659948dee4af8cd66c9ce7c023cc2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 6 Apr 2017 11:24:50 +0300 Subject: [PATCH 0351/1124] Modify gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2bcd4998..4193037d 100644 --- a/.gitignore +++ b/.gitignore @@ -11,5 +11,5 @@ regression.out *.gcov pg_pathman--*.sql -/cscope +/cscope* /tags From d9e7edb09cf4137281fbbb3e270c853e87c98b6e 
Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 13:50:14 +0300 Subject: [PATCH 0352/1124] even more regression tests, fix in function create_range_partitions_internal() --- expected/pathman_basic.out | 4 ++-- expected/pathman_calamity.out | 20 ++++++++++++++++ init.sql | 2 +- sql/pathman_calamity.sql | 19 +++++++++++++++ src/pl_range_funcs.c | 45 +++++++++++++++++++++++++---------- 5 files changed, 74 insertions(+), 16 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index ea5801f9..cf90da94 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -10,7 +10,7 @@ INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -ERROR: partitioning key "value" must be NOT NULL +ERROR: partitioning key "value" must be marked NOT NULL ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); create_hash_partitions @@ -130,7 +130,7 @@ CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: partitioning key "dt" must be NOT NULL +ERROR: partitioning key "dt" must be marked NOT NULL ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); ERROR: not enough partitions to fit all values of "dt" diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ad81d679..9fd53712 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -102,6 +102,24 @@ SELECT count(*) FROM calamity.part_test; (1 row) DELETE FROM 
calamity.part_test; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'relnames' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ @@ -415,6 +433,8 @@ SELECT generate_range_bounds(0, 100, NULL) IS NULL; t (1 row) +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ ERROR: cannot find operator +(text, text) SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ diff --git a/init.sql b/init.sql index e4feeb19..f2d87d13 100644 --- a/init.sql +++ b/init.sql @@ -469,7 +469,7 @@ BEGIN END IF; IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN - RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; + RAISE EXCEPTION 'partitioning key "%" must be marked NOT NULL', p_attribute; END IF; /* Check if there are foreign keys that reference the relation */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 
d6b9208d..7864c90d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -36,6 +36,24 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ + /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ @@ -157,6 +175,7 @@ SELECT drop_range_partition_expand_next(NULL) IS NULL; SELECT generate_range_bounds(NULL, 100, 10) IS NULL; SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; SELECT generate_range_bounds(0, 100, NULL) IS NULL; +SELECT generate_range_bounds(0, 100, 0); /* not ok */ SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f8765658..748fc8c0 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -145,7 +145,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) Datum create_range_partitions_internal(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid parent_relid; int16 typlen; bool typbyval; char typalign; @@ -159,13 +159,30 @@ 
create_range_partitions_internal(PG_FUNCTION_ARGS) int ntablespaces = 0; /* Bounds */ - ArrayType *array = PG_GETARG_ARRAYTYPE_P(1); - Oid elemtype = ARR_ELEMTYPE(array); + ArrayType *bounds; + Oid elemtype; Datum *datums; bool *nulls; int ndatums; int i; + /* Extract parent's Oid */ + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + /* Extract array of bounds */ + if (!PG_ARGISNULL(1)) + { + bounds = PG_GETARG_ARRAYTYPE_P(1); + elemtype = ARR_ELEMTYPE(bounds); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'bounds' should not be NULL"))); + /* Extract partition names */ if (!PG_ARGISNULL(2)) { @@ -179,19 +196,21 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) /* Extract bounds */ get_typlenbyvalalign(elemtype, &typlen, &typbyval, &typalign); - deconstruct_array(array, elemtype, + deconstruct_array(bounds, elemtype, typlen, typbyval, typalign, &datums, &nulls, &ndatums); if (partnames && npartnames != ndatums - 1) - ereport(ERROR, (errmsg("wrong length of relnames array"), - errdetail("relnames number must be less than " - "bounds array length by one"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("wrong length of 'relnames' array"), + errdetail("number of 'relnames' must be less than " + "'bounds' array length by one"))); if (tablespaces && ntablespaces != ndatums - 1) - ereport(ERROR, (errmsg("wrong length of tablespaces array"), - errdetail("tablespaces number must be less than " - "bounds array length by one"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("wrong length of 'tablespaces' array"), + errdetail("number of 'tablespaces' must be less than " + "'bounds' array length by one"))); /* Check if bounds array is ascending */ fill_type_cmp_fmgr_info(&cmp_func, @@ -199,7 +218,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) 
getBaseType(elemtype)); /* Validate bounds */ - for (i = 0; i < ndatums - 1; i++) + for (i = 0; i < ndatums; i++) { /* Disregard 1st bound */ if (i == 0) continue; @@ -212,7 +231,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) /* Check that bounds are ascending */ if (!nulls[i - 1] && !check_le(&cmp_func, datums[i - 1], datums[i])) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("bounds array must be ascending"))); + errmsg("'bounds' array must be ascending"))); } /* Create partitions using provided bounds */ @@ -230,7 +249,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) char *tablespace = tablespaces ? tablespaces[i] : NULL; - (void) create_single_range_partition_internal(relid, + (void) create_single_range_partition_internal(parent_relid, &start, &end, elemtype, From efa29186c97df9a021b2e11cb03d6e2d862c6b3e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 13:56:10 +0300 Subject: [PATCH 0353/1124] remove function get_type_hash_func() --- hash.sql | 7 ------- src/pl_hash_funcs.c | 15 --------------- 2 files changed, 22 deletions(-) diff --git a/hash.sql b/hash.sql index 9ec00791..c3abe1d9 100644 --- a/hash.sql +++ b/hash.sql @@ -176,13 +176,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' LANGUAGE C; -/* - * Returns hash function OID for specified type - */ -CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) -RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' -LANGUAGE C STRICT; - /* * Calculates hash for integer value */ diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index a5390c6f..9b6397f9 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -25,7 +25,6 @@ PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); -PG_FUNCTION_INFO_V1( get_type_hash_func ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); PG_FUNCTION_INFO_V1( build_hash_condition ); @@ -107,20 +106,6 @@ 
create_hash_partitions_internal(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* - * Returns hash function's OID for a specified type. - */ -Datum -get_type_hash_func(PG_FUNCTION_ARGS) -{ - TypeCacheEntry *tce; - Oid type_oid = PG_GETARG_OID(0); - - tce = lookup_type_cache(type_oid, TYPECACHE_HASH_PROC); - - PG_RETURN_OID(tce->hash_proc); -} - /* * Wrapper for hash_to_part_index(). */ From 1df43cbda8ab9b0928fed17c078df10a0ae0f990 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 14:19:35 +0300 Subject: [PATCH 0354/1124] improve function names (and arg names too) --- expected/pathman_calamity.out | 2 +- range.sql | 132 +++++++++++++++++----------------- src/pl_range_funcs.c | 4 +- 3 files changed, 71 insertions(+), 67 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9fd53712..ae9cfa37 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -110,7 +110,7 @@ SELECT create_range_partitions_internal('calamity.part_test', ERROR: 'bounds' should not be NULL SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], '{part_1}'::TEXT[], NULL); /* not ok */ -ERROR: wrong length of 'relnames' array +ERROR: wrong length of 'partition_names' array SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], NULL, '{tblspc_1}'::TEXT[]); /* not ok */ ERROR: wrong length of 'tablespaces' array diff --git a/range.sql b/range.sql index 2b56194a..ae918255 100644 --- a/range.sql +++ b/range.sql @@ -98,7 +98,6 @@ $$ DECLARE v_rows_count BIGINT; v_atttype REGTYPE; - v_tablespace TEXT; v_max start_value%TYPE; v_cur_value start_value%TYPE := start_value; end_value start_value%TYPE; @@ -163,10 +162,12 @@ BEGIN IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( - parent_relid, - @extschema@.generate_range_bounds(start_value, p_interval, p_count), - NULL, - NULL); + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + 
p_count), + NULL, + NULL); END IF; /* Notify backend about changes */ @@ -284,49 +285,49 @@ END $$ LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified range + * Creates RANGE partitions for specified relation based on bounds array */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - part_count INTEGER := 0; - + part_count INTEGER; BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + attribute := lower(attribute); PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, attribute, - start_value, - end_value); + bounds[0], + bounds[array_length(bounds, 1) - 1]); - /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, p_interval::TEXT); + VALUES (parent_relid, attribute, 2, NULL); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; + /* Create partitions */ + part_count := 
@extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -339,19 +340,20 @@ BEGIN PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - RETURN part_count; /* number of created partitions */ + RETURN part_count; END -$$ LANGUAGE plpgsql; +$$ +LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified range based on datetime attribute + * Creates RANGE partitions for specified range */ CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, attribute TEXT, start_value ANYELEMENT, end_value ANYELEMENT, - p_interval INTERVAL, + p_interval ANYELEMENT, partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ @@ -378,14 +380,11 @@ BEGIN WHILE start_value <= end_value LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING + PERFORM @extschema@.create_single_range_partition( parent_relid, start_value, start_value + p_interval, - @extschema@.get_tablespace(parent_relid); + tablespace := @extschema@.get_tablespace(parent_relid)); start_value := start_value + p_interval; part_count := part_count + 1; @@ -406,48 +405,53 @@ BEGIN END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions2( +/* + * Creates RANGE partitions for specified range based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, attribute TEXT, - bounds ANYARRAY, - relnames TEXT[] DEFAULT NULL, - tablespaces TEXT[] DEFAULT NULL, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - part_count INTEGER; -BEGIN - IF array_ndims(bounds) > 1 THEN - RAISE EXCEPTION 'Bounds array must be a one dimensional array'; - 
END IF; - - IF array_length(bounds, 1) < 2 THEN - RAISE EXCEPTION 'Bounds array must have at least two values'; - END IF; + part_count INTEGER := 0; +BEGIN attribute := lower(attribute); PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, attribute, - bounds[0], - bounds[array_length(bounds, 1) - 1]); + start_value, + end_value); + /* Insert new entry to pathman config */ INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, NULL); + VALUES (parent_relid, attribute, 2, p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) FROM @extschema@.get_plain_schema_and_relname(parent_relid); - /* Create partitions */ - part_count := @extschema@.create_range_partitions_internal(parent_relid, - bounds, - relnames, - tablespaces); + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; /* Notify backend about changes */ PERFORM @extschema@.on_create_partitions(parent_relid); @@ -460,10 +464,10 @@ BEGIN PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - RETURN part_count; + RETURN part_count; /* number of created partitions */ END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; + /* * Split RANGE partition @@ -843,7 +847,7 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.add_range_partitions( parent_relid REGCLASS, bounds ANYARRAY, - relnames TEXT[] DEFAULT NULL, + partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ @@ -858,7 
+862,7 @@ BEGIN /* Create partitions */ part_count := @extschema@.create_range_partitions_internal(parent_relid, bounds, - relnames, + partition_names, tablespaces); /* Notify backend about changes */ @@ -1094,7 +1098,7 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, - relnames TEXT[], + partition_names TEXT[], tablespaces TEXT[]) RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' LANGUAGE C; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 748fc8c0..8e3391ad 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -202,8 +202,8 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) if (partnames && npartnames != ndatums - 1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("wrong length of 'relnames' array"), - errdetail("number of 'relnames' must be less than " + errmsg("wrong length of 'partition_names' array"), + errdetail("number of 'partition_names' must be less than " "'bounds' array length by one"))); if (tablespaces && ntablespaces != ndatums - 1) From 88f31301f614f83e5faa54394d08b5bcf46da325 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 14:56:18 +0300 Subject: [PATCH 0355/1124] add regression tests for function create_range_partitions (bounds array) --- expected/pathman_basic.out | 71 ++++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 34 ++++++++++++++++++ 2 files changed, 105 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index cf90da94..631d35f6 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -261,6 +261,77 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A DROP TABLE test.improved_dummy CASCADE; NOTICE: drop cascades to 11 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT 
pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | partattr | range_min | range_max +---------------------+-----------------------+----------+----------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | partattr | range_min | range_max +---------------------+-----------+----------+----------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | partattr | range_min | range_max 
+---------------------+-----------+----------+----------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 2 other objects /* Test pathman_rel_pathlist_hook() with INSERT query */ CREATE TABLE test.insert_into_select(val int NOT NULL); INSERT INTO test.insert_into_select SELECT generate_series(1, 100); diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index b9d638bb..1411c930 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -78,6 +78,40 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A DROP TABLE test.improved_dummy CASCADE; +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +SELECT pathman.drop_partitions('test.improved_dummy'); + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +SELECT pathman.drop_partitions('test.improved_dummy'); + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +DROP TABLE test.improved_dummy CASCADE; + + /* Test pathman_rel_pathlist_hook() with INSERT query */ CREATE TABLE test.insert_into_select(val int NOT NULL); INSERT INTO test.insert_into_select SELECT 
generate_series(1, 100); From e76dd5270b68105637c704793e22848f24ca9b09 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 14:57:41 +0300 Subject: [PATCH 0356/1124] remove function add_range_partitions() --- range.sql | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/range.sql b/range.sql index ae918255..0f8043ee 100644 --- a/range.sql +++ b/range.sql @@ -841,39 +841,6 @@ $$ LANGUAGE plpgsql; -/* - * Add multiple partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partitions( - parent_relid REGCLASS, - bounds ANYARRAY, - partition_names TEXT[] DEFAULT NULL, - tablespaces TEXT[] DEFAULT NULL) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER; -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - /* Create partitions */ - part_count := @extschema@.create_range_partitions_internal(parent_relid, - bounds, - partition_names, - tablespaces); - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - RETURN part_count; -END -$$ -LANGUAGE plpgsql; - - /* * Drop range partition */ From af7128ddf264e39110172037f47f3d358f2ed81c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 6 Apr 2017 15:14:07 +0300 Subject: [PATCH 0357/1124] make function is_operator_supported() strict --- init.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init.sql b/init.sql index f2d87d13..5acba319 100644 --- a/init.sql +++ b/init.sql @@ -862,7 +862,7 @@ CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( type_oid OID, opname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' -LANGUAGE C; +LANGUAGE C STRICT; /* From f6dd5a0852b46c6855627efd580250126c42980f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 7 Apr 2017 15:20:55 +0300 Subject: [PATCH 0358/1124] bugfix: copy parent's joininfo to child RelOptInfo --- src/pg_pathman.c | 5 
+++++ 1 file changed, 5 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 65d06aef..5ced8862 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -340,6 +340,11 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* Adjust join quals for this child */ + child_rel->joininfo = (List *) adjust_appendrel_attrs(root, + (Node *) parent_rel->joininfo, + appinfo); + /* Adjust target list for this child */ adjust_rel_targetlist_compat(root, child_rel, parent_rel, appinfo); From 777917d74cde8dfd81d2a5b8c61de0fa134811e3 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 7 Apr 2017 17:54:55 +0300 Subject: [PATCH 0359/1124] Add first regression test on join clause --- Makefile | 3 +- expected/pathman_join_clause.out | 97 ++++++++++++++++++++++++++++++++ sql/pathman_join_clause.sql | 49 ++++++++++++++++ 3 files changed, 148 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_join_clause.out create mode 100644 sql/pathman_join_clause.sql diff --git a/Makefile b/Makefile index c151604d..b908802b 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,8 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_utility_stmt_hooking \ - pathman_calamity + pathman_calamity \ + pathman_join_clause EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out new file mode 100644 index 00000000..5fda3656 --- /dev/null +++ b/expected/pathman_join_clause.out @@ -0,0 +1,97 @@ +\set VERBOSITY terse +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE 
mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO fk VALUES (1, 1); +INSERT INTO mytbl VALUES (1, 1, 5), (1,1,6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE fk; +ANALYZE mytbl; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key + FROM mytbl m JOIN fk USING(id1, id2) + WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------ + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key 
<@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_6 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_6_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(43 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key + FROM mytbl m JOIN fk USING(id1, id2) + WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +----------+-----+-----+-----+-----------+--------- + mytbl_6 | 1 | 1 | 5 | | +(1 row) + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +NOTICE: drop cascades to 8 other objects +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql new file mode 100644 index 00000000..02da659c --- /dev/null +++ b/sql/pathman_join_clause.sql @@ -0,0 +1,49 @@ +\set VERBOSITY terse + +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + + +/* + * Test push down a join clause into child nodes of append + */ + +/* create test tables */ +CREATE TABLE fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('mytbl', 'id1', 8); + +/* ...fill out with test data */ +INSERT INTO fk VALUES (1, 1); +INSERT INTO mytbl VALUES (1, 1, 5), (1,1,6); + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE fk; +ANALYZE mytbl; + +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT 
m.tableoid::regclass, id1, id2, key, start_key, end_key + FROM mytbl m JOIN fk USING(id1, id2) + WHERE NOT key <@ int4range(6, end_key); +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key + FROM mytbl m JOIN fk USING(id1, id2) + WHERE NOT key <@ int4range(6, end_key); + + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; + From 9998593921186028c7e3ac16304796a2f572616e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 7 Apr 2017 18:36:43 +0300 Subject: [PATCH 0360/1124] add Maksim Milyutin to authors --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 148f1ce1..8a72e16c 100644 --- a/README.md +++ b/README.md @@ -671,7 +671,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. 
## Authors -Ildar Musin Postgres Professional Ltd., Russia -Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia +Ildar Musin Postgres Professional Ltd., Russia +Alexander Korotkov Postgres Professional Ltd., Russia +Dmitry Ivanov Postgres Professional Ltd., Russia +Maksim Milyutin Postgres Professional Ltd., Russia From e67606038ec9dceb2842e75bd492075c827bcde6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 7 Apr 2017 21:16:43 +0300 Subject: [PATCH 0361/1124] fix definition of pathman_config_params_trigger (sqlsmith) --- init.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init.sql b/init.sql index 5acba319..a196e3d1 100644 --- a/init.sql +++ b/init.sql @@ -118,7 +118,7 @@ RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' LANGUAGE C; CREATE TRIGGER pathman_config_params_trigger -BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); /* From 3b6db77d733c7e77c918be699871b0b5f4f8f81f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 8 Apr 2017 19:36:33 +0300 Subject: [PATCH 0362/1124] improve stability and error handling --- Makefile | 21 ++-- expected/pathman_calamity.out | 61 ++++++++++- expected/pathman_interval.out | 26 ++--- init.sql | 21 ++-- range.sql | 4 +- sql/pathman_calamity.sql | 29 ++++- src/init.c | 14 +-- src/partition_creation.c | 7 +- src/pl_funcs.c | 160 +++++++++++++++++---------- src/pl_hash_funcs.c | 20 +++- src/pl_range_funcs.c | 196 ++++++++++++++++++++++------------ 11 files changed, 377 insertions(+), 182 deletions(-) diff --git a/Makefile b/Makefile index 449f29a6..ece73c45 100644 --- a/Makefile +++ b/Makefile @@ -25,23 +25,24 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" REGRESS = pathman_basic \ - pathman_only \ - 
pathman_cte \ pathman_bgw \ - pathman_inserts \ - pathman_updates \ - pathman_domains \ - pathman_interval \ + pathman_calamity \ pathman_callbacks \ + pathman_column_type \ + pathman_cte \ + pathman_domains \ pathman_foreign_keys \ + pathman_inserts \ + pathman_interval \ + pathman_join_clause \ + pathman_only \ pathman_permissions \ pathman_rowmarks \ - pathman_join_clause \ pathman_runtime_nodes \ - pathman_utility_stmt \ - pathman_column_type \ pathman_update_trigger \ - pathman_calamity + pathman_updates \ + pathman_utility_stmt + EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ae9cfa37..1de6019b 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -244,24 +244,45 @@ SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); ERROR: no hash function for type calamity.part_test /* check function build_range_condition() */ -SELECT build_range_condition('calamity.part_test', 'val', 10, 20); +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'attribute' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ build_range_condition ------------------------------ ((val >= 10) AND (val < 20)) (1 row) -SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ build_range_condition ----------------------- ((val >= 10)) (1 row) -SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ build_range_condition ----------------------- ((val < 10)) (1 row) +/* check function validate_interval_value() */ +SELECT 
validate_interval_value(NULL, 'val', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('calamity.part_test', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'attname' should not be NULL +SELECT validate_interval_value('calamity.part_test', 'val', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('calamity.part_test', 'val', 2, '1 mon'); /* not ok */ +ERROR: invalid input syntax for integer: "1 mon" +SELECT validate_interval_value('calamity.part_test', 'val', 1, '1 mon'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('calamity.part_test', 'val', 2, NULL); /* OK */ + validate_interval_value +------------------------- + t +(1 row) + /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); validate_relname @@ -402,6 +423,21 @@ SELECT build_update_trigger_func_name(NULL) IS NULL; t (1 row) +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* ok */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::regclass); ERROR: cannot find worker for relation "1" @@ -453,6 +489,23 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} (1 row) +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: relation "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +SELECT has_update_trigger(NULL); + has_update_trigger +-------------------- + +(1 row) + +SELECT has_update_trigger(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -463,7 +516,7 @@ $$ LANGUAGE plpgsql; SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); ERROR: 'parent_relid' should not be NULL SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); -ERROR: 'partition' should not be NULL +ERROR: 'partition_relid' should not be NULL SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); invoke_on_partition_created_callback -------------------------------------- diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index ff7340ea..91f55505 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -21,14 +21,14 @@ INSERT INTO test_interval.abc VALUES (250); ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a negative interval */ SELECT set_interval('test_interval.abc', -100); -ERROR: interval must not be negative +ERROR: interval should not be negative /* We 
also shouldn't be able to set a trivial interval directly */ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'test_interval.abc'::REGCLASS; -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); set_interval @@ -64,14 +64,14 @@ INSERT INTO test_interval.abc VALUES (250); ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a negative interval */ SELECT set_interval('test_interval.abc', -100); -ERROR: interval must not be negative +ERROR: interval should not be negative /* We also shouldn't be able to set a trivial interval directly */ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'test_interval.abc'::REGCLASS; -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); set_interval @@ -107,14 +107,14 @@ INSERT INTO test_interval.abc VALUES (250); ERROR: cannot find appropriate partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a negative interval */ SELECT set_interval('test_interval.abc', -100); -ERROR: interval must not be negative +ERROR: interval should not be negative /* We also shouldn't be able to set a trivial interval directly */ UPDATE pathman_config SET range_interval = '0' WHERE partrel = 'test_interval.abc'::REGCLASS; -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); set_interval @@ -148,7 +148,7 @@ SELECT set_interval('test_interval.abc', NULL::INTERVAL); /* Set a trivial interval */ SELECT set_interval('test_interval.abc', '1 second'::INTERVAL); 
-ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set a normal interval */ SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); set_interval @@ -180,7 +180,7 @@ SELECT set_interval('test_interval.abc', NULL::FLOAT4); /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set NaN float as interval */ SELECT set_interval('test_interval.abc', 'NaN'::FLOAT4); ERROR: invalid floating point interval @@ -212,7 +212,7 @@ SELECT set_interval('test_interval.abc', NULL::FLOAT8); /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set NaN float as interval */ SELECT set_interval('test_interval.abc', 'NaN'::FLOAT8); ERROR: invalid floating point interval @@ -244,7 +244,7 @@ SELECT set_interval('test_interval.abc', NULL::NUMERIC); /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); -ERROR: interval must not be trivial +ERROR: interval should not be trivial /* Set NaN numeric as interval */ SELECT set_interval('test_interval.abc', 'NaN'::NUMERIC); ERROR: invalid numeric interval diff --git a/init.sql b/init.sql index a196e3d1..60adbbe4 100644 --- a/init.sql +++ b/init.sql @@ -709,16 +709,6 @@ END $$ LANGUAGE plpgsql; -/* - * Check if tuple from first relation can be converted to fit the second one. - */ -CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( - relation1 REGCLASS, - relation2 REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' -LANGUAGE C STRICT; - - /* * Function for UPDATE triggers. */ @@ -859,11 +849,20 @@ LANGUAGE C STRICT; * Check if TYPE supports the specified operator. 
*/ CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( - type_oid OID, + type_oid REGTYPE, opname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' LANGUAGE C STRICT; +/* + * Check if tuple from first relation can be converted to fit the second one. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( + relation1 REGCLASS, + relation2 REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' +LANGUAGE C STRICT; + /* * Build check constraint name for a specified relation's column. diff --git a/range.sql b/range.sql index 0f8043ee..eaa8a06c 100644 --- a/range.sql +++ b/range.sql @@ -1088,7 +1088,7 @@ SET client_min_messages = WARNING; * Construct CHECK constraint condition for a range partition. */ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - p_relid REGCLASS, + partition_relid REGCLASS, attribute TEXT, start_value ANYELEMENT, end_value ANYELEMENT) @@ -1098,7 +1098,7 @@ LANGUAGE C; CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' -LANGUAGE C; +LANGUAGE C STRICT; /* diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 7864c90d..dfb938e8 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -36,6 +36,9 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; + + + /* test function create_range_partitions_internal() */ SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ @@ -116,9 +119,19 @@ SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); /* check function build_range_condition() */ -SELECT build_range_condition('calamity.part_test', 'val', 10, 20); -SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); -SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +SELECT 
build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + +/* check function validate_interval_value() */ +SELECT validate_interval_value(NULL, 'val', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('calamity.part_test', NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('calamity.part_test', 'val', NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('calamity.part_test', 'val', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('calamity.part_test', 'val', 1, '1 mon'); /* not ok */ +SELECT validate_interval_value('calamity.part_test', 'val', 2, NULL); /* OK */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); @@ -164,6 +177,11 @@ SELECT build_update_trigger_name(NULL) IS NULL; SELECT build_update_trigger_func_name('calamity.part_test'); SELECT build_update_trigger_func_name(NULL) IS NULL; +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* ok */ +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +SELECT build_sequence_name(NULL) IS NULL; + /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::regclass); @@ -183,6 +201,11 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, '1 day'::INTERVAL, 4); /* OK */ +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ + +SELECT has_update_trigger(NULL); +SELECT has_update_trigger(0::REGCLASS); /* not ok */ /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ diff --git a/src/init.c b/src/init.c index fff5ecd2..d6696dce 100644 --- a/src/init.c +++ b/src/init.c @@ -552,6 +552,7 
@@ char * build_check_constraint_name_relid_internal(Oid relid, AttrNumber attno) { + AssertArg(OidIsValid(relid)); return build_check_constraint_name_relname_internal(get_rel_name(relid), attno); } @@ -573,6 +574,7 @@ build_check_constraint_name_relname_internal(const char *relname, char * build_sequence_name_internal(Oid relid) { + AssertArg(OidIsValid(relid)); return psprintf("%s_seq", get_rel_name(relid)); } @@ -583,6 +585,7 @@ build_sequence_name_internal(Oid relid) char * build_update_trigger_name_internal(Oid relid) { + AssertArg(OidIsValid(relid)); return psprintf("%s_upd_trig", get_rel_name(relid)); } @@ -593,12 +596,8 @@ build_update_trigger_name_internal(Oid relid) char * build_update_trigger_func_name_internal(Oid relid) { - Oid nspid = get_rel_namespace(relid); - - return psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(psprintf("%s_upd_trig_func", - get_rel_name(relid)))); + AssertArg(OidIsValid(relid)); + return psprintf("%s_upd_trig_func", get_rel_name(relid)); } @@ -682,7 +681,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* * Loads additional pathman parameters like 'enable_parent' or 'auto' - * from PATHMAN_CONFIG_PARAMS + * from PATHMAN_CONFIG_PARAMS. 
*/ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) @@ -1128,6 +1127,7 @@ validate_hash_constraint(const Expr *expr, return false; } + /* needed for find_inheritance_children_array() function */ static int oid_cmp(const void *p1, const void *p2) diff --git a/src/partition_creation.c b/src/partition_creation.c index 1c2be12c..ee924f21 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1262,7 +1262,12 @@ check_range_available(Oid parent_relid, prel = get_pathman_relation_info(parent_relid); /* If there's no prel, return TRUE (overlap is not possible) */ - if (!prel) return true; + if (!prel) + { + ereport(WARNING, (errmsg("relation \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + return true; + } /* Emit an error if it is not partitioned by RANGE */ shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ecc10c94..cc36a6e8 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -59,6 +59,7 @@ PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_attribute_nullable ); +PG_FUNCTION_INFO_V1( is_operator_supported ); PG_FUNCTION_INFO_V1( is_tuple_convertible ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); @@ -72,7 +73,6 @@ PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); -PG_FUNCTION_INFO_V1( is_operator_supported ); PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); @@ -113,7 +113,7 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple new_tuple); /* Extracted common check */ -static bool +static inline bool check_relation_exists(Oid relid) { return get_rel_type_id(relid) != InvalidOid; @@ -220,8 +220,9 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) 
PG_RETURN_OID(parent); else { - elog(ERROR, "\"%s\" is not a partition", - get_rel_name_or_relid(partition)); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a partition", + get_rel_name_or_relid(partition)))); PG_RETURN_NULL(); } @@ -602,7 +603,8 @@ validate_relname(PG_FUNCTION_ARGS) /* We don't accept NULL */ if (PG_ARGISNULL(0)) - ereport(ERROR, (errmsg("relation should not be NULL"), + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation should not be NULL"), errdetail("function " CppAsString(validate_relname) " received NULL argument"))); @@ -610,7 +612,8 @@ validate_relname(PG_FUNCTION_ARGS) relid = PG_GETARG_OID(0); if (!check_relation_exists(relid)) - ereport(ERROR, (errmsg("relation \"%u\" does not exist", relid), + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid), errdetail("triggered in function " CppAsString(validate_relname)))); @@ -646,6 +649,19 @@ is_attribute_nullable(PG_FUNCTION_ARGS) PG_RETURN_BOOL(result); /* keep compiler happy */ } +Datum +is_operator_supported(PG_FUNCTION_ARGS) +{ + Oid opid, + typid = PG_GETARG_OID(0); + char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + + opid = compatible_oper_opid(list_make1(makeString(opname)), + typid, typid, true); + + PG_RETURN_BOOL(OidIsValid(opid)); +} + Datum is_tuple_convertible(PG_FUNCTION_ARGS) { @@ -694,7 +710,8 @@ build_update_trigger_name(PG_FUNCTION_ARGS) /* Check that relation exists */ if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); result = quote_identifier(build_update_trigger_name_internal(relid)); @@ -705,13 +722,21 @@ Datum build_update_trigger_func_name(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - const char *result; + Oid nspid; + const char *result, + *func_name; /* Check that relation exists */ if 
(!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); + + nspid = get_rel_namespace(relid); - result = build_update_trigger_func_name_internal(relid); + func_name = build_update_trigger_func_name_internal(relid); + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(func_name)); PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -724,12 +749,14 @@ build_check_constraint_name_attnum(PG_FUNCTION_ARGS) const char *result; if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); /* We explicitly do not support system attributes */ if (attnum == InvalidAttrNumber || attnum < 0) - elog(ERROR, "Cannot build check constraint name: " - "invalid attribute number %i", attnum); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("cannot build check constraint name"), + errdetail("invalid attribute number %i", attnum))); result = build_check_constraint_name_relid_internal(relid, attnum); @@ -741,15 +768,21 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); text *attname = PG_GETARG_TEXT_P(1); - AttrNumber attnum = get_attnum(relid, text_to_cstring(attname)); + AttrNumber attnum; const char *result; if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); + + attnum = get_attnum(relid, text_to_cstring(attname)); if (attnum == InvalidAttrNumber) - elog(ERROR, "relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), text_to_cstring(attname)); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("cannot build check constraint name"), + errdetail("relation \"%s\" has no 
column \"%s\"", + get_rel_name_or_relid(relid), + text_to_cstring(attname)))); result = build_check_constraint_name_relid_internal(relid, attnum); @@ -782,23 +815,30 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PathmanInitState init_state; MemoryContext old_mcxt = CurrentMemoryContext; - if (PG_ARGISNULL(0)) - elog(ERROR, "'parent_relid' should not be NULL"); - - if (PG_ARGISNULL(1)) - elog(ERROR, "'attname' should not be NULL"); + if (!PG_ARGISNULL(0)) + { + relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - /* Read parameters */ - relid = PG_GETARG_OID(0); - attname = PG_GETARG_TEXT_P(1); + if (!PG_ARGISNULL(1)) + { + attname = PG_GETARG_TEXT_P(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'attname' should not be NULL"))); /* Check that relation exists */ if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); if (get_attnum(relid, text_to_cstring(attname)) == InvalidAttrNumber) - elog(ERROR, "relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), text_to_cstring(attname)); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("relation \"%s\" has no column \"%s\"", + get_rel_name_or_relid(relid), + text_to_cstring(attname)))); /* Select partitioning type using 'range_interval' */ parttype = PG_ARGISNULL(2) ? 
PT_HASH : PT_RANGE; @@ -979,31 +1019,44 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) #define ARG_RANGE_START 3 /* start_value */ #define ARG_RANGE_END 4 /* end_value */ - Oid parent_oid = PG_GETARG_OID(ARG_PARENT), - partition_oid = PG_GETARG_OID(ARG_CHILD); - - Oid callback_oid = PG_GETARG_OID(ARG_CALLBACK); + Oid parent_relid, + partition_relid; + Oid callback_oid = InvalidOid; init_callback_params callback_params; + /* NOTE: callback may be NULL */ + if (!PG_ARGISNULL(ARG_CALLBACK)) + { + callback_oid = PG_GETARG_OID(ARG_CALLBACK); + } + /* If there's no callback function specified, we're done */ - if (PG_ARGISNULL(ARG_CALLBACK) || callback_oid == InvalidOid) + if (callback_oid == InvalidOid) PG_RETURN_VOID(); - if (PG_ARGISNULL(ARG_PARENT)) - elog(ERROR, "'parent_relid' should not be NULL"); + if (!PG_ARGISNULL(ARG_PARENT)) + { + parent_relid = PG_GETARG_OID(ARG_PARENT); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - if (PG_ARGISNULL(ARG_CHILD)) - elog(ERROR, "'partition' should not be NULL"); + if (!PG_ARGISNULL(ARG_CHILD)) + { + partition_relid = PG_GETARG_OID(ARG_CHILD); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); switch (PG_NARGS()) { case 3: MakeInitCallbackHashParams(&callback_params, callback_oid, - parent_oid, - partition_oid); + parent_relid, + partition_relid); break; case 5: @@ -1025,8 +1078,8 @@ invoke_on_partition_created_callback(PG_FUNCTION_ARGS) MakeInitCallbackRangeParams(&callback_params, callback_oid, - parent_oid, - partition_oid, + parent_relid, + partition_relid, start, end, value_type); @@ -1067,22 +1120,6 @@ check_security_policy(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } -/* - * Check if type supports the specified operator ( + | - | etc ). 
- */ -Datum -is_operator_supported(PG_FUNCTION_ARGS) -{ - Oid opid, - typid = PG_GETARG_OID(0); - char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - - opid = compatible_oper_opid(list_make1(makeString(opname)), - typid, typid, true); - - PG_RETURN_BOOL(OidIsValid(opid)); -} - /* * -------------------------- @@ -1358,7 +1395,14 @@ create_single_update_trigger(PG_FUNCTION_ARGS) Datum has_update_trigger(PG_FUNCTION_ARGS) { - PG_RETURN_BOOL(has_update_trigger_internal(PG_GETARG_OID(0))); + Oid parent_relid = PG_GETARG_OID(0); + + /* Check that relation exists */ + if (!check_relation_exists(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", parent_relid))); + + PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 9b6397f9..7c1e1cb0 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -61,7 +61,8 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) /* Check that there's no partitions yet */ if (get_pathman_relation_info(parent_relid)) - elog(ERROR, "cannot add new HASH partitions"); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot add new HASH partitions"))); partitioned_col_type = get_attribute_type(parent_relid, partitioned_col_name, @@ -77,11 +78,15 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) /* Validate size of 'partition_names' */ if (partition_names && partition_names_size != partitions_count) - elog(ERROR, "size of 'partition_names' must be equal to 'partitions_count'"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("size of 'partition_names' must be equal to 'partitions_count'"))); /* Validate size of 'tablespaces' */ if (tablespaces && tablespaces_size != partitions_count) - elog(ERROR, "size of 'tablespaces' must be equal to 'partitions_count'"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("size of 'tablespaces' must be 
equal to 'partitions_count'"))); /* Convert partition names into RangeVars */ rangevars = qualified_relnames_to_rangevars(partition_names, partitions_count); @@ -135,13 +140,18 @@ build_hash_condition(PG_FUNCTION_ARGS) char *result; if (part_idx >= part_count) - elog(ERROR, "'partition_index' must be lower than 'partitions_count'"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_index' must be lower than 'partitions_count'"))); tce = lookup_type_cache(atttype, TYPECACHE_HASH_PROC); /* Check that HASH function exists */ if (!OidIsValid(tce->hash_proc)) - elog(ERROR, "no hash function for type %s", format_type_be(atttype)); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no hash function for type %s", + format_type_be(atttype)))); /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 8e3391ad..54ffb646 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -31,6 +31,23 @@ #include "utils/syscache.h" +/* Function declarations */ + +PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); +PG_FUNCTION_INFO_V1( create_range_partitions_internal ); +PG_FUNCTION_INFO_V1( check_range_available_pl ); +PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); + +PG_FUNCTION_INFO_V1( get_part_range_by_oid ); +PG_FUNCTION_INFO_V1( get_part_range_by_idx ); + +PG_FUNCTION_INFO_V1( build_range_condition ); +PG_FUNCTION_INFO_V1( build_sequence_name ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); +PG_FUNCTION_INFO_V1( validate_interval_value ); + + static char *deparse_constraint(Oid relid, Node *expr); static ArrayType *construct_infinitable_array(Bound *elems, int nelems, @@ -54,21 +71,12 @@ static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); -/* Function declarations */ - -PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); 
-PG_FUNCTION_INFO_V1( create_range_partitions_internal ); -PG_FUNCTION_INFO_V1( check_range_available_pl ); -PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); - -PG_FUNCTION_INFO_V1( get_part_range_by_oid ); -PG_FUNCTION_INFO_V1( get_part_range_by_idx ); - -PG_FUNCTION_INFO_V1( build_range_condition ); -PG_FUNCTION_INFO_V1( build_sequence_name ); -PG_FUNCTION_INFO_V1( merge_range_partitions ); -PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); -PG_FUNCTION_INFO_V1( validate_interval_value ); +/* Extracted common check */ +static inline bool +check_relation_exists(Oid relid) +{ + return get_rel_type_id(relid) != InvalidOid; +} /* @@ -98,7 +106,8 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Handle 'parent_relid' */ if (PG_ARGISNULL(0)) - elog(ERROR, "'parent_relid' should not be NULL"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); /* Fetch mandatory args */ parent_relid = PG_GETARG_OID(0); @@ -381,31 +390,34 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) Datum get_part_range_by_oid(PG_FUNCTION_ARGS) { - Oid partition_relid = InvalidOid, + Oid partition_relid, parent_relid; PartParentSearch parent_search; - uint32 i; RangeEntry *ranges; const PartRelationInfo *prel; + uint32 i; - if (PG_ARGISNULL(0)) - elog(ERROR, "'partition_relid' should not be NULL"); - else + if (!PG_ARGISNULL(0)) + { partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); parent_relid = get_parent_of_partition(partition_relid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition_relid)))); prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, 
prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->atttype)) - elog(ERROR, "pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->atttype))); - + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->atttype))))); ranges = PrelGetRangesArray(prel); @@ -427,9 +439,10 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) } /* No partition found, report error */ - elog(ERROR, "relation \"%s\" has no partition \"%s\"", - get_rel_name_or_relid(parent_relid), - get_rel_name_or_relid(partition_relid)); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" has no partition \"%s\"", + get_rel_name_or_relid(parent_relid), + get_rel_name_or_relid(partition_relid)))); PG_RETURN_NULL(); /* keep compiler happy */ } @@ -444,35 +457,42 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) Datum get_part_range_by_idx(PG_FUNCTION_ARGS) { - Oid parent_relid = InvalidOid; + Oid parent_relid; int partition_idx = 0; Bound elems[2]; RangeEntry *ranges; const PartRelationInfo *prel; - if (PG_ARGISNULL(0)) - elog(ERROR, "'parent_relid' should not be NULL"); - else + if (!PG_ARGISNULL(0)) + { parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - if (PG_ARGISNULL(1)) - elog(ERROR, "'partition_idx' should not be NULL"); - else + if (!PG_ARGISNULL(1)) + { partition_idx = PG_GETARG_INT32(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_idx' should not be NULL"))); prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->atttype)) - elog(ERROR, "pg_typeof(dummy) should be %s", - 
format_type_be(getBaseType(prel->atttype))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->atttype))))); /* Now we have to deal with 'idx' */ if (partition_idx < -1) { - elog(ERROR, "negative indices other than -1 (last partition) are not allowed"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("negative indices other than -1" + " (last partition) are not allowed"))); } else if (partition_idx == -1) { @@ -480,8 +500,9 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) } else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) { - elog(ERROR, "partition #%d does not exist (total amount is %u)", - partition_idx, PrelChildrenCount(prel)); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition #%d does not exist (total amount is %u)", + partition_idx, PrelChildrenCount(prel)))); } ranges = PrelGetRangesArray(prel); @@ -508,8 +529,8 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Datum build_range_condition(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); + Oid partition_relid; + text *attname; Bound min, max; @@ -517,6 +538,20 @@ build_range_condition(PG_FUNCTION_ARGS) Constraint *con; char *result; + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + + if (!PG_ARGISNULL(1)) + { + attname = PG_GETARG_TEXT_P(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'attribute' should not be NULL")));; + min = PG_ARGISNULL(2) ? 
MakeBoundInf(MINUS_INFINITY) : MakeBound(PG_GETARG_DATUM(2)); @@ -525,11 +560,12 @@ build_range_condition(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(PG_GETARG_DATUM(3)); - con = build_range_check_constraint(relid, text_to_cstring(attname), + con = build_range_check_constraint(partition_relid, + text_to_cstring(attname), &min, &max, bounds_type); - result = deparse_constraint(relid, con->raw_expr); + result = deparse_constraint(partition_relid, con->raw_expr); PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -542,6 +578,9 @@ build_sequence_name(PG_FUNCTION_ARGS) Oid parent_nsp; char *result; + if (!check_relation_exists(parent_relid)) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + parent_nsp = get_rel_namespace(parent_relid); result = psprintf("%s.%s", @@ -781,33 +820,47 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) Datum validate_interval_value(PG_FUNCTION_ARGS) { - Oid partrel = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - PartType parttype = DatumGetPartType(PG_GETARG_DATUM(2)); - Datum interval_text = PG_GETARG_DATUM(3); - Datum interval_value; - Oid interval_type; + Oid partrel; + text *attname; + PartType parttype; - if (PG_ARGISNULL(0)) - elog(ERROR, "'partrel' should not be NULL"); + if (!PG_ARGISNULL(0)) + { + partrel = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partrel' should not be NULL"))); - if (PG_ARGISNULL(1)) - elog(ERROR, "'attname' should not be NULL"); + if (!PG_ARGISNULL(1)) + { + attname = PG_GETARG_TEXT_P(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'attname' should not be NULL"))); - if (PG_ARGISNULL(2)) - elog(ERROR, "'parttype' should not be NULL"); + if (!PG_ARGISNULL(2)) + { + parttype = DatumGetPartType(PG_GETARG_DATUM(2)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); /* - * NULL interval is fine for both HASH 
and RANGE. But for RANGE we need - * to make some additional checks + * NULL interval is fine for both HASH and RANGE. + * But for RANGE we need to make some additional checks. */ if (!PG_ARGISNULL(3)) { + Datum interval_text = PG_GETARG_DATUM(3), + interval_value; + Oid interval_type; char *attname_cstr; Oid atttype; /* type of partitioned attribute */ if (parttype == PT_HASH) - elog(ERROR, "interval must be NULL for HASH partitioned table"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should be NULL for HASH partitioned table"))); /* Convert attname to CSTRING and fetch column's type */ attname_cstr = text_to_cstring(attname); @@ -820,7 +873,9 @@ validate_interval_value(PG_FUNCTION_ARGS) /* Check that interval isn't trivial */ if (interval_is_trivial(atttype, interval_value, interval_type)) - elog(ERROR, "interval must not be trivial"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be trivial"))); } PG_RETURN_BOOL(true); @@ -878,7 +933,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) float4 f = DatumGetFloat4(interval); if (isnan(f) || is_infinite(f)) - elog(ERROR, "invalid floating point interval"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid floating point interval"))); default_value = Float4GetDatum(0); } break; @@ -888,7 +944,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) float8 f = DatumGetFloat8(interval); if (isnan(f) || is_infinite(f)) - elog(ERROR, "invalid floating point interval"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid floating point interval"))); default_value = Float8GetDatum(0); } break; @@ -900,7 +957,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) /* Test for NaN */ if (numeric_is_nan(ni)) - elog(ERROR, "invalid numeric interval"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid numeric 
interval"))); /* Building default value */ numeric = DatumGetNumeric( @@ -970,7 +1028,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) return true; else if (cmp_result > 0) /* Negative interval? */ - elog(ERROR, "interval must not be negative"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be negative"))); /* Everything is OK */ return false; @@ -1112,9 +1171,10 @@ check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) if ((cmp_bounds(&finfo, collid, &last->max, &cur->min) != 0) && (cmp_bounds(&finfo, collid, &cur->max, &last->min) != 0)) { - elog(ERROR, "partitions \"%s\" and \"%s\" are not adjacent", - get_rel_name(last->child_oid), - get_rel_name(cur->child_oid)); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(last->child_oid), + get_rel_name(cur->child_oid)))); } last = cur; From ddbae9dbffa467b57f1a272f00fa720e26e80011 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 8 Apr 2017 20:24:49 +0300 Subject: [PATCH 0363/1124] even more calamity tests --- expected/pathman_calamity.out | 54 ++++++++++++++++++++++++++--------- sql/pathman_calamity.sql | 45 ++++++++++++++++++----------- src/partition_creation.c | 2 +- src/pathman_workers.c | 12 +++++--- src/pl_funcs.c | 14 ++++----- src/pl_range_funcs.c | 11 +++---- 6 files changed, 90 insertions(+), 48 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 1de6019b..0d9168ab 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -102,23 +102,26 @@ SELECT count(*) FROM calamity.part_test; (1 row) DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL /* test function create_range_partitions_internal() */ -SELECT 
create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ ERROR: 'parent_relid' should not be NULL SELECT create_range_partitions_internal('calamity.part_test', - NULL::INT[], NULL, NULL); /* not ok */ + NULL::INT[], NULL, NULL); /* not ok */ ERROR: 'bounds' should not be NULL SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], - '{part_1}'::TEXT[], NULL); /* not ok */ + '{part_1}'::TEXT[], NULL); /* not ok */ ERROR: wrong length of 'partition_names' array SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], - NULL, '{tblspc_1}'::TEXT[]); /* not ok */ + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ ERROR: wrong length of 'tablespaces' array SELECT create_range_partitions_internal('calamity.part_test', - '{1, NULL}'::INT[], NULL, NULL); /* not ok */ + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ ERROR: only first bound can be NULL SELECT create_range_partitions_internal('calamity.part_test', - '{2, 1}'::INT[], NULL, NULL); /* not ok */ + '{2, 1}'::INT[], NULL, NULL); /* not ok */ ERROR: 'bounds' array must be ascending /* test function create_hash_partitions() */ SELECT create_hash_partitions('calamity.part_test', 'val', 2, @@ -348,12 +351,16 @@ SELECT get_partition_key_type(NULL) IS NULL; (1 row) /* check function build_check_constraint_name_attnum() */ -SELECT build_check_constraint_name('calamity.part_test', 1::int2); +SELECT build_check_constraint_name('calamity.part_test', 1::int2); /* OK */ build_check_constraint_name ----------------------------- pathman_part_test_1_check (1 row) +SELECT build_check_constraint_name(0::REGCLASS, 1::int2); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name('calamity.part_test', -1::int2); /* not ok */ +ERROR: invalid attribute number -1 SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; ?column? 
---------- @@ -373,12 +380,16 @@ SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; (1 row) /* check function build_check_constraint_name_attname() */ -SELECT build_check_constraint_name('calamity.part_test', 'val'); +SELECT build_check_constraint_name('calamity.part_test', 'val'); /* OK */ build_check_constraint_name ----------------------------- pathman_part_test_1_check (1 row) +SELECT build_check_constraint_name(0::REGCLASS, 'val'); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name('calamity.part_test', 'nocol'); /* not ok */ +ERROR: relation "part_test" has no column "nocol" SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; ?column? ---------- @@ -398,12 +409,14 @@ SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; (1 row) /* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); +SELECT build_update_trigger_name('calamity.part_test'); /* OK */ build_update_trigger_name --------------------------- part_test_upd_trig (1 row) +SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist SELECT build_update_trigger_name(NULL) IS NULL; ?column? ---------- @@ -411,12 +424,14 @@ SELECT build_update_trigger_name(NULL) IS NULL; (1 row) /* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); +SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ build_update_trigger_func_name ---------------------------------- calamity.part_test_upd_trig_func (1 row) +SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist SELECT build_update_trigger_func_name(NULL) IS NULL; ?column? 
---------- @@ -424,13 +439,13 @@ SELECT build_update_trigger_func_name(NULL) IS NULL; (1 row) /* check function build_sequence_name() */ -SELECT build_sequence_name('calamity.part_test'); /* ok */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ build_sequence_name ------------------------ calamity.part_test_seq (1 row) -SELECT build_sequence_name(1::REGCLASS); /* not ok */ +SELECT build_sequence_name(1::REGCLASS); /* not ok */ ERROR: relation "1" does not exist SELECT build_sequence_name(NULL) IS NULL; ?column? @@ -438,11 +453,20 @@ SELECT build_sequence_name(NULL) IS NULL; t (1 row) +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions /* check function stop_concurrent_part_task() */ -SELECT stop_concurrent_part_task(1::regclass); +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ -SELECT drop_range_partition_expand_next('pg_class'); +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ ERROR: relation "pg_class" is not a partition SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? 
@@ -572,6 +596,8 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); /* check function add_to_pathman_config() -- PHASE #1 */ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ ERROR: 'attname' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index dfb938e8..42203365 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -36,26 +36,26 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; - - +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ /* test function create_range_partitions_internal() */ -SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ SELECT create_range_partitions_internal('calamity.part_test', - NULL::INT[], NULL, NULL); /* not ok */ + NULL::INT[], NULL, NULL); /* not ok */ SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], - '{part_1}'::TEXT[], NULL); /* not ok */ + '{part_1}'::TEXT[], NULL); /* not ok */ SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], - NULL, '{tblspc_1}'::TEXT[]); /* not ok */ + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ SELECT create_range_partitions_internal('calamity.part_test', - '{1, NULL}'::INT[], NULL, NULL); /* not ok */ + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ SELECT create_range_partitions_internal('calamity.part_test', - '{2, 1}'::INT[], NULL, NULL); /* not ok */ + '{2, 1}'::INT[], NULL, NULL); /* not ok */ /* test function create_hash_partitions() */ SELECT 
create_hash_partitions('calamity.part_test', 'val', 2, @@ -158,35 +158,47 @@ SELECT get_partition_key_type(0::regclass); SELECT get_partition_key_type(NULL) IS NULL; /* check function build_check_constraint_name_attnum() */ -SELECT build_check_constraint_name('calamity.part_test', 1::int2); +SELECT build_check_constraint_name('calamity.part_test', 1::int2); /* OK */ +SELECT build_check_constraint_name(0::REGCLASS, 1::int2); /* not ok */ +SELECT build_check_constraint_name('calamity.part_test', -1::int2); /* not ok */ SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; SELECT build_check_constraint_name(NULL, 1::int2) IS NULL; SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; /* check function build_check_constraint_name_attname() */ -SELECT build_check_constraint_name('calamity.part_test', 'val'); +SELECT build_check_constraint_name('calamity.part_test', 'val'); /* OK */ +SELECT build_check_constraint_name(0::REGCLASS, 'val'); /* not ok */ +SELECT build_check_constraint_name('calamity.part_test', 'nocol'); /* not ok */ SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; SELECT build_check_constraint_name(NULL, 'val') IS NULL; SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; /* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); +SELECT build_update_trigger_name('calamity.part_test'); /* OK */ +SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ SELECT build_update_trigger_name(NULL) IS NULL; /* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); +SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ +SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ SELECT build_update_trigger_func_name(NULL) IS NULL; /* check function build_sequence_name() */ -SELECT build_sequence_name('calamity.part_test'); /* ok */ -SELECT 
build_sequence_name(1::REGCLASS); /* not ok */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ +SELECT build_sequence_name(1::REGCLASS); /* not ok */ SELECT build_sequence_name(NULL) IS NULL; +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +SELECT partition_table_concurrently('pg_class'); /* not ok */ + /* check function stop_concurrent_part_task() */ -SELECT stop_concurrent_part_task(1::regclass); +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ /* check function drop_range_partition_expand_next() */ -SELECT drop_range_partition_expand_next('pg_class'); +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ SELECT drop_range_partition_expand_next(NULL) IS NULL; /* check function generate_range_bounds() */ @@ -235,6 +247,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); /* check function add_to_pathman_config() -- PHASE #1 */ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ diff --git a/src/partition_creation.c b/src/partition_creation.c index ee924f21..0a1ed2d0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -298,7 +298,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) } } else - elog(ERROR, "relation \"%s\" is not partitioned by pg_pathman", + elog(ERROR, "relation \"%s\" is not partitioned", get_rel_name_or_relid(relid)); /* Check that 'last_partition' is valid */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 260920a5..94e3cc45 100644 --- a/src/pathman_workers.c 
+++ b/src/pathman_workers.c @@ -635,12 +635,14 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) - elog(ERROR, "\"batch_size\" should not be less than 1 " - "or greater than 10000"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'batch_size' should not be less than 1" + " or greater than 10000"))); /* Check sleep_time */ if (sleep_time < 0.5) - elog(ERROR, "\"sleep_time\" should not be less than 0.5"); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'sleep_time' should not be less than 0.5"))); /* Check if relation is a partitioned table */ shout_if_prel_is_invalid(relid, @@ -656,7 +658,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS) ereport(ERROR, (errmsg("cannot start %s", concurrent_part_bgw), errdetail("table is being partitioned now"))); } - else elog(ERROR, "cannot find relation %d", relid); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not partitioned", + get_rel_name_or_relid(relid)))); /* * Look for an empty slot and also check that a concurrent diff --git a/src/pl_funcs.c b/src/pl_funcs.c index cc36a6e8..992bcc98 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -238,7 +238,7 @@ get_base_type_pl(PG_FUNCTION_ARGS) } /* - * Return partition key type + * Return partition key type. */ Datum get_partition_key_type(PG_FUNCTION_ARGS) @@ -253,7 +253,7 @@ get_partition_key_type(PG_FUNCTION_ARGS) } /* - * Return tablespace name for specified relation + * Return tablespace name of a specified relation. 
*/ Datum get_tablespace_pl(PG_FUNCTION_ARGS) @@ -755,8 +755,7 @@ build_check_constraint_name_attnum(PG_FUNCTION_ARGS) /* We explicitly do not support system attributes */ if (attnum == InvalidAttrNumber || attnum < 0) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("cannot build check constraint name"), - errdetail("invalid attribute number %i", attnum))); + errmsg("invalid attribute number %i", attnum))); result = build_check_constraint_name_relid_internal(relid, attnum); @@ -779,10 +778,9 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) if (attnum == InvalidAttrNumber) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("cannot build check constraint name"), - errdetail("relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), - text_to_cstring(attname)))); + errmsg("relation \"%s\" has no column \"%s\"", + get_rel_name_or_relid(relid), + text_to_cstring(attname)))); result = build_check_constraint_name_relid_internal(relid, attnum); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 54ffb646..49ffc166 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -105,12 +105,13 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Handle 'parent_relid' */ - if (PG_ARGISNULL(0)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parent_relid' should not be NULL"))); + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - /* Fetch mandatory args */ - parent_relid = PG_GETARG_OID(0); value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); start = PG_ARGISNULL(1) ? 
From bbb11784db1740d48b1fa2c6ed576d22218743bf Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Apr 2017 14:48:31 +0300 Subject: [PATCH 0364/1124] bugfix: we cannot use an outer path that is parameterized by the inner rel --- src/hooks.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 53bf30a5..d861f74e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -28,6 +28,11 @@ #include "utils/lsyscache.h" +/* Borrowed from joinpath.c */ +#define PATH_PARAM_BY_REL(path, rel) \ + ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) + + set_join_pathlist_hook_type set_join_pathlist_next = NULL; set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; planner_hook_type planner_hook_next = NULL; @@ -123,6 +128,12 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Select cheapest path for outerrel */ outer = outerrel->cheapest_total_path; + + /* We cannot use an outer path that is parameterized by the inner rel */ + if (PATH_PARAM_BY_REL(outer, innerrel)) + continue; + + /* Wrap 'outer' in unique path if needed */ if (saved_jointype == JOIN_UNIQUE_OUTER) { outer = (Path *) create_unique_path(root, outerrel, From 7e4709f69bffa76eaafd5cb2d4c3bd8b7691bff3 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 10 Apr 2017 18:30:53 +0300 Subject: [PATCH 0365/1124] Optimize inserts, fix update triggers --- init.sql | 11 +- range.sql | 2 +- src/debug_print.c | 29 ------ src/include/partition_filter.h | 3 +- src/include/relation_info.h | 1 + src/include/utils.h | 4 + src/partition_creation.c | 35 ++++--- src/partition_filter.c | 61 ++++++----- src/pl_funcs.c | 183 ++++++++++++++++++++------------- src/pl_range_funcs.c | 2 +- src/relation_info.c | 18 ++-- src/utility_stmt_hooking.c | 2 +- src/utils.c | 28 +++-- 13 files changed, 195 insertions(+), 184 deletions(-) diff --git a/init.sql b/init.sql index 744b06ff..56d1b41d 100644 --- a/init.sql +++ b/init.sql @@ -857,15 +857,8 @@ LANGUAGE C STRICT; * Build check 
constraint name for a specified relation's column. */ CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - partition_relid REGCLASS, - attribute INT2) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - partition_relid REGCLASS, - attribute TEXT) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' + partition_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; /* diff --git a/range.sql b/range.sql index 6f4eebf4..e56ec018 100644 --- a/range.sql +++ b/range.sql @@ -63,7 +63,7 @@ $$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, - expresssion TEXT, + expression TEXT, partition_data BOOLEAN) RETURNS VOID AS $$ diff --git a/src/debug_print.c b/src/debug_print.c index f1e93c87..03d28d53 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -101,32 +101,3 @@ irange_print(IndexRange irange) return str.data; } - -/* - * Print Datum as cstring - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -datum_print(Datum origval, Oid typid) -{ - Oid typoutput; - bool typisvarlena; - Datum val; - - /* Query output function */ - getTypeOutputInfo(typid, &typoutput, &typisvarlena); - - if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(origval)) - return NULL; //unchanged-toast-datum - else if (!typisvarlena) - val = origval; - else - { - /* Definitely detoasted Datum */ - val = PointerGetDatum(PG_DETOAST_DATUM(origval)); - } - - return OidOutputFunctionCall(typoutput, val); -} diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index a18ca751..5608432d 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -96,8 +96,9 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ExprContext 
*tup_convert_econtext; /* ExprContext for projections */ + + ExprState *expr_state; /* for partitioning expression */ } PartitionFilterState; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index adcb7fb5..bba0da29 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -15,6 +15,7 @@ #include "postgres.h" #include "access/attnum.h" #include "fmgr.h" +#include "nodes/bitmapset.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" #include "port/atomics.h" diff --git a/src/include/utils.h b/src/include/utils.h index e4cb240c..cd622840 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -65,5 +65,9 @@ Datum extract_binary_interval_from_text(Datum interval_text, char ** deconstruct_text_array(Datum array, int *array_size); RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +AttrNumber *get_pathman_attributes_map(const PartRelationInfo *prel, + Relation child); + +List *get_part_expression_columns(const PartRelationInfo *prel); #endif /* PATHMAN_UTILS_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 56fc403b..1f7abbd1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -66,7 +66,7 @@ static void create_single_partition_common(Oid parent_relid, Oid partition_relid, Constraint *check_constraint, init_callback_params *callback_params, - const char *partitioned_column); + List *trigger_columns); static Oid create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, @@ -167,6 +167,7 @@ create_single_hash_partition_internal(Oid parent_relid, Constraint *check_constr; Node *expr; init_callback_params callback_params; + List *trigger_columns; /* Generate a name if asked to */ if (!partition_rv) @@ -1697,6 +1698,10 @@ validate_part_expression(Node *node, void *context) " with partitioning relation"); return false; } + + if (IsA(node, Param)) + elog(ERROR, "Partitioning expression should not contain parameters"); + return 
expression_tree_walker(node, validate_part_expression, context); } @@ -1835,8 +1840,12 @@ get_part_expression_info(Oid relid, const char *expr_string, target_entry = linitial(plan->planTree->targetlist); expr_node = (Node *) target_entry->expr; + expr_node = eval_const_expressions(NULL, expr_node); validate_part_expression(expr_node, NULL); + if (contain_mutable_functions(expr_node)) + elog(ERROR, "Expression should not contain mutable functions"); + out_string = nodeToString(expr_node); MemoryContextSwitchTo(oldcontext); @@ -1852,32 +1861,27 @@ get_part_expression_info(Oid relid, const char *expr_string, return expr_info; } -struct extract_columns_names_context +struct extract_column_names_context { List *columns; }; /* Extract column names from raw expression */ static bool -extract_column_names(Node *node, struct extract_raw_columns_context *ctx) +extract_column_names(Node *node, struct extract_column_names_context *ctx) { if (node == NULL) - return false + return false; if (IsA(node, ColumnRef)) { ListCell *lc; - ColumnRef *col = (ColumnRef *) node; - foreach(lc, col->fields) - { - if (IsA(lfirst(lc)), Value) - { - ctx->columns = lappend(strVal(lfirst(lc))); - } - } + foreach(lc, ((ColumnRef *) node)->fields) + if (IsA(lfirst(lc), String)) + ctx->columns = lappend(ctx->columns, strVal(lfirst(lc))); } - return raw_expression_tree_walker(node, extract_raw_columns, ctx); + return raw_expression_tree_walker(node, extract_column_names, ctx); } /* @@ -1899,7 +1903,10 @@ get_constraint_expression(Oid parent_relid, Oid *expr_type, List **columns) elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); - /* Fetch expression for constraint */ + /* + * We need expression type for hash functions. Range functions don't need + * this feature. 
+ */ if (expr_type) *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); diff --git a/src/partition_filter.c b/src/partition_filter.c index 16dfac0c..944762e5 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -523,6 +523,8 @@ partition_filter_create_scan_state(CustomScan *node) Assert(state->on_conflict_action >= ONCONFLICT_NONE || state->on_conflict_action <= ONCONFLICT_UPDATE); + state->expr_state = NULL; + /* There should be exactly one subplan */ Assert(list_length(node->custom_plans) == 1); @@ -532,11 +534,42 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionFilterState *state = (PartitionFilterState *) node; + Index varno = 1; + Node *expr; + MemoryContext old_cxt; + PartitionFilterState *state = (PartitionFilterState *) node; + const PartRelationInfo *prel; + ListCell *lc; /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); + if (state->expr_state == NULL) + { + /* Fetch PartRelationInfo for this partitioned relation */ + prel = get_pathman_relation_info(state->partitioned_table); + Assert(prel != NULL); + + /* Change varno in Vars according to range table */ + expr = copyObject(prel->expr); + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == state->partitioned_table) + { + if (varno > 1) + ChangeVarNodes(expr, 1, varno, 0); + break; + } + varno += 1; + } + + /* Prepare state for expression execution */ + old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + state->expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(old_cxt); + } + /* Init ResultRelInfo cache */ init_result_parts_storage(&state->result_parts, estate, state->on_conflict_action != ONCONFLICT_NONE, @@ -571,11 +604,7 @@ partition_filter_exec(CustomScanState *node) bool isnull; Datum value; ExprDoneCond 
itemIsDone; - ExprState *expr_state; - ListCell *lc; - Index varno = 1; TupleTableSlot *tmp_slot; - Node *expr; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -589,33 +618,13 @@ partition_filter_exec(CustomScanState *node) return slot; } - /* Find proper varno for Vars in expression */ - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = (RangeTblEntry *) lfirst(lc); - if (entry->relid == prel->key) - break; - - varno++; - } - - /* Change varno according to range table */ - expr = copyObject(prel->expr); - if (varno != 1) - ChangeVarNodes(expr, 1, varno, 0); - - /* Prepare state for expression execution */ - old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); - expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_cxt); - /* Switch to per-tuple context */ old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + value = ExecEvalExpr(state->expr_state, econtext, &isnull, &itemIsDone); econtext->ecxt_scantuple = tmp_slot; if (isnull) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 8886f492..98f192cf 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -16,6 +16,7 @@ #include "partition_filter.h" #include "relation_info.h" #include "xact_handling.h" +#include "utils.h" #include "access/tupconvert.h" #include "access/nbtree.h" @@ -25,6 +26,7 @@ #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" +#include "executor/executor.h" #include "executor/spi.h" #include "funcapi.h" #include "miscadmin.h" @@ -54,8 +56,7 @@ PG_FUNCTION_INFO_V1( show_partition_list_internal ); PG_FUNCTION_INFO_V1( build_update_trigger_name ); PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); -PG_FUNCTION_INFO_V1( build_check_constraint_name_attnum ); 
-PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); +PG_FUNCTION_INFO_V1( build_check_constraint_name ); PG_FUNCTION_INFO_V1( validate_relname ); PG_FUNCTION_INFO_V1( is_date_type ); @@ -684,46 +685,18 @@ build_update_trigger_func_name(PG_FUNCTION_ARGS) } Datum -build_check_constraint_name_attnum(PG_FUNCTION_ARGS) +build_check_constraint_name(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - AttrNumber attnum = PG_GETARG_INT16(1); const char *result; if (!check_relation_exists(relid)) elog(ERROR, "Invalid relation %u", relid); - /* We explicitly do not support system attributes */ - if (attnum == InvalidAttrNumber || attnum < 0) - elog(ERROR, "Cannot build check constraint name: " - "invalid attribute number %i", attnum); - result = build_check_constraint_name_relid_internal(relid); - PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } -Datum -build_check_constraint_name_attname(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - AttrNumber attnum = get_attnum(relid, text_to_cstring(attname)); - const char *result; - - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); - - if (attnum == InvalidAttrNumber) - elog(ERROR, "relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), text_to_cstring(attname)); - - result = build_check_constraint_name_relid_internal(relid); - - PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); -} - - /* * ------------------------ * Cache & config updates @@ -1080,6 +1053,87 @@ is_operator_supported(PG_FUNCTION_ARGS) PG_RETURN_BOOL(OidIsValid(opid)); } +struct change_vars_context +{ + HeapTuple tuple; + TupleDesc tuple_desc; + AttrNumber *attributes_map; +}; + +/* + * To prevent calculation of Vars in expression, we change them with + * Const, and fill them with values from current tuple + */ +static Node * +change_vars_to_consts(Node *node, struct change_vars_context *ctx) +{ + const TypeCacheEntry *typcache; + + if 
(IsA(node, Var)) + { + Var *var = (Var *) node; + AttrNumber varattno = ctx->attributes_map[var->varattno - 1]; + Oid atttype; + Const *new_const = makeNode(Const); + HeapTuple tp; + + Assert(var->varno == 1); + if (varattno == 0) + elog(ERROR, "Couldn't find attribute used in expression in child relation"); + + /* + * we get atttribute type using tuple description of child relation, + * because it could be changed earlier, so Var of parent relation + * will not be valid anymore. + */ + atttype = ctx->tuple_desc->attrs[varattno - 1]->atttypid; + + tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(atttype)); + if (HeapTupleIsValid(tp)) + { + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); + new_const->consttypmod = typtup->typtypmod; + new_const->constcollid = typtup->typcollation; + ReleaseSysCache(tp); + } + else + elog(ERROR, "Something went wrong while getting type information"); + + typcache = lookup_type_cache(atttype, 0); + new_const->constbyval = typcache->typbyval; + new_const->constlen = typcache->typlen; + new_const->consttype = atttype; + new_const->location = -1; + + /* extract value from NEW tuple */ + new_const->constvalue = heap_getattr(ctx->tuple, + varattno, + ctx->tuple_desc, + &new_const->constisnull); + return (Node *) new_const; + } + return expression_tree_mutator(node, change_vars_to_consts, NULL); +} + +static ExprState * +prepare_expr_for_execution(const PartRelationInfo *prel, Relation source_rel, + HeapTuple tuple, Oid *value_type) +{ + struct change_vars_context ctx; + Node *expr; + ExprState *expr_state; + + Assert(value_type); + + ctx.tuple = tuple; + ctx.attributes_map = get_pathman_attributes_map(prel, source_rel); + ctx.tuple_desc = RelationGetDescr(source_rel); + expr = change_vars_to_consts(prel->expr, &ctx); + *value_type = exprType(expr); + expr_state = ExecInitExpr((Expr *) expr, NULL); + + return expr_state; +} /* * -------------------------- @@ -1102,17 +1156,19 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) HeapTuple 
old_tuple, new_tuple; - AttrNumber value_attnum; Datum value; Oid value_type; bool isnull; + ExprDoneCond itemIsDone; Oid *parts; int nparts; + ExprContext *econtext; + ExprState *expr_state; + MemoryContext old_cxt; PartParentSearch parent_search; const PartRelationInfo *prel; - AttrNumber *attributes_map; /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) @@ -1145,25 +1201,26 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - attributes_map = get_pathman_attributes_map(prel, source_rel); - prel_expr = copyObject(prel->expr); - modify_expression_attnums(prel_expr, attributes_map); - - /* Get attribute number of partitioning key (may differ from 'prel->attnum') */ - value_attnum = get_attnum(source_relid, get_attname(parent_relid, prel->attnum)); + /* Execute partitioning expression */ + econtext = CreateStandaloneExprContext(); + old_cxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + expr_state = prepare_expr_for_execution(prel, source_rel, new_tuple, + &value_type); + value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + MemoryContextSwitchTo(old_cxt); - /* Extract partitioning key from NEW tuple */ - value = heap_getattr(new_tuple, - value_attnum, - RelationGetDescr(source_rel), - &isnull); + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); - /* Extract value's type */ - value_type = RelationGetDescr(source_rel)->attrs[value_attnum - 1]->atttypid; + if (itemIsDone != ExprSingleResult) + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for matching partitions */ parts = find_partitions_for_value(value, value_type, prel, &nparts); + /* We can free expression context now */ + FreeExprContext(econtext, false); + if (nparts > 1) elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) @@ -1310,9 +1367,7 @@ create_update_triggers(PG_FUNCTION_ARGS) const char *trigname; const PartRelationInfo *prel; uint32 i; - List *vars; - List 
*columns = NIL; - ListCell *lc; + List *columns; /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); @@ -1321,17 +1376,8 @@ create_update_triggers(PG_FUNCTION_ARGS) /* Acquire trigger and attribute names */ trigname = build_update_trigger_name_internal(parent); - /* Generate list of columns used in expression */ - vars = get_part_expression_vars(prel); - - foreach(lc, vars) - { - Var *var = (Var *) lfirst(lc); - char *attname = get_attname(parent, var->varattno); - columns = lappend(columns, attname); - } - /* Create trigger for parent */ + columns = get_part_expression_columns(prel); create_single_update_trigger_internal(parent, trigname, columns); /* Fetch children array */ @@ -1350,12 +1396,9 @@ create_single_update_trigger(PG_FUNCTION_ARGS) { Oid parent = PG_GETARG_OID(0); Oid child = PG_GETARG_OID(1); - const char *trigname, - *attname; + const char *trigname; const PartRelationInfo *prel; - List *vars; - List *columns = NIL; - ListCell *lc; + List *columns; /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); @@ -1365,16 +1408,8 @@ create_single_update_trigger(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Generate list of columns used in expression */ - vars = get_part_expression_vars(prel); - - foreach(lc, vars) - { - Var *var = (Var *) lfirst(lc); - char *attname = get_attname(parent, var->varattno); - columns = lappend(columns, attname); - } - - create_single_update_trigger_internal(child, trigname, attname); + columns = get_part_expression_columns(prel); + create_single_update_trigger_internal(child, trigname, columns); PG_RETURN_VOID(); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7438e7ec..9cfc92ae 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -249,9 +249,9 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) char *tablespace = tablespaces ? 
tablespaces[i] : NULL; (void) create_single_range_partition_internal(parent_relid, + elemtype, &start, &end, - elemtype, name, tablespace); } diff --git a/src/relation_info.c b/src/relation_info.c index 30c2471f..2813b43c 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -99,7 +99,7 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); static void update_parsed_expression(Oid relid, HeapTuple tuple, Datum *values, bool *nulls); -static void fill_part_expression_vars(const PartRelationInfo *prel); +static void fill_part_expression_vars(PartRelationInfo *prel); void init_relation_info_static_data(void) @@ -202,7 +202,7 @@ refresh_pathman_relation_info(Oid relid, prel->attname = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); prel->expr = (Node *) stringToNode(expr); fix_opfuncids(prel->expr); - fill_part_expression_vars(prel); + fill_part_expression_vars((PartRelationInfo *) prel); MemoryContextSwitchTo(oldcontext); @@ -340,18 +340,12 @@ extract_vars(Node *node, PartRelationInfo *prel) * This function fills 'expr_vars' and 'expr_atts' attributes in PartRelationInfo. */ static void -fill_part_expression_vars(const PartRelationInfo *prel) +fill_part_expression_vars(PartRelationInfo *prel) { - if (prel->expr_vars == NIL) - { - MemoryContext ctx; - - ctx = MemoryContextSwitchTo(PathmanRelationCacheContext); - extract_vars(prel->expr, (PartRelationInfo *) prel); - MemoryContextSwitchTo(ctx); - } + prel->expr_vars = NIL; + prel->expr_atts = NULL; - return prel->expr_vars; + extract_vars(prel->expr, prel); } /* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index b1c6b4ff..106f95d3 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -227,7 +227,7 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Is it a column that used in expression? 
*/ attnum = get_attnum(parent_relid, alter_table_cmd->name); if (!bms_is_member(attnum, prel->expr_atts)) - return; + continue; /* Return 'prel->attnum' */ if (attr_number_out) *attr_number_out = attnum; diff --git a/src/utils.c b/src/utils.c index 4f688e01..021a221b 100644 --- a/src/utils.c +++ b/src/utils.c @@ -584,7 +584,7 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) * And it should be faster if expression uses not all fields from relation. */ AttrNumber * -get_pathman_attributes_map(PartRelationInfo *prel, Relation child) +get_pathman_attributes_map(const PartRelationInfo *prel, Relation child) { AttrNumber i = -1; Oid parent_relid = prel->key; @@ -595,13 +595,12 @@ get_pathman_attributes_map(PartRelationInfo *prel, Relation child) while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { int j; - char *attname = get_attname(parent_relid, n); + char *attname = get_attname(parent_relid, i); - for (j = 0; j < natts; i++) + for (j = 0; j < natts; j++) { - Form_pg_attribute att = outdesc->attrs[i]; + Form_pg_attribute att = childDesc->attrs[j]; char *child_attname; - int j; if (att->attisdropped) continue; /* attrMap[i] is already 0 */ @@ -621,20 +620,17 @@ get_pathman_attributes_map(PartRelationInfo *prel, Relation child) return attrMap; } -bool -modify_expression_attnums(Node *node, AttrNumber *map) +List * +get_part_expression_columns(const PartRelationInfo *prel) { - if (node == NULL) - return false; + List *columns = NIL; + int j = -1; - if (IsA(node, Var)) + while ((j = bms_next_member(prel->expr_atts, j)) >= 0) { - Var *var; - AttrNumber orig = var->varattno; - AttrNumber dest = map[orig - 1]; - Assert(dest != 0); - var->varattno = dest; + char *attname = get_attname(prel->key, j); + columns = lappend(columns, makeString(attname)); } - return expression_tree_walker(node, modify_attnums_walker, (void *) map); + return columns; } From bf4ecb8ec86aad9c48c4a5139679abd2f0b29012 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 10 Apr 2017 
18:46:51 +0300 Subject: [PATCH 0366/1124] Fix expression execution in update trigger --- src/partition_creation.c | 4 +++- src/pl_funcs.c | 8 ++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 1f7abbd1..eba55eab 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1844,7 +1844,9 @@ get_part_expression_info(Oid relid, const char *expr_string, expr_node = eval_const_expressions(NULL, expr_node); validate_part_expression(expr_node, NULL); if (contain_mutable_functions(expr_node)) - elog(ERROR, "Expression should not contain mutable functions"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression must be marked IMMUTABLE"))); out_string = nodeToString(expr_node); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 98f192cf..ebe4f2cc 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1081,11 +1081,7 @@ change_vars_to_consts(Node *node, struct change_vars_context *ctx) if (varattno == 0) elog(ERROR, "Couldn't find attribute used in expression in child relation"); - /* - * we get atttribute type using tuple description of child relation, - * because it could be changed earlier, so Var of parent relation - * will not be valid anymore. 
- */ + /* we suppose that type can be different from parent */ atttype = ctx->tuple_desc->attrs[varattno - 1]->atttypid; tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(atttype)); @@ -1112,7 +1108,7 @@ change_vars_to_consts(Node *node, struct change_vars_context *ctx) &new_const->constisnull); return (Node *) new_const; } - return expression_tree_mutator(node, change_vars_to_consts, NULL); + return expression_tree_mutator(node, change_vars_to_consts, (void *) ctx); } static ExprState * From 085c33f4dc9730029e5350dcf1c1ae69f2511bbd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 11 Apr 2017 12:32:45 +0300 Subject: [PATCH 0367/1124] Fix tests related with update triggers, build_constraint_name function and others --- expected/pathman_calamity.out | 43 +++---------------------------- expected/pathman_utility_stmt.out | 8 +++--- hash.sql | 8 +++--- init.sql | 6 +++-- range.sql | 18 ++++++------- sql/pathman_calamity.sql | 14 +++------- sql/pathman_utility_stmt.sql | 8 +++--- src/partition_creation.c | 2 +- src/pl_funcs.c | 6 +++-- 9 files changed, 35 insertions(+), 78 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7615a036..82755dd7 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -301,51 +301,14 @@ SELECT get_partition_key_type(NULL) IS NULL; t (1 row) -/* check function build_check_constraint_name_attnum() */ -SELECT build_check_constraint_name('calamity.part_test', 1::int2); +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); build_check_constraint_name ----------------------------- pathman_part_test_check (1 row) -SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; - ?column? ----------- - t -(1 row) - -SELECT build_check_constraint_name(NULL, 1::int2) IS NULL; - ?column? ----------- - t -(1 row) - -SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; - ?column? 
----------- - t -(1 row) - -/* check function build_check_constraint_name_attname() */ -SELECT build_check_constraint_name('calamity.part_test', 'val'); - build_check_constraint_name ------------------------------ - pathman_part_test_check -(1 row) - -SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; - ?column? ----------- - t -(1 row) - -SELECT build_check_constraint_name(NULL, 'val') IS NULL; - ?column? ----------- - t -(1 row) - -SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; +SELECT build_check_constraint_name(NULL) IS NULL; ?column? ---------- t diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index e7e09070..e36e2d08 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -282,10 +282,10 @@ Check constraints: Inherits: rename.test /* Generates check constraint for relation */ -CREATE OR REPLACE FUNCTION add_constraint(rel regclass, att text) +CREATE OR REPLACE FUNCTION add_constraint(rel regclass) RETURNS VOID AS $$ declare - constraint_name text := build_check_constraint_name(rel, 'a'); + constraint_name text := build_check_constraint_name(rel); BEGIN EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', rel, constraint_name); @@ -299,7 +299,7 @@ LANGUAGE plpgsql; CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; -SELECT add_constraint('rename.test_inh_1', 'a'); +SELECT add_constraint('rename.test_inh_1'); add_constraint ---------------- @@ -319,7 +319,7 @@ Inherits: rename.test_inh /* Check that plain tables are not affected too */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; -SELECT add_constraint('rename.plain_test_renamed', 'a'); +SELECT add_constraint('rename.plain_test_renamed'); add_constraint ---------------- diff --git a/hash.sql 
b/hash.sql index eed3fce6..b634d319 100644 --- a/hash.sql +++ b/hash.sql @@ -113,15 +113,14 @@ BEGIN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; - /* Get partitioning key */ + /* Get partitioning expression */ part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; IF part_attname IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; /* Fetch name of old_partition's HASH constraint */ - old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, - part_attname); + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint @@ -138,8 +137,7 @@ BEGIN EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', new_partition, - @extschema@.build_check_constraint_name(new_partition::REGCLASS, - part_attname), + @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); /* Fetch init_callback from 'params' table */ diff --git a/init.sql b/init.sql index 56d1b41d..49b1e60a 100644 --- a/init.sql +++ b/init.sql @@ -879,13 +879,15 @@ LANGUAGE C STRICT; /* - * Attach a previously partitioned table. + * Add record to pathman_config. If parttype if not specified then determine + * partitioning type. 
*/ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, attname TEXT, range_interval TEXT DEFAULT NULL, - refresh_part_info BOOL DEFAULT TRUE + refresh_part_info BOOL DEFAULT TRUE, + parttype INT4 DEFAULT 0 ) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; diff --git a/range.sql b/range.sql index e56ec018..dd1d4318 100644 --- a/range.sql +++ b/range.sql @@ -290,7 +290,7 @@ $$ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - attribute TEXT, + expression TEXT, bounds ANYARRAY, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL, @@ -308,17 +308,17 @@ BEGIN RAISE EXCEPTION 'Bounds array must have at least two values'; END IF; - attribute := lower(attribute); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, attribute, partition_data); + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - attribute, + expression, bounds[0], bounds[array_length(bounds, 1) - 1]); - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, attribute, 2, NULL); + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false, 2); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid) @@ -548,7 +548,7 @@ BEGIN /* Alter original partition */ v_cond := @extschema@.build_range_condition(partition_relid::regclass, v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); + v_check_name := @extschema@.build_check_constraint_name(partition_relid); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, @@ -959,7 +959,7 @@ BEGIN /* Set check constraint */ EXECUTE 
format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, - @extschema@.build_check_constraint_name(partition_relid, v_attname), + @extschema@.build_check_constraint_name(partition_relid), @extschema@.build_range_condition(partition_relid, v_attname, start_value, @@ -1026,7 +1026,7 @@ BEGIN /* Remove check constraint */ EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, - @extschema@.build_check_constraint_name(partition_relid, v_attname)); + @extschema@.build_check_constraint_name(partition_relid)); /* Remove update trigger */ EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 756f0d1e..a2204767 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -137,17 +137,9 @@ SELECT get_partition_key_type('calamity.part_test'); SELECT get_partition_key_type(0::regclass); SELECT get_partition_key_type(NULL) IS NULL; -/* check function build_check_constraint_name_attnum() */ -SELECT build_check_constraint_name('calamity.part_test', 1::int2); -SELECT build_check_constraint_name('calamity.part_test', NULL::int2) IS NULL; -SELECT build_check_constraint_name(NULL, 1::int2) IS NULL; -SELECT build_check_constraint_name(NULL, NULL::int2) IS NULL; - -/* check function build_check_constraint_name_attname() */ -SELECT build_check_constraint_name('calamity.part_test', 'val'); -SELECT build_check_constraint_name('calamity.part_test', NULL::text) IS NULL; -SELECT build_check_constraint_name(NULL, 'val') IS NULL; -SELECT build_check_constraint_name(NULL, NULL::text) IS NULL; +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); +SELECT build_check_constraint_name(NULL) IS NULL; /* check function build_update_trigger_name() */ SELECT build_update_trigger_name('calamity.part_test'); diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 15367b86..b83831a6 100644 --- 
a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -154,10 +154,10 @@ ALTER TABLE rename.test_0 RENAME TO test_one; \d+ rename.test_one /* Generates check constraint for relation */ -CREATE OR REPLACE FUNCTION add_constraint(rel regclass, att text) +CREATE OR REPLACE FUNCTION add_constraint(rel regclass) RETURNS VOID AS $$ declare - constraint_name text := build_check_constraint_name(rel, 'a'); + constraint_name text := build_check_constraint_name(rel); BEGIN EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', rel, constraint_name); @@ -172,14 +172,14 @@ LANGUAGE plpgsql; CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; -SELECT add_constraint('rename.test_inh_1', 'a'); +SELECT add_constraint('rename.test_inh_1'); ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; \d+ rename.test_inh_one /* Check that plain tables are not affected too */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; -SELECT add_constraint('rename.plain_test_renamed', 'a'); +SELECT add_constraint('rename.plain_test_renamed'); \d+ rename.plain_test_renamed ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; \d+ rename.plain_test diff --git a/src/partition_creation.c b/src/partition_creation.c index eba55eab..c4986769 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1880,7 +1880,7 @@ extract_column_names(Node *node, struct extract_column_names_context *ctx) ListCell *lc; foreach(lc, ((ColumnRef *) node)->fields) if (IsA(lfirst(lc), String)) - ctx->columns = lappend(ctx->columns, strVal(lfirst(lc))); + ctx->columns = lappend(ctx->columns, lfirst(lc)); } return raw_expression_tree_walker(node, extract_column_names, ctx); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ebe4f2cc..00891192 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ 
-744,8 +744,10 @@ add_to_pathman_config(PG_FUNCTION_ARGS) get_rel_name_or_relid(relid)); } - /* Select partitioning type using 'range_interval' */ - parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; + /* Select partitioning type */ + parttype = PG_GETARG_INT32(4); + if ((parttype != PT_HASH) && (parttype != PT_RANGE)) + parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; /* Parse and check expression */ expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); From 55e8949bfc9aa61b4d9ca7cefc6750259dbcb056 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 11 Apr 2017 14:43:09 +0300 Subject: [PATCH 0368/1124] Fix clang compilation error --- src/pl_funcs.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f7797563..096d5716 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -770,7 +770,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; /* Parse and check expression */ - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); expr_info = get_part_expression_info(relid, expression, (parttype == PT_HASH), true); Assert(expr_info->expr_datum != (Datum) 0); From 03652a7aa03f25f69dd54d548f8c4c330080be39 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 11 Apr 2017 15:40:18 +0300 Subject: [PATCH 0369/1124] Add tests for expressions --- expected/pathman_expressions.out | 78 +++++++++++++++++++++++++++----- sql/pathman_expressions.sql | 22 ++++++--- 2 files changed, 82 insertions(+), 18 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 7ddbf216..b462bf20 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -72,12 +72,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; (3 rows) /* range */ -CREATE TABLE test.range_rel ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT); +CREATE TABLE test.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test.range_rel (dt, txt) SELECT 
g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: start value is less than min value of "random()" SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); NOTICE: sequence "range_rel_seq" does not exist, skipping @@ -88,13 +87,68 @@ NOTICE: sequence "range_rel_seq" does not exist, skipping INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" -SELECT * FROM test.range_rel_6; - id | dt | txt -----+--------------------------+---------------------------------- - 61 | Wed Jan 01 00:00:00 2020 | 339e0b1f73322ffca5ec77523ff1adfa - 62 | Sat Feb 01 00:00:00 2020 | 3c09dde93bf2730744668c266845a828 - 63 | Sun Mar 01 00:00:00 2020 | e6c8aaac1e4a1eb6594309a2fd24a5e5 - 64 | Wed Apr 01 00:00:00 2020 | 8cea991c596b35cc412ad489af424341 -(4 rows) +SELECT COUNT(*) FROM test.range_rel_6; + count +------- + 4 +(1 row) INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +SELECT pathman.create_update_triggers('test.range_rel'); + create_update_triggers +------------------------ + +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 65 +(1 row) + +SELECT COUNT(*) FROM test.range_rel_1; + count +------- + 12 +(1 row) + +SELECT 
COUNT(*) FROM test.range_rel_2; + count +------- + 12 +(1 row) + +UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; +/* counts in partitions should be changed */ +SELECT COUNT(*) FROM test.range_rel; + count +------- + 65 +(1 row) + +SELECT COUNT(*) FROM test.range_rel_1; + count +------- + 10 +(1 row) + +SELECT COUNT(*) FROM test.range_rel_2; + count +------- + 24 +(1 row) + diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 95fbea82..bc24e30f 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -28,16 +28,26 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; /* range */ -CREATE TABLE test.range_rel ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT); +CREATE TABLE test.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); -SELECT * FROM test.range_rel_6; +SELECT COUNT(*) FROM test.range_rel_6; INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); - +SELECT COUNT(*) FROM test.range_rel_6; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + +SELECT pathman.create_update_triggers('test.range_rel'); +SELECT COUNT(*) FROM test.range_rel; +SELECT COUNT(*) FROM test.range_rel_1; +SELECT COUNT(*) FROM test.range_rel_2; +UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; + +/* counts in 
partitions should be changed */ +SELECT COUNT(*) FROM test.range_rel; +SELECT COUNT(*) FROM test.range_rel_1; +SELECT COUNT(*) FROM test.range_rel_2; From 574f52de4e2e1a1e904cf202ad462bbc00519f42 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 11 Apr 2017 17:34:09 +0300 Subject: [PATCH 0370/1124] Add migration script --- init.sql | 8 +- pg_pathman--1.3--1.4.sql | 601 +++++++++++++++++++++++++++++++++++++++ pg_pathman.control | 2 +- 3 files changed, 606 insertions(+), 5 deletions(-) create mode 100644 pg_pathman--1.3--1.4.sql diff --git a/init.sql b/init.sql index 0b62e63a..5845b0dc 100644 --- a/init.sql +++ b/init.sql @@ -41,12 +41,12 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( upd_expr BOOL DEFAULT FALSE, /* update expression on next refresh? */ /* check for allowed part types */ - CHECK (parttype IN (1, 2)), + CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), /* check for correct interval */ - CHECK (@extschema@.validate_interval_value(atttype, - parttype, - range_interval)) + CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(atttype, + parttype, + range_interval)) ); diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql new file mode 100644 index 00000000..4595e795 --- /dev/null +++ b/pg_pathman--1.3--1.4.sql @@ -0,0 +1,601 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.3--1.4.sql + * Migration scripts to version 1.4 + * + * Copyright (c) 2015-2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT); +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + atttype OID, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); + +ALTER TABLE 
@extschema@.pathman_config ADD COLUMN expression_p TEXT NOT NULL; +ALTER TABLE @extschema@.pathman_config ADD COLUMN atttype OID NOT NULL; +ALTER TABLE @extschema@.pathman_config ADD COLUMN upd_expr BOOL DEFAULT FALSE; + +/* update constraint */ +ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_check; +ALTER TABLE @extschema@.pathman_config + ADD CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(atttype, + parttype, + range_interval)); + +/* mark 'expression_p' and 'atttype' to update on next start */ +UPDATE @extschema@.pathman_config SET upd_expr = TRUE; + +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + expression TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN FALSE; +END +$$ +LANGUAGE plpgsql; + +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, INT2); +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, TEXT); + +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + partition_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 
'build_check_constraint_name' +LANGUAGE C STRICT; + +DROP FUNCTION @extschema@.add_to_pathman_config(REGCLASS, TEXT, TEXT); + +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + attname TEXT, + range_interval TEXT DEFAULT NULL, + refresh_part_info BOOL DEFAULT TRUE, + parttype INT4 DEFAULT 0 +) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + expression TEXT, + partitions_count INT4, + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + expression, + partitions_count, + partition_names, + tablespaces); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + +DROP FUNCTION @extschema@.build_hash_condition(REGTYPE, TEXT, INT4, INT4); + +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + expression 
TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + v_min start_value%TYPE; + v_max start_value%TYPE; + v_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + expression, parent_relid::TEXT) + INTO v_count, v_min, v_max; + + /* Check if column has NULL values */ + IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + RAISE EXCEPTION 'expression "%" returns NULL values', expression; + END IF; + + /* Check lower boundary */ + IF start_value > v_min THEN + RAISE EXCEPTION 'start value is less than min value of "%"', expression; + END IF; + + /* Check upper boundary */ + IF end_value <= v_max THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + expression TEXT, + partition_data BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + expression := lower(expression); + PERFORM @extschema@.common_relation_checks(parent_relid, expression); +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_atttype REGTYPE; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i 
INTEGER; + +BEGIN + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', + parent_relid, + start_value, + end_value, + v_atttype::TEXT) + USING + expression; + END IF; + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM 
@extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF v_max IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + 
end_value); + END IF; + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, p_interval, p_count), + NULL, + NULL); + END IF; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on bounds array + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER; +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[0], + bounds[array_length(bounds, 1) - 1]); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false, 2); + + /* Create sequence for child 
partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval ANYELEMENT, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := @extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM 
@extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range based on datetime expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT, false); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM 
@extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + partition_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; diff --git a/pg_pathman.control b/pg_pathman.control index bace115b..0d6af5d3 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.3' +default_version = '1.4' module_pathname = '$libdir/pg_pathman' From e38be2b3101351246980b7c1bff2eea259673f5b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 11 Apr 2017 19:51:26 +0300 Subject: [PATCH 0371/1124] Fix migration script --- pg_pathman--1.3--1.4.sql | 31 +++++++++++++++++++++++++++---- src/init.c | 36 ++++++++++++++++++++++++++++++++++-- 2 files changed, 61 insertions(+), 6 deletions(-) diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index 4595e795..f5c1cdeb 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql @@ -8,7 +8,7 @@ * ------------------------------------------------------------------------ */ -DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT); +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT) CASCADE; CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( atttype OID, parttype INTEGER, @@ -18,12 +18,11 @@ LANGUAGE C; DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); -ALTER TABLE @extschema@.pathman_config ADD COLUMN expression_p TEXT NOT NULL; -ALTER TABLE @extschema@.pathman_config ADD COLUMN atttype OID NOT NULL; +ALTER TABLE @extschema@.pathman_config ADD COLUMN expression_p TEXT DEFAULT '--not set--'; +ALTER TABLE @extschema@.pathman_config ADD COLUMN 
atttype OID DEFAULT 1; ALTER TABLE @extschema@.pathman_config ADD COLUMN upd_expr BOOL DEFAULT FALSE; /* update constraint */ -ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_check; ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(atttype, parttype, @@ -32,6 +31,7 @@ ALTER TABLE @extschema@.pathman_config /* mark 'expression_p' and 'atttype' to update on next start */ UPDATE @extschema@.pathman_config SET upd_expr = TRUE; +DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( relation REGCLASS, expression TEXT) @@ -95,6 +95,9 @@ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; +DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INT4, BOOLEAN, + TEXT[], TEXT[]); + CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, expression TEXT, @@ -146,6 +149,7 @@ SET client_min_messages = WARNING; DROP FUNCTION @extschema@.build_hash_condition(REGTYPE, TEXT, INT4, INT4); +DROP FUNCTION @extschema@.check_boundaries(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, expression TEXT, @@ -183,6 +187,7 @@ END $$ LANGUAGE plpgsql; +DROP FUNCTION @extschema@.prepare_for_partitioning(REGCLASS, TEXT, BOOLEAN); CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, expression TEXT, @@ -208,6 +213,9 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on datetime attribute */ +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, + INTERVAL, INTEGER, BOOLEAN); + CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, @@ -311,6 +319,9 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions 
for specified relation based on numerical expression */ +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, + ANYELEMENT, INTEGER, BOOLEAN); + CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, @@ -410,6 +421,9 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on bounds array */ +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYARRAY, + TEXT[], TEXT[], BOOLEAN); + CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, @@ -471,6 +485,9 @@ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified range */ +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, + ANYELEMENT, ANYELEMENT, BOOLEAN); + CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, expression TEXT, @@ -531,6 +548,9 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified range based on datetime expression */ +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, + ANYELEMENT, ANYELEMENT, INTERVAL, BOOLEAN); + CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( parent_relid REGCLASS, expression TEXT, @@ -592,6 +612,9 @@ BEGIN END $$ LANGUAGE plpgsql; +DROP FUNCTION @extschema@.build_range_condition(REGCLASS, TEXT, + ANYELEMENT, ANYELEMENT); + CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, expression TEXT, diff --git a/src/init.c b/src/init.c index ab3a9875..1d43084c 100644 --- a/src/init.c +++ b/src/init.c @@ -733,6 +733,16 @@ read_pathman_config(void) HeapScanDesc scan; Snapshot snapshot; HeapTuple htup; + Oid *relids = NULL; + Size relids_index = 0, + relids_count = 100, + j; + + /* + * Initialize relids array, we keep here relations that require + * update their expression. 
+ */ + relids = (Oid *) palloc(sizeof(Oid) * relids_count); /* Open PATHMAN_CONFIG with latest snapshot available */ rel = heap_open(get_pathman_config_relid(false), AccessShareLock); @@ -752,7 +762,8 @@ read_pathman_config(void) while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + bool upd_expr, + isnull[Natts_pathman_config]; Oid relid; /* partitioned table */ /* Extract Datums from tuple 'htup' */ @@ -763,6 +774,20 @@ read_pathman_config(void) Assert(!isnull[Anum_pathman_config_parttype - 1]); Assert(!isnull[Anum_pathman_config_expression - 1]); Assert(!isnull[Anum_pathman_config_expression_p - 1]); + Assert(!isnull[Anum_pathman_config_upd_expression - 1]); + + upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); + if (upd_expr) + { + if (relids_index >= relids_count) + { + relids_count += 100; + relids = (Oid *) repalloc(relids, sizeof(Oid) * relids_count); + } + + relids[relids_index] = relid; + relids_index += 1; + } /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); @@ -778,7 +803,8 @@ read_pathman_config(void) } /* get_pathman_relation_info() will refresh this entry */ - refresh_pathman_relation_info(relid, + if (!upd_expr) + refresh_pathman_relation_info(relid, values, true); /* allow lazy prel loading */ } @@ -787,6 +813,12 @@ read_pathman_config(void) heap_endscan(scan); UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); + + /* Update expressions */ + for (j = 0; j < relids_index; j++) + get_pathman_relation_info(relids[j]); + + pfree(relids); } From fb85322ad5a05356ace48a779e82d03b620e30a9 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 12 Apr 2017 14:20:16 +0300 Subject: [PATCH 0372/1124] Fix migration script, return to old version, so tests can work --- pg_pathman--1.3--1.4.sql | 25 +++++++++++++++++ pg_pathman.control | 2 +- src/include/relation_info.h | 1 + src/init.c | 35 
+++--------------------- src/relation_info.c | 53 ++++++++++++++++++++++--------------- 5 files changed, 62 insertions(+), 54 deletions(-) diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index f5c1cdeb..d4e9a80b 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql @@ -31,6 +31,31 @@ ALTER TABLE @extschema@.pathman_config /* mark 'expression_p' and 'atttype' to update on next start */ UPDATE @extschema@.pathman_config SET upd_expr = TRUE; +/* we've changed the format of constraint names, and we need rename them */ +CREATE OR REPLACE FUNCTION @extschema@.update_constraints() +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; +BEGIN + FOR v_rec IN (SELECT conrelid::regclass AS t, conname, regexp_replace(conname, '\d+_check', 'check') as new_conname + FROM pg_constraint + WHERE conname ~ 'pathman_.*_\d+_\d+_check') + LOOP + EXECUTE format('ALTER TABLE %s RENAME CONSTRAINT %s TO %s', + v_rec.t, v_rec.conname, v_rec.new_conname); + END LOOP; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + +SELECT @extschema@.update_constraints(); + +/* we don't need this function anymore */ +DROP FUNCTION @extschema@.update_constraints(); + DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( relation REGCLASS, diff --git a/pg_pathman.control b/pg_pathman.control index 0d6af5d3..bace115b 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.4' +default_version = '1.3' module_pathname = '$libdir/pg_pathman' diff --git a/src/include/relation_info.h b/src/include/relation_info.h index bba0da29..e572dbab 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -237,6 +237,7 @@ PrelLastChild(const PartRelationInfo *prel) } +PartRelationInfo *create_pathman_relation_info(Oid relid); const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, 
bool allow_incomplete); diff --git a/src/init.c b/src/init.c index 1d43084c..d33f4d05 100644 --- a/src/init.c +++ b/src/init.c @@ -733,16 +733,6 @@ read_pathman_config(void) HeapScanDesc scan; Snapshot snapshot; HeapTuple htup; - Oid *relids = NULL; - Size relids_index = 0, - relids_count = 100, - j; - - /* - * Initialize relids array, we keep here relations that require - * update their expression. - */ - relids = (Oid *) palloc(sizeof(Oid) * relids_count); /* Open PATHMAN_CONFIG with latest snapshot available */ rel = heap_open(get_pathman_config_relid(false), AccessShareLock); @@ -776,21 +766,9 @@ read_pathman_config(void) Assert(!isnull[Anum_pathman_config_expression_p - 1]); Assert(!isnull[Anum_pathman_config_upd_expression - 1]); - upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); - if (upd_expr) - { - if (relids_index >= relids_count) - { - relids_count += 100; - relids = (Oid *) repalloc(relids, sizeof(Oid) * relids_count); - } - - relids[relids_index] = relid; - relids_index += 1; - } - /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); + upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); /* Check that relation 'relid' exists */ if (get_rel_type_id(relid) == InvalidOid) @@ -802,8 +780,9 @@ read_pathman_config(void) errhint(INIT_ERROR_HINT))); } - /* get_pathman_relation_info() will refresh this entry */ - if (!upd_expr) + if (upd_expr) + create_pathman_relation_info(relid); + else refresh_pathman_relation_info(relid, values, true); /* allow lazy prel loading */ @@ -813,12 +792,6 @@ read_pathman_config(void) heap_endscan(scan); UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); - - /* Update expressions */ - for (j = 0; j < relids_index; j++) - get_pathman_relation_info(relids[j]); - - pfree(relids); } diff --git a/src/relation_info.c b/src/relation_info.c index 2813b43c..2b28a243 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -116,35 
+116,18 @@ init_relation_info_static_data(void) NULL); } - -/* - * refresh\invalidate\get\remove PartRelationInfo functions. - */ - /* Create or update PartRelationInfo in local cache. Might emit ERROR. */ -const PartRelationInfo * -refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete) +PartRelationInfo * +create_pathman_relation_info(Oid relid) { - const LOCKMODE lockmode = AccessShareLock; - const TypeCacheEntry *typcache; - Oid *prel_children; - uint32 prel_children_count = 0, - i; - bool found_entry; PartRelationInfo *prel; - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - char *expr; - HeapTuple tp; - MemoryContext oldcontext; + bool found_entry; AssertTemporaryContext(); - prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, relid, HASH_ENTER, &found_entry); + elog(DEBUG2, found_entry ? "Refreshing record for relation %u in pg_pathman's cache [%u]" : @@ -166,7 +149,33 @@ refresh_pathman_relation_info(Oid relid, } /* First we assume that this entry is invalid */ - prel->valid = false; + prel->valid = false; + return prel; +} + +/* + * refresh\invalidate\get\remove PartRelationInfo functions. 
+ */ + +const PartRelationInfo * +refresh_pathman_relation_info(Oid relid, + Datum *values, + bool allow_incomplete) +{ + const LOCKMODE lockmode = AccessShareLock; + const TypeCacheEntry *typcache; + Oid *prel_children; + uint32 prel_children_count = 0, + i; + PartRelationInfo *prel; + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + char *expr; + HeapTuple tp; + MemoryContext oldcontext; + + AssertTemporaryContext(); + prel = create_pathman_relation_info(relid); /* Try locking parent, exit fast if 'allow_incomplete' */ if (allow_incomplete) From aa9dd8f633c3e05b36d59c9edb17a92a880f64f7 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 12 Apr 2017 14:37:11 +0300 Subject: [PATCH 0373/1124] Add comment in get_part_expression_info --- src/partition_creation.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index d06f232d..c4eb5f74 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1840,6 +1840,7 @@ get_part_expression_info(Oid relid, const char *expr_string, /* Plan this query. 
We reuse 'expr_node' here */ plan = pg_plan_query(query, 0, NULL); if (IsA(plan->planTree, IndexOnlyScan)) + /* we get IndexOnlyScan in targetlist if expression is primary key */ target_entry = linitial(((IndexOnlyScan *) plan->planTree)->indextlist); else target_entry = linitial(plan->planTree->targetlist); From 3dcf247860240f951a62c81fd6eef3e491b5ca9d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 13 Apr 2017 15:05:47 +0300 Subject: [PATCH 0374/1124] remove useless calls of get_plain_schema_and_relname() --- range.sql | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/range.sql b/range.sql index eaa8a06c..705991ef 100644 --- a/range.sql +++ b/range.sql @@ -157,8 +157,7 @@ BEGIN VALUES (parent_relid, attribute, 2, p_interval::TEXT); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_or_replace_sequence(parent_relid); IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( @@ -258,8 +257,7 @@ BEGIN VALUES (parent_relid, attribute, 2, p_interval::TEXT); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_or_replace_sequence(parent_relid); IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( @@ -320,8 +318,7 @@ BEGIN VALUES (parent_relid, attribute, 2, NULL); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_or_replace_sequence(parent_relid); /* Create partitions */ part_count := @extschema@.create_range_partitions_internal(parent_relid, @@ -375,8 +372,7 @@ BEGIN VALUES (parent_relid, attribute, 2, p_interval::TEXT); /* 
Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_or_replace_sequence(parent_relid); WHILE start_value <= end_value LOOP @@ -435,8 +431,7 @@ BEGIN VALUES (parent_relid, attribute, 2, p_interval::TEXT); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_or_replace_sequence(parent_relid); WHILE start_value <= end_value LOOP From 7ae45b5092cacb91b9480a3c3bc6240db162cf97 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 14 Apr 2017 12:13:34 +0300 Subject: [PATCH 0375/1124] Fix COPY operation --- src/utility_stmt_hooking.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 106f95d3..f84e833c 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -620,19 +620,28 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, expr_state = ExecInitExpr((Expr *) expr, NULL); } + /* Switch into per tuple memory context */ + MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) break; - /* And now we can form the input tuple. */ + /* We can form the input tuple. */ tuple = heap_form_tuple(tupDesc, values, nulls); + if (tuple_oid != InvalidOid) + HeapTupleSetOid(tuple, tuple_oid); + + /* + * Constraints might reference the tableoid column, so initialize + * t_tableOid before evaluating them. 
+ */ + tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); + /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecStoreTuple(tuple, slot, InvalidBuffer, false); - /* Switch into per tuple memory context */ - MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; @@ -663,15 +672,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, heap_freetuple(tuple_old); } - if (tuple_oid != InvalidOid) - HeapTupleSetOid(tuple, tuple_oid); - - /* - * Constraints might reference the tableoid column, so initialize - * t_tableOid before evaluating them. - */ - tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); - /* Triggers and stuff need to be invoked in query context. */ MemoryContextSwitchTo(oldcontext); From 4dd25a9322718f5b7c171f6491c5ef8338b14996 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 14 Apr 2017 12:25:24 +0300 Subject: [PATCH 0376/1124] Fix table_oid assign in COPY --- src/utility_stmt_hooking.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f84e833c..48de35c7 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -632,12 +632,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (tuple_oid != InvalidOid) HeapTupleSetOid(tuple, tuple_oid); - /* - * Constraints might reference the tableoid column, so initialize - * t_tableOid before evaluating them. 
- */ - tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); - /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecStoreTuple(tuple, slot, InvalidBuffer, false); @@ -661,6 +655,12 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; + /* + * Constraints might reference the tableoid column, so initialize + * t_tableOid before evaluating them. + */ + tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); + /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { From 9ef3944f8b6d6d5972a10250e6382b2b4b11d96e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 14 Apr 2017 14:55:16 +0300 Subject: [PATCH 0377/1124] Add new files for PartitionUpdate --- Makefile | 2 +- src/hooks.c | 3 ++ src/include/partition_update.h | 0 src/partition_filter.c | 1 - src/partition_update.c | 15 +++++++++ src/planner_tree_modification.c | 58 ++++++++++++++++++++++++++++++++- 6 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 src/include/partition_update.h create mode 100644 src/partition_update.c diff --git a/Makefile b/Makefile index ece73c45..6d7d56a4 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/src/hooks.c b/src/hooks.c index ae214eeb..b4d5d3a8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -509,6 +509,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node 
for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); + /* Add PartitionFilter node for UPDATE queries */ + ExecuteForPlanTree(result, add_partition_update_nodes); + /* Decrement relation tags refcount */ decr_refcount_relation_tags(); diff --git a/src/include/partition_update.h b/src/include/partition_update.h new file mode 100644 index 00000000..e69de29b diff --git a/src/partition_filter.c b/src/partition_filter.c index 8fa09d88..db21c110 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -718,7 +718,6 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) return result_tlist; } - /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_update.c b/src/partition_update.c new file mode 100644 index 00000000..b24829c9 --- /dev/null +++ b/src/partition_update.c @@ -0,0 +1,15 @@ + +/* + * -------------------------------- + * PartitionUpdate implementation + * -------------------------------- + */ + +Plan * +make_partition_update(Plan *subplan, Oid parent_relid, + OnConflictAction conflict_action, + List *returning_list) + +{ +} + diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff18611d..a475165e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -339,7 +339,7 @@ handle_modification_query(Query *parse) /* * ------------------------------- - * PartitionFilter-related stuff + * PartitionFilter and PartitionUpdate-related stuff * ------------------------------- */ @@ -351,6 +351,14 @@ add_partition_filters(List *rtable, Plan *plan) plan_tree_walker(plan, partition_filter_visitor, rtable); } +/* Add PartitionUpdate nodes to the plan tree */ +void +add_partition_update_nodes(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_updaters) + plan_tree_walker(plan, partition_update_visitor, rtable); +} + /* * Add partition filters to ModifyTable node's children. 
* @@ -399,6 +407,54 @@ partition_filter_visitor(Plan *plan, void *context) } +/* + * Add partition updaters to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. + */ +static void +partition_update_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; + + /* Skip if not ModifyTable with 'INSERT' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) + return; + + Assert(rtable && IsA(rtable, List)); + + lc3 = list_head(modify_table->returningLists); + forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); + const PartRelationInfo *prel = get_pathman_relation_info(relid); + + /* Check that table is partitioned */ + if (prel) + { + List *returning_list = NIL; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); + lc3 = lnext(lc3); + } + + lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), + relid, + modify_table->onConflictAction, + returning_list); + } + } +} + + /* * ----------------------------------------------- * Parenthood safety checks (SELECT * FROM ONLY) From d57501d0db23507ff15224086a6677d24aa45afc Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 15 Apr 2017 01:09:48 +0300 Subject: [PATCH 0378/1124] improve child path generation for Runtime[Merge]Append (using get_cheapest_parameterized_child_path()) --- expected/pathman_only.out | 12 ++- src/hooks.c | 35 +++++---- src/include/pathman.h | 3 + src/include/runtimeappend.h | 1 - src/nodes_common.c | 45 ++++++++--- src/pg_pathman.c | 152 ++++++++++++++++++------------------ src/runtime_merge_append.c | 17 +--- src/runtimeappend.c | 20 +---- 8 files changed, 149 insertions(+), 136 deletions(-) diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 6870ca6a..61f74d27 100644 --- 
a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -179,16 +179,26 @@ SELECT * FROM test_only.from_only_test JOIN q1 USING(val); -> CTE Scan on q1 -> Custom Scan (RuntimeAppend) -> Seq Scan on from_only_test_1 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_2 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_3 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_4 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_5 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_6 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_7 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_8 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_9 from_only_test + Filter: (q1.val = val) -> Seq Scan on from_only_test_10 from_only_test -(15 rows) + Filter: (q1.val = val) +(25 rows) /* should be OK */ EXPLAIN (COSTS OFF) diff --git a/src/hooks.c b/src/hooks.c index ae214eeb..c697418c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -120,7 +120,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, NestPath *nest_path; /* NestLoop we're creating */ ParamPathInfo *ppi; /* parameterization info */ Relids inner_required; /* required paremeterization relids */ - List *filtered_joinclauses = NIL; + List *filtered_joinclauses = NIL, + *saved_ppi_list; ListCell *rinfo_lc; if (!IsA(cur_inner_path, AppendPath)) @@ -128,6 +129,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Select cheapest path for outerrel */ outer = outerrel->cheapest_total_path; + + /* Wrap outer path with Unique if needed */ if (saved_jointype == JOIN_UNIQUE_OUTER) { outer = (Path *) create_unique_path(root, outerrel, @@ -135,12 +138,21 @@ pathman_join_pathlist_hook(PlannerInfo *root, Assert(outer); } + /* No way to do this in a parameterized inner path */ + if (saved_jointype == JOIN_UNIQUE_INNER) + return; + /* Make innerrel path depend on outerrel's column */ 
inner_required = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), bms_make_singleton(outerrel->relid)); + /* Preserve existing ppis built by get_appendrel_parampathinfo() */ + saved_ppi_list = innerrel->ppilist; + /* Get the ParamPathInfo for a parameterized path */ + innerrel->ppilist = NIL; ppi = get_baserel_parampathinfo(root, innerrel, inner_required); + innerrel->ppilist = saved_ppi_list; /* Skip ppi->ppi_clauses don't reference partition attribute */ if (!(ppi && get_partitioned_attr_clauses(ppi->ppi_clauses, @@ -149,8 +161,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, continue; inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); - if (saved_jointype == JOIN_UNIQUE_INNER) - return; /* No way to do this with a parameterized inner path */ + if (!inner) + return; /* could not build it, retreat! */ initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ @@ -388,13 +400,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); Path *inner_path = NULL; ParamPathInfo *ppi; - List *ppi_part_clauses = NIL; - - /* Fetch ParamPathInfo & try to extract part-related clauses */ - ppi = get_baserel_parampathinfo(root, rel, inner_required); - if (ppi && ppi->ppi_clauses) - ppi_part_clauses = get_partitioned_attr_clauses(ppi->ppi_clauses, - prel, rel->relid); /* Skip if rel contains some join-related stuff or path type mismatched */ if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || @@ -403,13 +408,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, continue; } - /* - * Skip if neither rel->baserestrictinfo nor - * ppi->ppi_clauses reference partition attribute - */ - if (!(rel_part_clauses || ppi_part_clauses)) + /* Skip if rel->baserestrictinfo doesn't reference partition attribute */ + if (!rel_part_clauses) continue; + /* Get existing parameterization */ + ppi = get_appendrel_parampathinfo(rel, inner_required); + if (IsA(cur_path, AppendPath) && 
pg_pathman_enable_runtimeappend) inner_path = create_runtimeappend_path(root, cur_path, ppi, paramsel); diff --git a/src/include/pathman.h b/src/include/pathman.h index 23860924..5911a7e1 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -123,6 +123,9 @@ Bitmapset *translate_col_privs(const Bitmapset *parent_privs, void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, PathKey *pathkeyAsc, PathKey *pathkeyDesc); +Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, + Relids required_outer); + typedef struct { diff --git a/src/include/runtimeappend.h b/src/include/runtimeappend.h index 579afc2a..912ce18e 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtimeappend.h @@ -37,7 +37,6 @@ typedef struct /* Restrictions to be checked during ReScan and Exec */ List *custom_exprs; - List *custom_expr_states; /* All available plans \ plan states */ HTAB *children_table; diff --git a/src/nodes_common.c b/src/nodes_common.c index ad6bfa8c..5a3c307e 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -350,14 +350,45 @@ create_append_path_common(PlannerInfo *root, result->nchildren = list_length(inner_append->subpaths); result->children = (ChildScanCommon *) - palloc(result->nchildren * sizeof(ChildScanCommon)); + palloc(result->nchildren * sizeof(ChildScanCommon)); + i = 0; foreach (lc, inner_append->subpaths) { - Path *path = lfirst(lc); - Index relindex = path->parent->relid; + Path *path = (Path *) lfirst(lc); + RelOptInfo *childrel = path->parent; ChildScanCommon child; + /* Do we have parameterization? */ + if (param_info) + { + Relids required_outer = param_info->ppi_req_outer; + + /* Rebuild path using new 'required_outer' */ + path = get_cheapest_parameterized_child_path(root, childrel, + required_outer); + } + + /* + * We were unable to re-parameterize child path, + * which means that we can't use Runtime[Merge]Append, + * since its children can't evaluate join quals. 
+ */ + if (!path) + { + int j; + + for (j = 0; j < i; j++) + pfree(result->children[j]); + pfree(result->children); + + list_free_deep(result->cpath.custom_paths); + + pfree(result); + + return NULL; /* notify caller */ + } + child = (ChildScanCommon) palloc(sizeof(ChildScanCommonData)); result->cpath.path.startup_cost += path->startup_cost; @@ -365,7 +396,7 @@ create_append_path_common(PlannerInfo *root, child->content_type = CHILD_PATH; child->content.path = path; - child->relid = root->simple_rte_array[relindex]->relid; + child->relid = root->simple_rte_array[childrel->relid]->relid; Assert(child->relid != InvalidOid); result->cpath.custom_paths = lappend(result->cpath.custom_paths, @@ -476,12 +507,6 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - - scan_state->custom_expr_states = - (List *) ExecInitExpr((Expr *) scan_state->custom_exprs, - (PlanState *) scan_state); - node->ss.ps.ps_TupFromTlist = false; } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 8c99b75a..e6f66f96 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -92,10 +92,6 @@ static void generate_mergeappend_paths(PlannerInfo *root, PathKey *pathkeyAsc, PathKey *pathkeyDesc); -static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, - RelOptInfo *rel, - Relids required_outer); - /* We can transform Param into Const provided that 'econtext' is available */ #define IsConstValue(wcxt, node) \ @@ -293,7 +289,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, { childquals = NIL; - forboth(lc1, wrappers, lc2, parent_rel->baserestrictinfo) + forboth (lc1, wrappers, lc2, parent_rel->baserestrictinfo) { WrapperNode *wrap = (WrapperNode *) lfirst(lc1); Node *new_clause; @@ -1333,78 +1329,6 @@ accumulate_append_subpath(List *subpaths, Path *path) return lappend(subpaths, path); } -/* - * 
get_cheapest_parameterized_child_path - * Get cheapest path for this relation that has exactly the requested - * parameterization. - * - * Returns NULL if unable to create such a path. - */ -static Path * -get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, - Relids required_outer) -{ - Path *cheapest; - ListCell *lc; - - /* - * Look up the cheapest existing path with no more than the needed - * parameterization. If it has exactly the needed parameterization, we're - * done. - */ - cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, - NIL, - required_outer, - TOTAL_COST); - Assert(cheapest != NULL); - if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) - return cheapest; - - /* - * Otherwise, we can "reparameterize" an existing path to match the given - * parameterization, which effectively means pushing down additional - * joinquals to be checked within the path's scan. However, some existing - * paths might check the available joinquals already while others don't; - * therefore, it's not clear which existing path will be cheapest after - * reparameterization. We have to go through them all and find out. - */ - cheapest = NULL; - foreach(lc, rel->pathlist) - { - Path *path = (Path *) lfirst(lc); - - /* Can't use it if it needs more than requested parameterization */ - if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer)) - continue; - - /* - * Reparameterization can only increase the path's cost, so if it's - * already more expensive than the current cheapest, forget it. 
- */ - if (cheapest != NULL && - compare_path_costs(cheapest, path, TOTAL_COST) <= 0) - continue; - - /* Reparameterize if needed, then recheck cost */ - if (!bms_equal(PATH_REQ_OUTER(path), required_outer)) - { - path = reparameterize_path(root, path, required_outer, 1.0); - if (path == NULL) - continue; /* failed to reparameterize this one */ - Assert(bms_equal(PATH_REQ_OUTER(path), required_outer)); - - if (cheapest != NULL && - compare_path_costs(cheapest, path, TOTAL_COST) <= 0) - continue; - } - - /* We have a new best path */ - cheapest = path; - } - - /* Return the best path, or NULL if we found no suitable candidate */ - return cheapest; -} /* * generate_mergeappend_paths @@ -1945,7 +1869,6 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, if (parallel_workers > 0) { - /* Generate a partial append path. */ appendpath = create_append_path_compat(rel, partial_subpaths, NULL, parallel_workers); @@ -2006,3 +1929,76 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, create_append_path_compat(rel, subpaths, required_outer, 0)); } } + +/* + * get_cheapest_parameterized_child_path + * Get cheapest path for this relation that has exactly the requested + * parameterization. + * + * Returns NULL if unable to create such a path. + */ +Path * +get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, + Relids required_outer) +{ + Path *cheapest; + ListCell *lc; + + /* + * Look up the cheapest existing path with no more than the needed + * parameterization. If it has exactly the needed parameterization, we're + * done. 
+ */ + cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, + NIL, + required_outer, + TOTAL_COST); + Assert(cheapest != NULL); + if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) + return cheapest; + + /* + * Otherwise, we can "reparameterize" an existing path to match the given + * parameterization, which effectively means pushing down additional + * joinquals to be checked within the path's scan. However, some existing + * paths might check the available joinquals already while others don't; + * therefore, it's not clear which existing path will be cheapest after + * reparameterization. We have to go through them all and find out. + */ + cheapest = NULL; + foreach(lc, rel->pathlist) + { + Path *path = (Path *) lfirst(lc); + + /* Can't use it if it needs more than requested parameterization */ + if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer)) + continue; + + /* + * Reparameterization can only increase the path's cost, so if it's + * already more expensive than the current cheapest, forget it. 
+ */ + if (cheapest != NULL && + compare_path_costs(cheapest, path, TOTAL_COST) <= 0) + continue; + + /* Reparameterize if needed, then recheck cost */ + if (!bms_equal(PATH_REQ_OUTER(path), required_outer)) + { + path = reparameterize_path(root, path, required_outer, 1.0); + if (path == NULL) + continue; /* failed to reparameterize this one */ + Assert(bms_equal(PATH_REQ_OUTER(path), required_outer)); + + if (cheapest != NULL && + compare_path_costs(cheapest, path, TOTAL_COST) <= 0) + continue; + } + + /* We have a new best path */ + cheapest = path; + } + + /* Return the best path, or NULL if we found no suitable candidate */ + return cheapest; +} diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index ecdd29c0..b24ae2b3 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -389,8 +389,6 @@ fetch_next_tuple(CustomScanState *node) for (;;) { - bool quals; - scan_state->ms_slots[i] = ExecProcNode(ps); if (TupIsNull(scan_state->ms_slots[i])) @@ -399,17 +397,8 @@ fetch_next_tuple(CustomScanState *node) break; } - node->ss.ps.ps_ExprContext->ecxt_scantuple = scan_state->ms_slots[i]; - quals = ExecQual(rstate->custom_expr_states, - node->ss.ps.ps_ExprContext, false); - - ResetExprContext(node->ss.ps.ps_ExprContext); - - if (quals) - { - binaryheap_replace_first(scan_state->ms_heap, Int32GetDatum(i)); - break; - } + binaryheap_replace_first(scan_state->ms_heap, Int32GetDatum(i)); + break; } } @@ -417,13 +406,11 @@ fetch_next_tuple(CustomScanState *node) { /* All the subplans are exhausted, and so is the heap */ rstate->slot = NULL; - return; } else { i = DatumGetInt32(binaryheap_first(scan_state->ms_heap)); rstate->slot = scan_state->ms_slots[i]; - return; } } diff --git a/src/runtimeappend.c b/src/runtimeappend.c index 7260ab2c..17b7f5f5 100644 --- a/src/runtimeappend.c +++ b/src/runtimeappend.c @@ -94,39 +94,27 @@ static void fetch_next_tuple(CustomScanState *node) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; 
- TupleTableSlot *slot = NULL; while (scan_state->running_idx < scan_state->ncur_plans) { ChildScanCommon child = scan_state->cur_plans[scan_state->running_idx]; PlanState *state = child->content.plan_state; - bool quals; for (;;) { - slot = ExecProcNode(state); + TupleTableSlot *slot = ExecProcNode(state); if (TupIsNull(slot)) break; - node->ss.ps.ps_ExprContext->ecxt_scantuple = slot; - quals = ExecQual(scan_state->custom_expr_states, - node->ss.ps.ps_ExprContext, false); - - ResetExprContext(node->ss.ps.ps_ExprContext); - - if (quals) - { - scan_state->slot = slot; - return; - } + scan_state->slot = slot; + return; } scan_state->running_idx++; } - scan_state->slot = slot; - return; + scan_state->slot = NULL; } TupleTableSlot * From e90d686bf74bbfff6c14f2d50df88379ccc6028a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 15 Apr 2017 02:08:00 +0300 Subject: [PATCH 0379/1124] more tests for RuntimeAppend --- expected/pathman_runtime_nodes.out | 87 ++++++++++++++++++++++++++++++ sql/pathman_runtime_nodes.sql | 25 +++++++++ 2 files changed, 112 insertions(+) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 98b08710..eed7a2ff 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -285,6 +285,93 @@ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ ok (1 row) +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + -> Index Only Scan using runtime_test_1_pkey on runtime_test_1 t1 + Filter: (run_values.val = id) + -> Index Only Scan using 
runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(18 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(16 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = 
t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + DROP SCHEMA test CASCADE; NOTICE: drop cascades to 30 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 517995b9..55b1d3ca 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -266,6 +266,31 @@ select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From 518038208a6c12bc039a3fe4aa558d649182f578 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 15 Apr 2017 03:11:45 +0300 Subject: [PATCH 0380/1124] select_required_plans(): pfree array if it's empty --- src/nodes_common.c | 12 +++++++++++- 1 file changed, 11 
insertions(+), 1 deletion(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 5a3c307e..c59bec66 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -49,9 +49,12 @@ transform_plans_into_states(RuntimeAppendState *scan_state, for (i = 0; i < n; i++) { - ChildScanCommon child = selected_plans[i]; + ChildScanCommon child; PlanState *ps; + AssertArg(selected_plans); + child = selected_plans[i]; + /* Create new node since this plan hasn't been used yet */ if (child->content_type != CHILD_PLAN_STATE) { @@ -109,6 +112,13 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) result[used++] = child; } + /* Get rid of useless array */ + if (used == 0) + { + pfree(result); + result = NULL; + } + *nres = used; return result; } From 9e615f019b96cfbd371a99e54b2f228205e40ad4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 15 Apr 2017 12:44:38 +0300 Subject: [PATCH 0381/1124] improve tlist handling in Runtime[Merge]Append --- expected/pathman_runtime_nodes.out | 35 ++++++++-- sql/pathman_runtime_nodes.sql | 27 ++++++-- src/nodes_common.c | 100 +++++++++++++++++++++++------ 3 files changed, 131 insertions(+), 31 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index eed7a2ff..db506a3b 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -180,9 +180,21 @@ begin into res; /* test empty tlist */ + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + select id, generate_series(1, 2) gen, val from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) + where id = (select * from test.vals order by val limit 1) order by id, gen, val offset 1 limit 1 into res; /* without 
IndexOnlyScan */ @@ -248,11 +260,7 @@ select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); create index on test.runtime_test_3 (id); create index on test.runtime_test_3_0 (id); -analyze test.run_values; -analyze test.runtime_test_1; -analyze test.runtime_test_2; -analyze test.runtime_test_3; -analyze test.runtime_test_3_0; +VACUUM ANALYZE; set pg_pathman.enable_runtimeappend = on; set pg_pathman.enable_runtimemergeappend = on; select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ @@ -285,6 +293,19 @@ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ ok (1 row) +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_1'::regclass and range_max::int < 0; + ?column? +---------- + t +(1 row) + +select from test.runtime_test_1 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + /* RuntimeAppend (join, enabled parent) */ select pathman.set_enable_parent('test.runtime_test_1', true); set_enable_parent @@ -301,7 +322,7 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; -> Limit -> Seq Scan on run_values -> Custom Scan (RuntimeAppend) - -> Index Only Scan using runtime_test_1_pkey on runtime_test_1 t1 + -> Seq Scan on runtime_test_1 t1 Filter: (run_values.val = id) -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 Index Cond: (id = run_values.val) diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 55b1d3ca..4fbc0bce 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -190,9 +190,21 @@ begin into res; /* test empty tlist */ + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from 
test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + select id, generate_series(1, 2) gen, val from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) + where id = (select * from test.vals order by val limit 1) order by id, gen, val offset 1 limit 1 into res; /* without IndexOnlyScan */ @@ -250,11 +262,8 @@ create index on test.runtime_test_3 (id); create index on test.runtime_test_3_0 (id); -analyze test.run_values; -analyze test.runtime_test_1; -analyze test.runtime_test_2; -analyze test.runtime_test_3; -analyze test.runtime_test_3_0; +VACUUM ANALYZE; + set pg_pathman.enable_runtimeappend = on; set pg_pathman.enable_runtimemergeappend = on; @@ -265,6 +274,12 @@ select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_1'::regclass and range_max::int < 0; + +select from test.runtime_test_1 +where id = any (select generate_series(-10, -1)); /* should be empty */ /* RuntimeAppend (join, enabled parent) */ select pathman.set_enable_parent('test.runtime_test_1', true); diff --git a/src/nodes_common.c b/src/nodes_common.c index c59bec66..a6d3bfaa 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -14,6 +14,7 @@ #include "access/sysattr.h" #include "optimizer/restrictinfo.h" +#include "optimizer/tlist.h" #include "optimizer/var.h" #include "rewrite/rewriteManip.h" #include "utils/memutils.h" @@ -123,40 +124,91 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) return result; } -/* Replace 'varno' of child's Vars with the 'append_rel_rti' */ +/* Adapt child's tlist for parent relation (change varnos and varattnos) */ static List * 
-replace_tlist_varnos(List *tlist, Index old_varno, Index new_varno) +build_parent_tlist(List *tlist, AppendRelInfo *appinfo) { - List *temp_tlist; - - AssertArg(old_varno != 0); - AssertArg(new_varno != 0); + List *temp_tlist, + *pulled_vars; + ListCell *lc1, + *lc2; temp_tlist = copyObject(tlist); - ChangeVarNodes((Node *) temp_tlist, old_varno, new_varno, 0); + pulled_vars = pull_vars_of_level((Node *) temp_tlist, 0); + + foreach (lc1, pulled_vars) + { + Var *tlist_var = (Var *) lfirst(lc1); + + AttrNumber attnum = 0; + foreach (lc2, appinfo->translated_vars) + { + Var *translated_var = (Var *) lfirst(lc2); + + attnum++; + + if (translated_var->varattno == tlist_var->varattno) + tlist_var->varattno = attnum; + } + } + + ChangeVarNodes((Node *) temp_tlist, + appinfo->child_relid, + appinfo->parent_relid, + 0); return temp_tlist; } +/* Is tlist 'a' subset of tlist 'b'? (in terms of Vars) */ +static bool +tlist_is_var_subset(List *a, List *b) +{ + ListCell *lc; + + foreach (lc, b) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + + if (!IsA(te->expr, Var) && !IsA(te->expr, RelabelType)) + continue; + + if (!tlist_member_ignore_relabel((Node *) te->expr, a)) + return true; + } + + return false; +} + /* Append partition attribute in case it's not present in target list */ static List * -append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel) +append_part_attr_to_tlist(List *tlist, + AppendRelInfo *appinfo, + const PartRelationInfo *prel) { ListCell *lc; + AttrNumber part_attr; + Var *part_attr_tvar; bool part_attr_found = false; + /* Get attribute number of partitioned column (may differ) */ + part_attr_tvar = (Var *) list_nth(appinfo->translated_vars, + AttrNumberGetAttrOffset(prel->attnum)); + Assert(part_attr_tvar); + part_attr = (part_attr_tvar)->varoattno; + foreach (lc, tlist) { TargetEntry *te = (TargetEntry *) lfirst(lc); Var *var = (Var *) te->expr; - if (IsA(var, Var) && var->varoattno == prel->attnum) + if (IsA(var, Var) && 
var->varoattno == part_attr) part_attr_found = true; } if (!part_attr_found) { - Var *newvar = makeVar(relno, + Var *newvar = makeVar(appinfo->child_relid, prel->attnum, prel->atttype, prel->atttypmod, @@ -450,13 +502,27 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, { Plan *child_plan = (Plan *) lfirst(lc2); RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) { - tlist = replace_tlist_varnos(child_plan->targetlist, - child_rel->relid, - rel->relid); + List *temp_tlist = build_parent_tlist(child_plan->targetlist, + appinfo); + + /* + * HACK: PostgreSQL may return a physical tlist, + * which is bad (we may have child IndexOnlyScans). + * If we find out that CustomScan's tlist is a + * Var-superset of child's tlist, we replace it + * with the latter, else we'll have a broken tlist + * labeling (Assert). + * + * NOTE: physical tlist may only be used if we're not + * asked to produce tuples of exact format (CP_EXACT_TLIST). 
+ */ + if (tlist_is_var_subset(temp_tlist, tlist)) + tlist = temp_tlist; /* Done, new target list has been built */ processed_rel_tlist = true; @@ -464,14 +530,12 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Add partition attribute if necessary (for ExecQual()) */ child_plan->targetlist = append_part_attr_to_tlist(child_plan->targetlist, - child_rel->relid, - prel); + appinfo, prel); /* Now make custom_scan_tlist match child plans' targetlists */ if (!cscan->custom_scan_tlist) - cscan->custom_scan_tlist = replace_tlist_varnos(child_plan->targetlist, - child_rel->relid, - rel->relid); + cscan->custom_scan_tlist = build_parent_tlist(child_plan->targetlist, + appinfo); } } From 21a26827fc17438efaaa741b4adcb233b9a9288c Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Sun, 16 Apr 2017 19:07:44 +0300 Subject: [PATCH 0382/1124] Light fix pathman_join_clause test --- expected/pathman_join_clause.out | 28 ++++++++++++++-------------- sql/pathman_join_clause.sql | 20 ++++++++++---------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 5fda3656..bc50b0d2 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -6,34 +6,34 @@ CREATE SCHEMA test; * Test push down a join clause into child nodes of append */ /* create test tables */ -CREATE TABLE fk ( +CREATE TABLE test.fk ( id1 INT NOT NULL, id2 INT NOT NULL, start_key INT, end_key INT, PRIMARY KEY (id1, id2)); -CREATE TABLE mytbl ( +CREATE TABLE test.mytbl ( id1 INT NOT NULL, id2 INT NOT NULL, key INT NOT NULL, - CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES fk(id1, id2), + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), PRIMARY KEY (id1, key)); -SELECT pathman.create_hash_partitions('mytbl', 'id1', 8); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); create_hash_partitions ------------------------ 8 (1 row) /* ...fill out with test data */ 
-INSERT INTO fk VALUES (1, 1); -INSERT INTO mytbl VALUES (1, 1, 5), (1,1,6); +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1,1,6); /* gather statistics on test tables to have deterministic plans */ -ANALYZE fk; -ANALYZE mytbl; +ANALYZE test.fk; +ANALYZE test.mytbl; /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM mytbl m JOIN fk USING(id1, id2) + FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); QUERY PLAN ------------------------------------------------------------------------------------ @@ -84,14 +84,14 @@ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM mytbl m JOIN fk USING(id1, id2) + FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); - tableoid | id1 | id2 | key | start_key | end_key -----------+-----+-----+-----+-----------+--------- - mytbl_6 | 1 | 1 | 5 | | + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | (1 row) DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 10 other objects DROP EXTENSION pg_pathman CASCADE; -NOTICE: drop cascades to 8 other objects DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 02da659c..594e9305 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -10,36 +10,36 @@ CREATE SCHEMA test; */ /* create test tables */ -CREATE TABLE fk ( +CREATE TABLE test.fk ( id1 INT NOT NULL, id2 INT NOT NULL, start_key INT, end_key INT, PRIMARY KEY (id1, id2)); -CREATE TABLE mytbl ( +CREATE TABLE test.mytbl ( id1 INT NOT NULL, id2 INT NOT NULL, key INT NOT NULL, - CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES fk(id1, id2), + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, 
id2), PRIMARY KEY (id1, key)); -SELECT pathman.create_hash_partitions('mytbl', 'id1', 8); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); /* ...fill out with test data */ -INSERT INTO fk VALUES (1, 1); -INSERT INTO mytbl VALUES (1, 1, 5), (1,1,6); +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1,1,6); /* gather statistics on test tables to have deterministic plans */ -ANALYZE fk; -ANALYZE mytbl; +ANALYZE test.fk; +ANALYZE test.mytbl; /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM mytbl m JOIN fk USING(id1, id2) + FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM mytbl m JOIN fk USING(id1, id2) + FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); From b5e479d9acdd83b16c291c673b006acdd9adf9bd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 18 Apr 2017 14:04:21 +0300 Subject: [PATCH 0383/1124] Add base methods for PartitionUpdate --- src/include/partition_update.h | 55 +++++++++ src/include/planner_tree_modification.h | 1 + src/partition_filter.c | 35 +----- src/partition_update.c | 145 ++++++++++++++++++++++-- src/planner_tree_modification.c | 13 +-- 5 files changed, 206 insertions(+), 43 deletions(-) diff --git a/src/include/partition_update.h b/src/include/partition_update.h index e69de29b..b9607c5c 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -0,0 +1,55 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" 
+#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + +typedef struct PartitionUpdateState +{ + CustomScanState css; + + Oid partitioned_table; + List *returning_list; + ModifyTableState *parent_state; + Plan *subplan; /* proxy variable to store subplan */ +} PartitionUpdateState; + +extern bool pg_pathman_enable_partition_update; + +extern CustomScanMethods partition_update_plan_methods; +extern CustomExecMethods partition_update_exec_methods; + +void init_partition_update_static_data(void); +Node *partition_update_create_scan_state(CustomScan *node); + +void partition_update_begin(CustomScanState *node, EState *estate, int eflags); +void partition_update_end(CustomScanState *node); +void partition_update_rescan(CustomScanState *node); +void partition_update_explain(CustomScanState *node, List *ancestors, + ExplainState *es); + +TupleTableSlot *partition_update_exec(CustomScanState *node); + +Plan *make_partition_update(Plan *subplan, + Oid parent_relid, + List *returning_list); + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 17e17fb4..8b4a480f 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,6 +34,7 @@ void pathman_transform_query(Query *parse); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); +void add_partition_update_nodes(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_filter.c b/src/partition_filter.c index db21c110..d7657c2b 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -677,35 +677,12 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) Expr *col_expr; Form_pg_attribute attr; - /* Make sure that this attribute exists */ - if (i > 
RelationGetDescr(parent_rel)->natts) - elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); - - /* Fetch pg_attribute entry for this column */ - attr = RelationGetDescr(parent_rel)->attrs[i - 1]; - - /* If this column is dropped, create a placeholder Const */ - if (attr->attisdropped) - { - /* Insert NULL for dropped column */ - col_expr = (Expr *) makeConst(INT4OID, - -1, - InvalidOid, - sizeof(int32), - (Datum) 0, - true, - true); - } - /* Otherwise we should create a Var referencing subplan's output */ - else - { - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - } + col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ + i, /* direct attribute mapping */ + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); result_tlist = lappend(result_tlist, makeTargetEntry(col_expr, diff --git a/src/partition_update.c b/src/partition_update.c index b24829c9..01ecd940 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -1,15 +1,146 @@ - -/* - * -------------------------------- - * PartitionUpdate implementation - * -------------------------------- +/* ------------------------------------------------------------------------ + * + * partition_update.c + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ */ +#include "partition_filter.h" +#include "partition_update.h" + +#include "utils/guc.h" + +bool pg_pathman_enable_partition_update = true; + +CustomScanMethods partition_update_plan_methods; +CustomExecMethods partition_update_exec_methods; + + +void +init_partition_update_static_data(void) +{ + partition_update_plan_methods.CustomName = "PartitionUpdate"; + 
partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; + + partition_update_exec_methods.CustomName = "PartitionUpdate"; + partition_update_exec_methods.BeginCustomScan = partition_update_begin; + partition_update_exec_methods.ExecCustomScan = partition_update_exec; + partition_update_exec_methods.EndCustomScan = partition_update_end; + partition_update_exec_methods.ReScanCustomScan = partition_update_rescan; + partition_update_exec_methods.MarkPosCustomScan = NULL; + partition_update_exec_methods.RestrPosCustomScan = NULL; + partition_update_exec_methods.ExplainCustomScan = partition_update_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionupdate", + "Enables the planner's use of PartitionUpdate custom node.", + NULL, + &pg_pathman_enable_partition_update, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); +} + + Plan * -make_partition_update(Plan *subplan, Oid parent_relid, - OnConflictAction conflict_action, +make_partition_update(Plan *subplan, + Oid parent_relid, List *returning_list) { + Plan *pfilter; + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods and child plan */ + cscan->methods = &partition_update_plan_methods; + pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, + returning_list); + cscan->custom_plans = list_make1(pfilter); + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + cscan->custom_scan_tlist = subplan->targetlist; + cscan->custom_private = NULL; + + return &cscan->scan.plan; } +Node * +partition_update_create_scan_state(CustomScan *node) +{ + PartitionUpdateState *state; + + state = (PartitionUpdateState *) palloc0(sizeof(PartitionUpdateState)); + NodeSetTag(state, T_CustomScanState); + + 
state->css.flags = node->flags; + state->css.methods = &partition_update_exec_methods; + + /* Extract necessary variables */ + state->subplan = (Plan *) linitial(node->custom_plans); + return (Node *) state; +} + +void +partition_update_begin(CustomScanState *node, EState *estate, int eflags) +{ + PartitionUpdateState *state = (PartitionUpdateState *) node; + + /* Initialize PartitionFilter child node */ + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); +} + +TupleTableSlot * +partition_update_exec(CustomScanState *node) +{ + PartitionFilterState *state = (PartitionFilterState *) node; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + EState *estate = node->ss.ps.state; + TupleTableSlot *slot; + ResultRelInfo *saved_rel_info; + + /* save original ResultRelInfo */ + saved_rel_info = estate->es_result_relation_info; + + slot = ExecProcNode(child_ps); + if (!TupIsNull(slot)) + { + /* we got the slot that can be inserted to child partition */ + return slot; + } + + return NULL; +} + +void +partition_update_end(CustomScanState *node) +{ + PartitionUpdateState *state = (PartitionUpdateState *) node; + + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_update_rescan(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecReScan((PlanState *) linitial(node->custom_ps)); +} + +void +partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *es) +{ + /* Nothing to do here now */ +} diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index a475165e..5f6a9b4a 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,6 +14,7 @@ #include "nodes_common.h" #include "partition_filter.h" +#include "partition_update.h" #include "planner_tree_modification.h" #include "miscadmin.h" @@ -32,6 +33,7 @@ static void disable_standard_inheritance(Query *parse); static void 
handle_modification_query(Query *parse); static void partition_filter_visitor(Plan *plan, void *context); +static void partition_update_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); @@ -249,9 +251,7 @@ handle_modification_query(Query *parse) result_rel = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || - (parse->commandType != CMD_UPDATE && - parse->commandType != CMD_DELETE)) + if (result_rel == 0 || parse->commandType != CMD_DELETE) return; rte = rt_fetch(result_rel, parse->rtable); @@ -355,7 +355,7 @@ add_partition_filters(List *rtable, Plan *plan) void add_partition_update_nodes(List *rtable, Plan *plan) { - if (pg_pathman_enable_partition_updaters) + if (pg_pathman_enable_partition_update) plan_tree_walker(plan, partition_update_visitor, rtable); } @@ -408,7 +408,7 @@ partition_filter_visitor(Plan *plan, void *context) /* - * Add partition updaters to ModifyTable node's children. + * Add partition update to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. 
*/ @@ -421,7 +421,7 @@ partition_update_visitor(Plan *plan, void *context) *lc2, *lc3; - /* Skip if not ModifyTable with 'INSERT' command */ + /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) return; @@ -448,7 +448,6 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, - modify_table->onConflictAction, returning_list); } } From 6280d4ee1c91e0711e74039952eee2ab31205af4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Apr 2017 16:01:06 +0300 Subject: [PATCH 0384/1124] improve regression tests for RuntimeAppend --- expected/pathman_runtime_nodes.out | 41 ++++++++++++++++++++---------- sql/pathman_runtime_nodes.sql | 23 ++++++++++++----- src/pg_pathman.c | 34 +++++++++++-------------- src/pl_funcs.c | 2 +- 4 files changed, 60 insertions(+), 40 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index db506a3b..ffcd0d2a 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -260,6 +260,15 @@ select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); create index on test.runtime_test_3 (id); create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); +NOTICE: sequence "runtime_test_4_seq" does not exist, skipping + create_range_partitions +------------------------- + 5 +(1 row) + VACUUM ANALYZE; set pg_pathman.enable_runtimeappend = on; set pg_pathman.enable_runtimemergeappend = on; @@ -293,19 +302,6 @@ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ ok (1 row) -/* RuntimeAppend (select ... 
where id = ANY (subquery), missing partitions) */ -select count(*) = 0 from pathman.pathman_partition_list -where parent = 'test.runtime_test_1'::regclass and range_max::int < 0; - ?column? ----------- - t -(1 row) - -select from test.runtime_test_1 -where id = any (select generate_series(-10, -1)); /* should be empty */ --- -(0 rows) - /* RuntimeAppend (join, enabled parent) */ select pathman.set_enable_parent('test.runtime_test_1', true); set_enable_parent @@ -393,7 +389,24 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; 2 (8 rows) +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? +---------- + t +(1 row) + +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 30 other objects +NOTICE: drop cascades to 37 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 4fbc0bce..6a65a557 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -261,6 +261,10 @@ select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); create index on test.runtime_test_3 (id); create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + VACUUM ANALYZE; @@ -274,12 +278,6 @@ select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ select test.pathman_test_4(); /* RuntimeMergeAppend 
(lateral) */ select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ -/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ -select count(*) = 0 from pathman.pathman_partition_list -where parent = 'test.runtime_test_1'::regclass and range_max::int < 0; - -select from test.runtime_test_1 -where id = any (select generate_series(-10, -1)); /* should be empty */ /* RuntimeAppend (join, enabled parent) */ select pathman.set_enable_parent('test.runtime_test_1', true); @@ -305,6 +303,19 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; select generate_series(1, 2) from test.runtime_test_1 as t1 join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + +set enable_hashjoin = off; +set enable_mergejoin = off; + +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ + +set enable_hashjoin = on; +set enable_mergejoin = on; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index e6f66f96..54a05b4a 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -70,9 +70,6 @@ static bool pull_var_param(const WalkerContext *ctx, static Const *extract_const(WalkerContext *wcxt, Param *param); -static double estimate_paramsel_using_prel(const PartRelationInfo *prel, - int strategy); - /* Copied from PostgreSQL (allpaths.c) */ static void set_plain_rel_size(PlannerInfo *root, @@ -104,6 +101,21 @@ static void generate_mergeappend_paths(PlannerInfo *root, ((Const *) (node)) \ ) +/* Selectivity estimator for common 'paramsel' */ +static inline double +estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) +{ + /* If it's "=", divide by partitions number */ + if 
(strategy == BTEqualStrategyNumber) + return 1.0 / (double) PrelChildrenCount(prel); + + /* Default selectivity estimate for inequalities */ + else if (prel->parttype == PT_RANGE && strategy > 0) + return DEFAULT_INEQ_SEL; + + /* Else there's not much to do */ + else return 1.0; +} /* @@ -1214,22 +1226,6 @@ extract_const(WalkerContext *wcxt, Param *param) value, isnull, get_typbyval(param->paramtype)); } -/* Selectivity estimator for common 'paramsel' */ -static double -estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) -{ - /* If it's "=", divide by partitions number */ - if (strategy == BTEqualStrategyNumber) - return 1.0 / (double) PrelChildrenCount(prel); - - /* Default selectivity estimate for inequalities */ - else if (prel->parttype == PT_RANGE && strategy > 0) - return DEFAULT_INEQ_SEL; - - /* Else there's not much to do */ - else return 1.0; -} - /* diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 992bcc98..61b77782 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -458,7 +458,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) funccxt->user_fctx; /* Iterate through pathman cache */ - for(;;) + for (;;) { const PartRelationInfo *prel; HeapTuple htup; From 6ca6feb0feed067639fbb596f500bf5a78e09d6c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Apr 2017 19:23:29 +0300 Subject: [PATCH 0385/1124] skip right outer joins in pathman_join_pathlist_hook() --- src/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index c697418c..5525d745 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -70,8 +70,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; - if (jointype == JOIN_FULL) - return; /* handling full joins is meaningless */ + if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) + return; /* we can't handle full or right outer joins */ /* Check that innerrel is a BASEREL with 
inheritors & PartRelationInfo */ if (innerrel->reloptkind != RELOPT_BASEREL || !inner_rte->inh || From 26a78b8cb890602455c6102247828a991cb46f11 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Apr 2017 19:40:53 +0300 Subject: [PATCH 0386/1124] update README.md (extension conflicts) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a90f5436..5c155db5 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as foll ``` shared_preload_libraries = 'pg_pathman' ``` -> **Important:** `pg_pathman` may have conflicts with some other extensions which uses the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` hook to handle COPY queries for partitioned tables. And it could sometimes interfere with `pg_stat_statements` extension which uses the same hook. In this case try to list libraries in certain order: `shared_preload_libraries = 'pg_pathman, pg_stat_statements'` +> **Important:** `pg_pathman` may cause conflicts with some other extensions that use the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` to handle COPY queries for partitioned tables, which means it may interfere with `pg_stat_statements` from time to time. In this case, try listing libraries in certain order: `shared_preload_libraries = 'pg_stat_statements, pg_pathman'`. It is essential to restart the PostgreSQL instance. 
After that, execute the following query in psql: ```plpgsql From 632e86414cd926c986869bd9df72cc5343e4fd48 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Apr 2017 15:29:54 +0300 Subject: [PATCH 0387/1124] show pruning clause of Runtime[Merge]Append --- expected/pathman_join_clause.out | 3 ++- expected/pathman_only.out | 6 ++++-- expected/pathman_rowmarks.out | 6 ++++-- expected/pathman_runtime_nodes.out | 6 ++++-- src/include/nodes_common.h | 4 +++- src/nodes_common.c | 23 ++++++++++++++++++++++- src/runtime_merge_append.c | 4 +++- src/runtimeappend.c | 4 +++- 8 files changed, 45 insertions(+), 11 deletions(-) diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index bc50b0d2..747d6e54 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -40,6 +40,7 @@ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key Nested Loop -> Seq Scan on fk -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) -> Bitmap Heap Scan on mytbl_0 m Recheck Cond: (id1 = fk.id1) Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) @@ -80,7 +81,7 @@ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) -> Bitmap Index Scan on mytbl_7_pkey Index Cond: (id1 = fk.id1) -(43 rows) +(44 rows) /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 61f74d27..43ff6bb9 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -178,6 +178,7 @@ SELECT * FROM test_only.from_only_test JOIN q1 USING(val); -> Seq Scan on from_only_test from_only_test_1 -> CTE Scan on q1 -> Custom Scan (RuntimeAppend) + Prune by: (q1.val = from_only_test.val) -> Seq Scan on from_only_test_1 from_only_test Filter: (q1.val = val) -> Seq Scan on from_only_test_2 from_only_test @@ -198,7 +199,7 @@ SELECT * FROM test_only.from_only_test 
JOIN q1 USING(val); Filter: (q1.val = val) -> Seq Scan on from_only_test_10 from_only_test Filter: (q1.val = val) -(25 rows) +(26 rows) /* should be OK */ EXPLAIN (COSTS OFF) @@ -209,6 +210,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test QUERY PLAN ----------------------------------------------------------------- Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) InitPlan 1 (returns $0) -> Limit -> Sort @@ -234,7 +236,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) -> Seq Scan on from_only_test_10 from_only_test Filter: (val = $0) -(26 rows) +(27 rows) DROP SCHEMA test_only CASCADE; NOTICE: drop cascades to 12 other objects diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 40bd14e6..e66c41d9 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -100,6 +100,7 @@ FOR SHARE; -> Seq Scan on first_3 -> Seq Scan on first_4 -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) -> Seq Scan on first_0 first Filter: (id = $1) -> Seq Scan on first_1 first @@ -110,7 +111,7 @@ FOR SHARE; Filter: (id = $1) -> Seq Scan on first_4 first Filter: (id = $1) -(23 rows) +(24 rows) /* A little harder (execution) */ SELECT * FROM rowmarks.first @@ -142,6 +143,7 @@ FOR SHARE; Sort Key: second.id -> Seq Scan on second -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) -> Seq Scan on first_0 first Filter: (id = $1) -> Seq Scan on first_1 first @@ -152,7 +154,7 @@ FOR SHARE; Filter: (id = $1) -> Seq Scan on first_4 first Filter: (id = $1) -(18 rows) +(19 rows) /* Two tables (execution) */ SELECT * FROM rowmarks.first diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index ffcd0d2a..4db58bdb 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -318,6 +318,7 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; -> Limit -> Seq Scan on run_values -> Custom Scan 
(RuntimeAppend) + Prune by: (run_values.val = t1.id) -> Seq Scan on runtime_test_1 t1 Filter: (run_values.val = id) -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 @@ -332,7 +333,7 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; Index Cond: (id = run_values.val) -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 Index Cond: (id = run_values.val) -(18 rows) +(19 rows) select from test.runtime_test_1 as t1 join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; @@ -355,6 +356,7 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; -> Limit -> Seq Scan on run_values -> Custom Scan (RuntimeAppend) + Prune by: (run_values.val = t1.id) -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 Index Cond: (id = run_values.val) -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 @@ -367,7 +369,7 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; Index Cond: (id = run_values.val) -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 Index Cond: (id = run_values.val) -(16 rows) +(17 rows) select from test.runtime_test_1 as t1 join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; diff --git a/src/include/nodes_common.h b/src/include/nodes_common.h index 17818d2d..2648663b 100644 --- a/src/include/nodes_common.h +++ b/src/include/nodes_common.h @@ -98,8 +98,10 @@ void end_append_common(CustomScanState *node); void rescan_append_common(CustomScanState *node); void explain_append_common(CustomScanState *node, + List *ancestors, + ExplainState *es, HTAB *children_table, - ExplainState *es); + List *custom_exprs); #endif /* NODES_COMMON_H */ diff --git a/src/nodes_common.c b/src/nodes_common.c index a6d3bfaa..92617640 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -13,11 +13,13 @@ #include "utils.h" #include "access/sysattr.h" +#include "optimizer/clauses.h" #include 
"optimizer/restrictinfo.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "rewrite/rewriteManip.h" #include "utils/memutils.h" +#include "utils/ruleutils.h" /* Allocation settings */ @@ -688,8 +690,27 @@ rescan_append_common(CustomScanState *node) } void -explain_append_common(CustomScanState *node, HTAB *children_table, ExplainState *es) +explain_append_common(CustomScanState *node, + List *ancestors, + ExplainState *es, + HTAB *children_table, + List *custom_exprs) { + List *deparse_context; + char *exprstr; + + /* Set up deparsing context */ + deparse_context = set_deparse_context_planstate(es->deparse_cxt, + (Node *) node, + ancestors); + + /* Deparse the expression */ + exprstr = deparse_expression((Node *) make_ands_explicit(custom_exprs), + deparse_context, true, false); + + /* And add to es->str */ + ExplainPropertyText("Prune by", exprstr, es); + /* Construct excess PlanStates */ if (!es->analyze) { diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index b24ae2b3..16622f02 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -482,7 +482,9 @@ runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; - explain_append_common(node, scan_state->rstate.children_table, es); + explain_append_common(node, ancestors, es, + scan_state->rstate.children_table, + scan_state->rstate.custom_exprs); /* We should print sort keys as well */ show_sort_group_keys((PlanState *) &node->ss.ps, "Sort Key", diff --git a/src/runtimeappend.c b/src/runtimeappend.c index 17b7f5f5..86554b0e 100644 --- a/src/runtimeappend.c +++ b/src/runtimeappend.c @@ -140,5 +140,7 @@ runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - explain_append_common(node, scan_state->children_table, es); + explain_append_common(node, ancestors, es, + 
scan_state->children_table, + scan_state->custom_exprs); } From f684b8d059d56674fb1c7649ed1b23a708928f03 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Apr 2017 16:21:24 +0300 Subject: [PATCH 0388/1124] Try another targetlist generation --- src/partition_filter.c | 28 +++++++++++++--------------- src/partition_update.c | 1 + src/pg_pathman.c | 2 ++ 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index d7657c2b..802bae4d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -669,27 +669,25 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) { List *result_tlist = NIL; ListCell *lc; - int i = 1; foreach (lc, tlist) { TargetEntry *tle = (TargetEntry *) lfirst(lc); - Expr *col_expr; + TargetEntry *newtle; Form_pg_attribute attr; - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - result_tlist = lappend(result_tlist, - makeTargetEntry(col_expr, - i, - NULL, - tle->resjunk)); - i++; /* next resno */ + if (tle->expr != NULL && IsA(tle->expr, Var)) + { + Var *var = (Var *) palloc(sizeof(Var)); + *var = *((Var *)(tle->expr)); + var->varno = INDEX_VAR; + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } + else + newtle = copyObject(tle); + + result_tlist = lappend(result_tlist, newtle); } return result_tlist; diff --git a/src/partition_update.c b/src/partition_update.c index 01ecd940..cd8cd73b 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -67,6 +67,7 @@ make_partition_update(Plan *subplan, pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, returning_list); cscan->custom_plans = list_make1(pfilter); + cscan->scan.plan.targetlist = pfilter->targetlist; /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; diff --git 
a/src/pg_pathman.c b/src/pg_pathman.c index 8c99b75a..7061fe9f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,6 +16,7 @@ #include "hooks.h" #include "pathman.h" #include "partition_filter.h" +#include "partition_update.h" #include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" @@ -164,6 +165,7 @@ _PG_init(void) init_runtimeappend_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); + init_partition_update_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ From cb7658e57cefae2e189154c6d83923f5f7f27c6b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Apr 2017 17:50:29 +0300 Subject: [PATCH 0389/1124] fix parameterization (instead of stupid bms_make_singleton(outerrel->relid)) --- src/hooks.c | 72 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 54 insertions(+), 18 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 3c415c25..d2222acd 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -4,6 +4,8 @@ * definitions of rel_pathlist and join_pathlist hooks * * Copyright (c) 2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ @@ -38,6 +40,23 @@ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) +static inline bool +allow_star_schema_join(PlannerInfo *root, + Path *outer_path, + Path *inner_path) +{ + Relids innerparams = PATH_REQ_OUTER(inner_path); + Relids outerrelids = outer_path->parent->relids; + + /* + * It's a star-schema case if the outer rel provides some but not all of + * the inner rel's parameterization. 
+ */ + return (bms_overlap(innerparams, outerrelids) && + bms_nonempty_difference(innerparams, outerrelids)); +} + + set_join_pathlist_hook_type set_join_pathlist_next = NULL; set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; planner_hook_type planner_hook_next = NULL; @@ -59,8 +78,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, JoinType saved_jointype = jointype; RangeTblEntry *inner_rte = root->simple_rte_array[innerrel->relid]; const PartRelationInfo *inner_prel; - List *pathkeys = NIL, - *joinclauses, + List *joinclauses, *otherclauses; ListCell *lc; WalkerContext context; @@ -124,7 +142,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, *inner; NestPath *nest_path; /* NestLoop we're creating */ ParamPathInfo *ppi; /* parameterization info */ - Relids inner_required; /* required paremeterization relids */ + Relids required_nestloop, + required_inner; List *filtered_joinclauses = NIL, *saved_ppi_list; ListCell *rinfo_lc; @@ -151,16 +170,17 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (saved_jointype == JOIN_UNIQUE_INNER) return; - /* Make innerrel path depend on outerrel's column */ - inner_required = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), - bms_make_singleton(outerrel->relid)); + + /* Make inner path depend on outerrel's columns */ + required_inner = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), + outerrel->relids); /* Preserve existing ppis built by get_appendrel_parampathinfo() */ saved_ppi_list = innerrel->ppilist; /* Get the ParamPathInfo for a parameterized path */ innerrel->ppilist = NIL; - ppi = get_baserel_parampathinfo(root, innerrel, inner_required); + ppi = get_baserel_parampathinfo(root, innerrel, required_inner); innerrel->ppilist = saved_ppi_list; /* Skip ppi->ppi_clauses don't reference partition attribute */ @@ -173,17 +193,34 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!inner) return; /* could not build it, retreat! 
*/ + + required_nestloop = calc_nestloop_required_outer(outer, inner); + + /* + * Check to see if proposed path is still parameterized, and reject if the + * parameterization wouldn't be sensible --- unless allow_star_schema_join + * says to allow it anyway. Also, we must reject if have_dangerous_phv + * doesn't like the look of it, which could only happen if the nestloop is + * still parameterized. + */ + if (required_nestloop && + ((!bms_overlap(required_nestloop, extra->param_source_rels) && + !allow_star_schema_join(root, outer, inner)) || + have_dangerous_phv(root, outer->parent->relids, required_inner))) + return; + + initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ extra->sjinfo, &extra->semifactors); - pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, extra->sjinfo, &extra->semifactors, outer, inner, extra->restrictlist, - pathkeys, - calc_nestloop_required_outer(outer, inner)); + build_join_pathkeys(root, joinrel, + jointype, + outer->pathkeys), + required_nestloop); /* Discard all clauses that are to be evaluated by 'inner' */ foreach (rinfo_lc, extra->restrictlist) @@ -196,16 +233,15 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* - * Override 'rows' value produced by standard estimator. + * NOTE: Override 'rows' value produced by standard estimator. * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. 
*/ - nest_path->path.rows = get_parameterized_joinrel_size_compat(root, - joinrel, - outer, - inner, - extra->sjinfo, - filtered_joinclauses); + nest_path->path.rows = + get_parameterized_joinrel_size_compat(root, joinrel, + outer, inner, + extra->sjinfo, + filtered_joinclauses); /* Finally we can add the new NestLoop path */ add_path(joinrel, (Path *) nest_path); From d640ff17d546aed072a4ae391e9673a9c7274831 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Apr 2017 18:26:50 +0300 Subject: [PATCH 0390/1124] test sophisticated queries with lateral (by Ivan Frolkov) --- Makefile | 1 + expected/pathman_lateral.out | 118 +++++++++++++++++++++++++++++++++++ sql/pathman_lateral.sql | 40 ++++++++++++ 3 files changed, 159 insertions(+) create mode 100644 expected/pathman_lateral.out create mode 100644 sql/pathman_lateral.sql diff --git a/Makefile b/Makefile index ece73c45..07afeddc 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,7 @@ REGRESS = pathman_basic \ pathman_inserts \ pathman_interval \ pathman_join_clause \ + pathman_lateral \ pathman_only \ pathman_permissions \ pathman_rowmarks \ diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out new file mode 100644 index 00000000..808a4d64 --- /dev/null +++ b/expected/pathman_lateral.out @@ -0,0 +1,118 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where 
t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop Semi Join + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t3.id) + -> Append + -> Seq Scan on data_0 t3 + -> Seq Scan on data_1 t3_1 + -> Seq Scan on data_2 t3_2 + -> Seq Scan on data_3 t3_3 + -> Seq Scan on data_4 t3_4 + -> Seq Scan on data_5 t3_5 + -> Seq Scan on data_6 t3_6 + -> Seq Scan on data_7 t3_7 + -> Seq Scan on data_8 t3_8 + -> Seq Scan on data_9 t3_9 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + 
Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t3.id = t.id) + -> Seq Scan on data_0 t + Filter: (t3.id = id) + -> Seq Scan on data_1 t + Filter: (t3.id = id) + -> Seq Scan on data_2 t + Filter: (t3.id = id) + -> Seq Scan on data_3 t + Filter: (t3.id = id) + -> Seq Scan on data_4 t + Filter: (t3.id = id) + -> Seq Scan on data_5 t + Filter: (t3.id = id) + -> Seq Scan on data_6 t + Filter: (t3.id = id) + -> Seq Scan on data_7 t + Filter: (t3.id = id) + -> Seq Scan on data_8 t + Filter: (t3.id = id) + -> Seq Scan on data_9 t + Filter: (t3.id = id) +(82 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql new file mode 100644 index 00000000..49dee604 --- /dev/null +++ b/sql/pathman_lateral.sql @@ -0,0 +1,40 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; + + +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); +insert into test_lateral.data select generate_series(1, 10000); + + +VACUUM ANALYZE; + + +set enable_hashjoin = off; +set enable_mergejoin = off; + + +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + + +set enable_hashjoin = on; +set enable_mergejoin = on; + + + +DROP SCHEMA test_lateral 
CASCADE; +DROP EXTENSION pg_pathman; From 11f6d4abf8d47b9a45a0aa0a587f2063f8850c48 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Apr 2017 19:14:29 +0300 Subject: [PATCH 0391/1124] introduce convenience macro list_make1_irange_full() --- src/hooks.c | 2 +- src/include/rangeset.h | 6 +++++- src/nodes_common.c | 2 +- src/pg_pathman.c | 19 +++++++------------ src/planner_tree_modification.c | 2 +- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index d2222acd..cc348872 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -337,7 +337,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rte->inh = true; children = PrelGetChildrenArray(prel); - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); + ranges = list_make1_irange_full(prel, IR_COMPLETE); /* Make wrappers over restrictions and collect final rangeset */ InitWalkerContext(&context, rti, prel, NULL, false); diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 5f273fd3..9e1d8cbb 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -50,6 +50,11 @@ typedef struct { #define linitial_irange(list) ( lfirst_irange(list_head(list)) ) +/* convenience macro (requires relation_info.h) */ +#define list_make1_irange_full(prel, lossy) \ + ( list_make1_irange(make_irange(0, PrelLastChild(prel), (lossy))) ) + + inline static IndexRange make_irange(uint32 lower, uint32 upper, bool lossy) { @@ -141,5 +146,4 @@ List *irange_list_intersection(List *a, List *b); int irange_list_length(List *rangeset); bool irange_list_find(List *rangeset, int index, bool *lossy); - #endif /* PATHMAN_RANGESET_H */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 92617640..ffab69d2 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -656,7 +656,7 @@ rescan_append_common(CustomScanState *node) Assert(prel); /* First we select all available partitions... 
*/ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_COMPLETE)); + ranges = list_make1_irange_full(prel, IR_COMPLETE); InitWalkerContext(&wcxt, INDEX_VAR, prel, econtext, false); foreach (lc, scan_state->custom_exprs) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 54a05b4a..26cde78e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -622,8 +622,7 @@ walk_expr_tree(Expr *expr, WalkerContext *context) result->args = NIL; result->paramsel = 1.0; - result->rangeset = list_make1_irange( - make_irange(0, PrelLastChild(context->prel), IR_LOSSY)); + result->rangeset = list_make1_irange_full(context->prel, IR_LOSSY); return result; } @@ -804,9 +803,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) result->paramsel = 1.0; if (expr->boolop == AND_EXPR) - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - IR_COMPLETE)); + result->rangeset = list_make1_irange_full(prel, IR_COMPLETE); else result->rangeset = NIL; @@ -831,9 +828,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) break; default: - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - IR_LOSSY)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); break; } } @@ -1003,7 +998,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) result->paramsel = DEFAULT_INEQ_SEL; handle_arrexpr_return: - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = 1.0; return result; } @@ -1036,7 +1031,7 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } } - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = 1.0; return result; } @@ -1131,7 +1126,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, } binary_opexpr_return: - result->rangeset = 
list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = 1.0; } @@ -1155,7 +1150,7 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), IR_LOSSY)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff18611d..786a8ab4 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -268,7 +268,7 @@ handle_modification_query(Query *parse) if (prel->enable_parent) return; /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + ranges = list_make1_irange_full(prel, IR_COMPLETE); expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); /* Exit if there's no expr (no use) */ From 4d692b78444c018a47b04dbcb85c7f2b1c9e257a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Apr 2017 19:18:01 +0300 Subject: [PATCH 0392/1124] Fix plan on child partition --- src/hooks.c | 2 +- src/planner_tree_modification.c | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b4d5d3a8..00e8ff37 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -509,7 +509,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Add PartitionFilter node for UPDATE queries */ + /* Add PartitionUpdate node for UPDATE queries */ ExecuteForPlanTree(result, add_partition_update_nodes); /* Decrement relation tags refcount */ diff --git 
a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 5f6a9b4a..ee92605d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -16,6 +16,7 @@ #include "partition_filter.h" #include "partition_update.h" #include "planner_tree_modification.h" +#include "relation_info.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -251,7 +252,8 @@ handle_modification_query(Query *parse) result_rel = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || parse->commandType != CMD_DELETE) + if (result_rel == 0 || (parse->commandType != CMD_UPDATE && + parse->commandType != CMD_DELETE)) return; rte = rt_fetch(result_rel, parse->rtable); @@ -430,10 +432,24 @@ partition_update_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { + Oid parent_relid; Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); const PartRelationInfo *prel = get_pathman_relation_info(relid); + /* query can be changed earlier to point on child partition, + * so we're possibly now looking at query that updates child partition + */ + if (prel == NULL) + { + parent_relid = get_parent_of_partition(relid, NULL); + if (parent_relid) + { + prel = get_pathman_relation_info(parent_relid); + relid = parent_relid; + } + } + /* Check that table is partitioned */ if (prel) { From caf6194e278093dc85df3b82d074aa6c28db3d97 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Apr 2017 22:06:09 +0300 Subject: [PATCH 0393/1124] test SELECT ... 
WHERE value = NULL --- expected/pathman_basic.out | 14 ++++++++++++++ sql/pathman_basic.sql | 2 ++ 2 files changed, 16 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 631d35f6..7fce7779 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -450,6 +450,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; QUERY PLAN ------------------------------ @@ -602,6 +609,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; QUERY PLAN ------------------------------ diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 1411c930..5f9edf4c 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -165,6 +165,7 @@ SET enable_bitmapscan = OFF; SET enable_seqscan = ON; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; @@ -188,6 +189,7 @@ SET enable_bitmapscan = OFF; SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); From 0bd4bc0134ee0581f61164eafffa9214b244872f Mon Sep 17 00:00:00 2001 From: 
Ildus K Date: Thu, 20 Apr 2017 12:14:06 +0300 Subject: [PATCH 0394/1124] Fix target list generation for INSERTs --- src/partition_filter.c | 13 +++++++++++-- src/partition_update.c | 4 ++-- src/planner_tree_modification.c | 2 ++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 802bae4d..2f88ac09 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -674,7 +674,6 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) { TargetEntry *tle = (TargetEntry *) lfirst(lc); TargetEntry *newtle; - Form_pg_attribute attr; if (tle->expr != NULL && IsA(tle->expr, Var)) { @@ -685,7 +684,17 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) tle->resjunk); } else - newtle = copyObject(tle); + { + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } result_tlist = lappend(result_tlist, newtle); } diff --git a/src/partition_update.c b/src/partition_update.c index cd8cd73b..314ce7d4 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,10 +22,10 @@ CustomExecMethods partition_update_exec_methods; void init_partition_update_static_data(void) { - partition_update_plan_methods.CustomName = "PartitionUpdate"; + partition_update_plan_methods.CustomName = "PrepareInsert"; partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; - partition_update_exec_methods.CustomName = "PartitionUpdate"; + partition_update_exec_methods.CustomName = "PrepareInsert"; partition_update_exec_methods.BeginCustomScan = partition_update_begin; partition_update_exec_methods.ExecCustomScan = partition_update_exec; partition_update_exec_methods.EndCustomScan = partition_update_end; diff --git a/src/planner_tree_modification.c 
b/src/planner_tree_modification.c index ee92605d..d4558c4b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -455,6 +455,8 @@ partition_update_visitor(Plan *plan, void *context) { List *returning_list = NIL; + modify_table->operation = CMD_INSERT; + /* Extract returning list if possible */ if (lc3) { From a3e77da92abe75489869f747ae00d0bd6cae1dfd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 14:13:23 +0300 Subject: [PATCH 0395/1124] Add little optimization --- src/hooks.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 03db4a4d..8b96b6f1 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -130,9 +130,12 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* Make copy of partitioning expression and fix Var's varno attributes */ - expr = copyObject(inner_prel->expr); + expr = inner_prel->expr; if (innerrel->relid != 1) + { + expr = copyObject(expr); ChangeVarNodes(expr, 1, innerrel->relid, 0); + } paramsel = 1.0; foreach (lc, joinclauses) @@ -206,8 +209,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, required_nestloop = calc_nestloop_required_outer(outer, inner); /* - * Check to see if proposed path is still parameterized, and reject if the - * parameterization wouldn't be sensible --- unless allow_star_schema_join * says to allow it anyway. Also, we must reject if have_dangerous_phv * doesn't like the look of it, which could only happen if the nestloop is * still parameterized. 
From a9e0a12f4ec435b9086d6deee6d30a386981c50c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 14:37:43 +0300 Subject: [PATCH 0396/1124] Fix removed comment --- src/hooks.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 8b96b6f1..d9c9e51a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -209,6 +209,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, required_nestloop = calc_nestloop_required_outer(outer, inner); /* + * Check to see if proposed path is still parameterized, and reject if the + * parameterization wouldn't be sensible --- unless allow_star_schema_join * says to allow it anyway. Also, we must reject if have_dangerous_phv * doesn't like the look of it, which could only happen if the nestloop is * still parameterized. From e299b75b60afdfb6b3f768d280cc6d51ae49cf0f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 15:13:53 +0300 Subject: [PATCH 0397/1124] Fix tests --- src/nodes_common.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/nodes_common.c b/src/nodes_common.c index 820fe7e6..760ccb39 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -367,6 +367,11 @@ get_partitioned_attr_clauses(List *restrictinfo_list, ctx.count = 0; ctx.prel_expr = prel->expr; + if (partitioned_rel != 1) + { + ctx.prel_expr = copyObject(prel->expr); + ChangeVarNodes(ctx.prel_expr, 1, partitioned_rel, 0); + } check_clause_for_expression((Node *) rinfo->clause, &ctx); if (ctx.count == 1) From f7d62b56b748b1ca5e81e592d9e67e10daadd09f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 20 Apr 2017 15:20:28 +0300 Subject: [PATCH 0398/1124] copy relkind in handle_modification_query() --- src/planner_tree_modification.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 786a8ab4..eb5d18ab 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -16,9 +16,11 @@ 
#include "partition_filter.h" #include "planner_tree_modification.h" +#include "access/htup_details.h" #include "miscadmin.h" #include "optimizer/clauses.h" #include "storage/lmgr.h" +#include "utils/lsyscache.h" #include "utils/syscache.h" @@ -302,9 +304,23 @@ handle_modification_query(Query *parse) LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ - /* Make sure that 'child' exists */ + HeapTuple syscache_htup; + char child_relkind; + + /* Lock 'child' table */ LockRelationOid(child, lockmode); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(child))) + + /* Make sure that 'child' exists */ + syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); + if (HeapTupleIsValid(syscache_htup)) + { + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); + + /* Fetch child's relkind and free cache entry */ + child_relkind = reltup->relkind; + ReleaseSysCache(syscache_htup); + } + else { UnlockRelationOid(child, lockmode); return; /* nothing to do here */ @@ -327,8 +343,9 @@ handle_modification_query(Query *parse) if (tuple_map) /* just checking the pointer! 
*/ return; - /* Update RTE's relid */ + /* Update RTE's relid and relkind (for FDW) */ rte->relid = child; + rte->relkind = child_relkind; /* HACK: unset the 'inh' flag (no children) */ rte->inh = false; From 9811d4c5b9553f37a11ba24cae31aefbeb6c6500 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 20 Apr 2017 18:49:24 +0300 Subject: [PATCH 0399/1124] Add DELETE support before INSERT --- src/partition_update.c | 127 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 124 insertions(+), 3 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 314ce7d4..200badbb 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -4,6 +4,8 @@ * Insert row to right partition in UPDATE operation * * Copyright (c) 2017, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ @@ -11,6 +13,8 @@ #include "partition_filter.h" #include "partition_update.h" +#include "access/xact.h" +#include "executor/nodeModifyTable.h" #include "utils/guc.h" bool pg_pathman_enable_partition_update = true; @@ -18,6 +22,8 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, HeapTuple oldtuple, + EPQState *epqstate, EState *estate); void init_partition_update_static_data(void) @@ -105,7 +111,6 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PartitionFilterState *state = (PartitionFilterState *) node; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); EState *estate = node->ss.ps.state; TupleTableSlot *slot; @@ -114,9 +119,50 @@ partition_update_exec(CustomScanState *node) /* save original ResultRelInfo */ 
saved_rel_info = estate->es_result_relation_info; + /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); + if (!TupIsNull(slot)) { + char relkind; + Datum datum; + bool isNull; + ResultRelInfo *resultRelInfo; + HeapTuple oldtuple; + ItemPointer tupleid; + ItemPointerData tuple_ctid; + JunkFilter *junkfilter; + EPQState epqstate; + AttrNumber ctid_attno; + + resultRelInfo = estate->es_result_relation_info; + junkfilter = resultRelInfo->ri_junkFilter; + Assert(junkfilter != NULL); + + EvalPlanQualSetSlot(&epqstate, slot); + oldtuple = NULL; + + /* + * extract the 'ctid' junk attribute. + */ + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + Assert(relkind == RELKIND_RELATION); + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! 
*/ + tupleid = &tuple_ctid; + + /* delete old tuple */ + estate->es_result_relation_info = saved_rel_info; + ExecDeleteInternal(tupleid, oldtuple, &epqstate, estate); + estate->es_result_relation_info = resultRelInfo; + /* we got the slot that can be inserted to child partition */ return slot; } @@ -127,8 +173,6 @@ partition_update_exec(CustomScanState *node) void partition_update_end(CustomScanState *node) { - PartitionUpdateState *state = (PartitionUpdateState *) node; - Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); } @@ -145,3 +189,80 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e { /* Nothing to do here now */ } + + +/* ---------------------------------------------------------------- + * ExecDeleteInternal + * Basicly copy of ExecDelete from executor/nodeModifyTable.c + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecDeleteInternal(ItemPointer tupleid, + HeapTuple oldtuple, + EPQState *epqstate, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + HeapUpdateFailureData hufd; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Else, already deleted by self; nothing to do */ + return NULL; + + case 
HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + resultRelationDesc, + resultRelInfo->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = hufd.ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return NULL; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return NULL; + } + + return NULL; +} From 5567629cc184fdcf73f9a14e410742bdd49d38f8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Apr 2017 00:43:47 +0300 Subject: [PATCH 0400/1124] remove useless event handlers (@extschema@.on_*) --- hash.sql | 6 ----- init.sql | 23 ------------------ range.sql | 32 ------------------------- src/pl_funcs.c | 65 -------------------------------------------------- 4 files changed, 126 deletions(-) diff --git a/hash.sql b/hash.sql index c3abe1d9..f840d792 100644 --- a/hash.sql +++ b/hash.sql @@ -45,9 +45,6 @@ BEGIN partition_names, tablespaces); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Copy data */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -156,9 +153,6 @@ BEGIN new_partition, p_init_callback); - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN new_partition; END $$ diff --git a/init.sql b/init.sql index 60adbbe4..9c43b995 100644 --- a/init.sql +++ b/init.sql @@ -432,9 +432,6 @@ BEGIN /* Drop triggers on update */ PERFORM @extschema@.drop_triggers(parent_relid); - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); END $$ LANGUAGE plpgsql STRICT; @@ -637,9 +634,6 @@ BEGIN 
v_part_count := v_part_count + 1; END LOOP; - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); - RETURN v_part_count; END $$ LANGUAGE plpgsql @@ -762,23 +756,6 @@ ON sql_drop EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); - -CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_created' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' -LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' -LANGUAGE C STRICT; - - /* * Get number of partitions managed by pg_pathman. */ diff --git a/range.sql b/range.sql index 705991ef..daac995f 100644 --- a/range.sql +++ b/range.sql @@ -169,9 +169,6 @@ BEGIN NULL); END IF; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -267,9 +264,6 @@ BEGIN NULL); END IF; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -326,9 +320,6 @@ BEGIN partition_names, tablespaces); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -386,9 +377,6 @@ BEGIN part_count := part_count + 1; END LOOP; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -448,9 
+436,6 @@ BEGIN part_count := part_count + 1; END LOOP; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -551,9 +536,6 @@ BEGIN partition_relid::TEXT, v_check_name, v_cond); - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent); END $$ LANGUAGE plpgsql; @@ -615,8 +597,6 @@ BEGIN INTO v_part_name; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); RETURN v_part_name; END $$ @@ -725,8 +705,6 @@ BEGIN INTO v_part_name; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); RETURN v_part_name; END $$ @@ -828,7 +806,6 @@ BEGIN end_value, partition_name, tablespace); - PERFORM @extschema@.on_update_partitions(parent_relid); RETURN v_part_name; END @@ -893,9 +870,6 @@ BEGIN EXECUTE format('DROP TABLE %s', partition_relid::TEXT); END IF; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN part_name; END $$ @@ -978,9 +952,6 @@ BEGIN start_value, end_value); - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition_relid; END $$ @@ -1026,9 +997,6 @@ BEGIN @extschema@.build_update_trigger_name(parent_relid), partition_relid::TEXT); - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN partition_relid; END $$ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 61b77782..65251435 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -38,10 +38,6 @@ /* Function declarations */ -PG_FUNCTION_INFO_V1( on_partitions_created ); -PG_FUNCTION_INFO_V1( on_partitions_updated ); -PG_FUNCTION_INFO_V1( on_partitions_removed ); - PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); @@ -103,10 +99,6 @@ typedef 
struct } show_cache_stats_cxt; -static void on_partitions_created_internal(Oid partitioned_table, bool add_callbacks); -static void on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks); -static void on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks); - static void pathman_update_trigger_func_move_tuple(Relation source_rel, Relation target_rel, HeapTuple old_tuple, @@ -120,63 +112,6 @@ check_relation_exists(Oid relid) } -/* - * ---------------------------- - * Partition events callbacks - * ---------------------------- - */ - -static void -on_partitions_created_internal(Oid partitioned_table, bool add_callbacks) -{ - elog(DEBUG2, "on_partitions_created() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? "true" : "false"), partitioned_table); -} - -static void -on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks) -{ - bool entry_found; - - elog(DEBUG2, "on_partitions_updated() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? "true" : "false"), partitioned_table); - - invalidate_pathman_relation_info(partitioned_table, &entry_found); -} - -static void -on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks) -{ - elog(DEBUG2, "on_partitions_removed() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? 
"true" : "false"), partitioned_table); -} - - -Datum -on_partitions_created(PG_FUNCTION_ARGS) -{ - on_partitions_created_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} - -Datum -on_partitions_updated(PG_FUNCTION_ARGS) -{ - on_partitions_updated_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} - -Datum -on_partitions_removed(PG_FUNCTION_ARGS) -{ - on_partitions_removed_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} - - /* * ------------------------ * Various useful getters From fce742d99598a4fe413511acb77e21131187f29f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Apr 2017 10:39:29 +0300 Subject: [PATCH 0401/1124] improved function drop_partitions() --- init.sql | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/init.sql b/init.sql index 9c43b995..8c10b918 100644 --- a/init.sql +++ b/init.sql @@ -581,23 +581,22 @@ DECLARE v_rec RECORD; v_rows BIGINT; v_part_count INTEGER := 0; - conf_num_del INTEGER; + conf_num INTEGER; v_relkind CHAR; BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Drop trigger first */ - PERFORM @extschema@.drop_triggers(parent_relid); + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); - WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config - WHERE partrel = parent_relid - RETURNING *) - SELECT count(*) from config_num_deleted INTO conf_num_del; + /* First, drop all triggers */ + PERFORM @extschema@.drop_triggers(parent_relid); - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + SELECT count(*) FROM @extschema@.pathman_config + WHERE partrel = parent_relid INTO conf_num; - IF conf_num_del = 0 THEN + IF conf_num = 0 THEN RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; END IF; @@ -621,8 +620,8 @@ BEGIN INTO v_relkind; /* - * Determine the kind of child relation. It can be either regular - * table (r) or foreign table (f). 
Depending on relkind we use + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use * DROP TABLE or DROP FOREIGN TABLE. */ IF v_relkind = 'f' THEN @@ -634,6 +633,10 @@ BEGIN v_part_count := v_part_count + 1; END LOOP; + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + RETURN v_part_count; END $$ LANGUAGE plpgsql From 389ec25fef63798744a81e5e97f508fcaee29075 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Apr 2017 14:29:22 +0300 Subject: [PATCH 0402/1124] Set proper tuple descriptor in COPY command in case of tuple conversion (#87) --- src/utility_stmt_hooking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index bce69ce1..1ceea7c0 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -587,6 +587,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; + ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); ExecStoreTuple(tuple, slot, InvalidBuffer, false); skip_tuple = false; From 728c468e1b9092da12f6cc29319119ed19e49691 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Apr 2017 15:25:33 +0300 Subject: [PATCH 0403/1124] Add more tests related with #87 issue --- expected/pathman_utility_stmt_hooking.out | 27 ++++++++++++++++++++++- sql/pathman_utility_stmt_hooking.sql | 17 ++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/expected/pathman_utility_stmt_hooking.out b/expected/pathman_utility_stmt_hooking.out index f6642d1b..db7f0d7a 100644 --- a/expected/pathman_utility_stmt_hooking.out +++ b/expected/pathman_utility_stmt_hooking.out @@ -256,8 +256,33 @@ SELECT * FROM copy_stmt_hooking.test ORDER BY val; 6 | hash_2 | 0 | 0 (2 
rows) +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); +NOTICE: sequence "test2_seq" does not exist, skipping + create_range_partitions +------------------------- + 5 +(1 row) + +COPY copy_stmt_hooking.test2(t) FROM stdin; +SELECT COUNT(*) FROM copy_stmt_hooking.test2; + count +------- + 1 +(1 row) + DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 798 other objects /* * Test auto check constraint renaming */ diff --git a/sql/pathman_utility_stmt_hooking.sql b/sql/pathman_utility_stmt_hooking.sql index 15367b86..5fb6665b 100644 --- a/sql/pathman_utility_stmt_hooking.sql +++ b/sql/pathman_utility_stmt_hooking.sql @@ -139,6 +139,23 @@ COPY copy_stmt_hooking.test FROM stdin; SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT * FROM copy_stmt_hooking.test ORDER BY val; +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); +COPY copy_stmt_hooking.test2(t) FROM stdin; +2017-02-02 20:00:00 +\. 
+SELECT COUNT(*) FROM copy_stmt_hooking.test2; + DROP SCHEMA copy_stmt_hooking CASCADE; From 9a354add7a64d2c63a03475767ef50cddb09163e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Apr 2017 14:29:22 +0300 Subject: [PATCH 0404/1124] Set proper tuple descriptor in COPY command in case of tuple conversion (#87) --- src/utility_stmt_hooking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2f0b6fa6..5bca60f1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -652,6 +652,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; + ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); ExecStoreTuple(tuple, slot, InvalidBuffer, false); skip_tuple = false; From 78a295a5e20d29f3f81408be5a70aa6fc9c3bda2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Apr 2017 15:25:33 +0300 Subject: [PATCH 0405/1124] Add more tests related with #87 issue --- expected/pathman_utility_stmt.out | 27 ++++++++++++++++++++++++++- sql/pathman_utility_stmt.sql | 17 +++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index f6642d1b..db7f0d7a 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -256,8 +256,33 @@ SELECT * FROM copy_stmt_hooking.test ORDER BY val; 6 | hash_2 | 0 | 0 (2 rows) +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); +NOTICE: sequence "test2_seq" does not exist, skipping + create_range_partitions +------------------------- + 5 +(1 row) + +COPY 
copy_stmt_hooking.test2(t) FROM stdin; +SELECT COUNT(*) FROM copy_stmt_hooking.test2; + count +------- + 1 +(1 row) + DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 798 other objects /* * Test auto check constraint renaming */ diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 15367b86..5fb6665b 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -139,6 +139,23 @@ COPY copy_stmt_hooking.test FROM stdin; SELECT count(*) FROM ONLY copy_stmt_hooking.test; SELECT * FROM copy_stmt_hooking.test ORDER BY val; +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); +COPY copy_stmt_hooking.test2(t) FROM stdin; +2017-02-02 20:00:00 +\. 
+SELECT COUNT(*) FROM copy_stmt_hooking.test2; + DROP SCHEMA copy_stmt_hooking CASCADE; From 17dce6fe9f14a60d1a9db97d7830a63637eb8ca2 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 21 Apr 2017 19:16:38 +0300 Subject: [PATCH 0406/1124] Add fixes to build pathman with pg10 --- src/compat/pg_compat.c | 40 ++++++++++++ src/hooks.c | 80 +++++++++++++++++++++++ src/include/compat/pg_compat.h | 40 +++++++----- src/include/hooks.h | 10 +++ src/init.c | 4 ++ src/nodes_common.c | 18 ++++++ src/partition_creation.c | 104 ++++++++++++++++++++++++++++++ src/partition_filter.c | 18 ++++++ src/pathman_workers.c | 2 + src/pg_pathman.c | 113 ++++++++++++++++++++++++++++++++- src/pl_funcs.c | 81 +++++++++++++++++++++++ src/pl_range_funcs.c | 8 +++ src/utility_stmt_hooking.c | 17 +++++ src/utils.c | 3 + 14 files changed, 521 insertions(+), 17 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 6e441980..ea6804fe 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -107,8 +107,12 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, if (proparallel != PROPARALLEL_SAFE) return; +#if PG_VERSION_NUM >= 100000 + if (!is_parallel_safe(root, (Node *) rte->tablesample->args)) +#else if (has_parallel_hazard((Node *) rte->tablesample->args, false)) +#endif return; } @@ -161,13 +165,21 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, case RTE_FUNCTION: /* Check for parallel-restricted functions. */ +#if PG_VERSION_NUM >= 100000 + if (!is_parallel_safe(root, (Node *) rte->functions)) +#else if (has_parallel_hazard((Node *) rte->functions, false)) +#endif return; break; case RTE_VALUES: /* Check for parallel-restricted functions. */ +#if PG_VERSION_NUM >= 100000 + if (!is_parallel_safe(root, (Node *) rte->values_lists)) +#else if (has_parallel_hazard((Node *) rte->values_lists, false)) +#endif return; break; @@ -181,6 +193,9 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * executed only once. 
*/ return; + + default: + ; } /* @@ -192,14 +207,22 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * outer join clauses work correctly. It would likely break equivalence * classes, too. */ +#if PG_VERSION_NUM >= 100000 + if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo)) +#else if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) +#endif return; /* * Likewise, if the relation's outputs are not parallel-safe, give up. * (Usually, they're just Vars, but sometimes they're not.) */ +#if PG_VERSION_NUM >= 100000 + if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs)) +#else if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) +#endif return; /* We have a winner. */ @@ -211,6 +234,22 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * create_plain_partial_paths * Build partial access paths for parallel scan of a plain relation */ +#if PG_VERSION_NUM >= 100000 +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + parallel_workers = compute_parallel_worker(rel, rel->pages, -1); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. */ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} +#else void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { @@ -267,6 +306,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) /* Add an unordered partial path based on a parallel sequential scan. */ add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); } +#endif /* * Examine contents of MemoryContext. 
diff --git a/src/hooks.c b/src/hooks.c index cc348872..b01237d4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -210,6 +210,19 @@ pathman_join_pathlist_hook(PlannerInfo *root, return; +#if PG_VERSION_NUM >= 100000 + initial_cost_nestloop(root, &workspace, jointype, + outer, inner, /* built paths */ + extra); + + nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, + extra, outer, inner, + extra->restrictlist, + build_join_pathkeys(root, joinrel, + jointype, + outer->pathkeys), + required_nestloop); +#else initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ extra->sjinfo, &extra->semifactors); @@ -221,6 +234,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, jointype, outer->pathkeys), required_nestloop); +#endif /* Discard all clauses that are to be evaluated by 'inner' */ foreach (rinfo_lc, extra->restrictlist) @@ -752,6 +766,71 @@ pathman_relcache_hook(Datum arg, Oid relid) /* * Utility function invoker hook. */ +#if PG_VERSION_NUM >= 100000 +void +pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, char *completionTag) +{ + Node *parsetree = pstmt->utilityStmt; + + if (IsPathmanReady()) + { + Oid relation_oid; + PartType part_type; + AttrNumber attr_number; + + /* Override standard COPY statement if needed */ + if (is_pathman_related_copy(parsetree)) + { + uint64 processed; + + /* Handle our COPY case (and show a special cmd name) */ + PathmanDoCopy((CopyStmt *) parsetree, queryString, &processed); + if (completionTag) + snprintf(completionTag, COMPLETION_TAG_BUFSIZE, + "PATHMAN COPY " UINT64_FORMAT, processed); + + return; /* don't call standard_ProcessUtility() or hooks */ + } + + /* Override standard RENAME statement if needed */ + else if (is_pathman_related_table_rename(parsetree, + &relation_oid, + &attr_number)) + PathmanRenameConstraint(relation_oid, + attr_number, + (const 
RenameStmt *) parsetree); + + /* Override standard ALTER COLUMN TYPE statement if needed */ + else if (is_pathman_related_alter_column_type(parsetree, + &relation_oid, + &attr_number, + &part_type) && + part_type == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot change type of column \"%s\"" + " of table \"%s\" partitioned by HASH", + get_attname(relation_oid, attr_number), + get_rel_name(relation_oid)))); + } + + /* Call hooks set by other extensions if needed */ + if (process_utility_hook_next) + process_utility_hook_next(pstmt, queryString, + context, params, queryEnv, + dest, completionTag); + /* Else call internal implementation */ + else + standard_ProcessUtility(pstmt, queryString, + context, params, queryEnv, + dest, completionTag); +} +#else void pathman_process_utility_hook(Node *parsetree, const char *queryString, @@ -813,3 +892,4 @@ pathman_process_utility_hook(Node *parsetree, context, params, dest, completionTag); } +#endif diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e731268e..e72948dd 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -34,19 +34,15 @@ * ---------- */ -#if PG_VERSION_NUM >= 90600 -/* adjust_appendrel_attrs() */ -#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ - do { \ - (dst_rel)->reltarget->exprs = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltarget->exprs, \ - (appinfo)); \ - } while (0) +/* create_append_path() */ +#if PG_VERSION_NUM >= 100000 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NULL) + +#elif PG_VERSION_NUM >= 90600 -/* create_append_path() */ #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) @@ -56,6 +52,25 @@ 
false, NIL, (parallel_workers)) #endif +#else /* for v9.5 */ + +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer)) + +#endif + + +#if PG_VERSION_NUM >= 90600 + +/* adjust_appendrel_attrs() */ +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ + } while (0) + /* check_index_predicates() */ #define check_index_predicates_compat(rool, rel) \ @@ -120,11 +135,6 @@ extern void set_rel_consider_parallel(PlannerInfo *root, } while (0) -/* create_append_path() */ -#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path((rel), (subpaths), (required_outer)) - - /* check_partial_indexes() */ #define check_index_predicates_compat(rool, rel) \ check_partial_indexes((root), (rel)) diff --git a/src/include/hooks.h b/src/include/hooks.h index 95400fe2..49d431bd 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -53,12 +53,22 @@ void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); +#if PG_VERSION_NUM >= 100000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + char *completionTag); +#else void pathman_process_utility_hook(Node *parsetree, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag); +#endif #endif /* PATHMAN_HOOKS_H */ diff --git a/src/init.c b/src/init.c index d6696dce..1196be9b 100644 --- a/src/init.c +++ b/src/init.c @@ -86,7 +86,9 @@ static bool read_opexpr_const(const OpExpr *opexpr, const AttrNumber part_attno, Datum *value); +#if PG_VERSION_NUM < 100000 static int oid_cmp(const void *p1, const void *p2); +#endif 
/* Validate SQL facade */ @@ -1128,6 +1130,7 @@ validate_hash_constraint(const Expr *expr, return false; } +#if PG_VERSION_NUM < 100000 /* needed for find_inheritance_children_array() function */ static int oid_cmp(const void *p1, const void *p2) @@ -1141,6 +1144,7 @@ oid_cmp(const void *p1, const void *p2) return 1; return 0; } +#endif /* Parse cstring and build uint32 representing the version */ diff --git a/src/nodes_common.c b/src/nodes_common.c index ffab69d2..7228b5ea 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -175,8 +175,13 @@ tlist_is_var_subset(List *a, List *b) if (!IsA(te->expr, Var) && !IsA(te->expr, RelabelType)) continue; +#if PG_VERSION_NUM >= 100000 + if (!tlist_member_ignore_relabel(te->expr, a)) + return true; +#else if (!tlist_member_ignore_relabel((Node *) te->expr, a)) return true; +#endif } return false; @@ -583,7 +588,9 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { +#if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; +#endif } TupleTableSlot * @@ -599,6 +606,7 @@ exec_append_common(CustomScanState *node, for (;;) { /* Fetch next tuple if we're done with Projections */ +#if PG_VERSION_NUM < 100000 if (!node->ss.ps.ps_TupFromTlist) { fetch_next_tuple(node); /* use specific callback */ @@ -606,6 +614,7 @@ exec_append_common(CustomScanState *node, if (TupIsNull(scan_state->slot)) return NULL; } +#endif if (node->ss.ps.ps_ProjInfo) { @@ -615,8 +624,13 @@ exec_append_common(CustomScanState *node, ResetExprContext(node->ss.ps.ps_ExprContext); node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; +#if PG_VERSION_NUM >= 100000 + result = ExecProject(node->ss.ps.ps_ProjInfo); +#else result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone); +#endif +#if PG_VERSION_NUM < 100000 if (isDone != ExprEndResult) { node->ss.ps.ps_TupFromTlist = (isDone == ExprMultipleResult); @@ -625,6 +639,10 @@ 
exec_append_common(CustomScanState *node, } else node->ss.ps.ps_TupFromTlist = false; +#else + if (isDone != ExprEndResult) + return result; +#endif } else return scan_state->slot; diff --git a/src/partition_creation.c b/src/partition_creation.c index 0a1ed2d0..e01bd990 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -810,12 +810,22 @@ create_single_partition_internal(Oid parent_relid, * call will stash the objects so created into our * event trigger context. */ +#if PG_VERSION_NUM >= 100000 + ProcessUtility(NULL, + "we have to provide a query string", + PROCESS_UTILITY_SUBCOMMAND, + NULL, + NULL, + None_Receiver, + NULL); +#else ProcessUtility(cur_stmt, "we have to provide a query string", PROCESS_UTILITY_SUBCOMMAND, NULL, None_Receiver, NULL); +#endif } /* Update config one more time */ @@ -847,7 +857,11 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) GUC_ACTION_SAVE, true, 0, false); /* Create new partition owned by parent's posessor */ +#if PG_VERSION_NUM >= 100000 + table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL, NULL); +#else table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL); +#endif /* Save data about a simple DDL command that was just executed */ EventTriggerCollectSimpleCommand(table_addr, @@ -877,6 +891,96 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) return table_addr; } +#if PG_VERSION_NUM >= 100000 +#include "catalog/index.h" +static void +CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) +{ + int i; + int numIndexes; + RelationPtr relationDescs; + Relation heapRelation; + TupleTableSlot *slot; + IndexInfo **indexInfoArray; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + + /* HOT update does not require index inserts */ + if (HeapTupleIsHeapOnly(heapTuple)) + return; + + /* + * Get information from the state structure. Fall out if nothing to do. 
+ */ + numIndexes = indstate->ri_NumIndices; + if (numIndexes == 0) + return; + relationDescs = indstate->ri_IndexRelationDescs; + indexInfoArray = indstate->ri_IndexRelationInfo; + heapRelation = indstate->ri_RelationDesc; + + /* Need a slot to hold the tuple being examined */ + slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); + ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + + /* + * for each index, form and insert the index tuple + */ + for (i = 0; i < numIndexes; i++) + { + IndexInfo *indexInfo; + + indexInfo = indexInfoArray[i]; + + /* If the index is marked as read-only, ignore it */ + if (!indexInfo->ii_ReadyForInserts) + continue; + + /* + * Expressional and partial indexes on system catalogs are not + * supported, nor exclusion constraints, nor deferred uniqueness + */ + Assert(indexInfo->ii_Expressions == NIL); + Assert(indexInfo->ii_Predicate == NIL); + Assert(indexInfo->ii_ExclusionOps == NULL); + Assert(relationDescs[i]->rd_index->indimmediate); + + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. + */ + FormIndexDatum(indexInfo, + slot, + NULL, /* no expression eval to do */ + values, + isnull); + + /* + * The index AM does the rest. + */ + index_insert(relationDescs[i], /* index relation */ + values, /* array of index Datums */ + isnull, /* is-null flags */ + &(heapTuple->t_self), /* tid of heap tuple */ + heapRelation, + relationDescs[i]->rd_index->indisunique ? 
+ UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, + indexInfo); + } + + ExecDropSingleTupleTableSlot(slot); +} +static void +CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple) +{ + CatalogIndexState indstate; + + indstate = CatalogOpenIndexes(heapRel); + CatalogIndexInsert(indstate, heapTuple); + CatalogCloseIndexes(indstate); +} +#endif + /* Copy ACL privileges of parent table and set "attislocal" = true */ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) diff --git a/src/partition_filter.c b/src/partition_filter.c index 8fa09d88..0269077b 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -295,10 +295,18 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!parts_storage->saved_rel_info) elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); +#if PG_VERSION_NUM >= 100000 InitResultRelInfo(child_result_rel_info, child_rel, child_rte_idx, + parent_rel, parts_storage->estate->es_instrument); +#else + InitResultRelInfo(child_result_rel_info, + child_rel, + child_rte_idx, + parts_storage->estate->es_instrument); +#endif if (parts_storage->command_type != CMD_DELETE) ExecOpenIndices(child_result_rel_info, parts_storage->speculative_inserts); @@ -775,12 +783,22 @@ prepare_rri_returning_for_insert(EState *estate, rri_holder)); /* Build new projection info */ +#if PG_VERSION_NUM >= 100000 + child_rri->ri_projectReturning = + ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) returning_list, + /* HACK: no PlanState */ NULL), + pfstate->tup_convert_econtext, + parent_rri->ri_projectReturning->pi_state.resultslot, + (PlanState *) pfstate, + RelationGetDescr(child_rri->ri_RelationDesc)); +#else child_rri->ri_projectReturning = ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) returning_list, /* HACK: no PlanState */ NULL), pfstate->tup_convert_econtext, parent_rri->ri_projectReturning->pi_slot, RelationGetDescr(child_rri->ri_RelationDesc)); +#endif } /* Prepare FDW access structs */ 
diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 94e3cc45..e7cf0ad3 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -192,7 +192,9 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; +#if PG_VERSION_NUM < 100000 worker.bgw_main = NULL; +#endif worker.bgw_main_arg = bgw_arg; worker.bgw_notify_pid = MyProcPid; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 26cde78e..02a16432 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -212,6 +212,68 @@ get_pathman_config_params_relid(bool invalid_is_ok) * ---------------------------------------- */ +#if PG_VERSION_NUM >= 100000 +static List * +get_all_actual_clauses(List *restrictinfo_list) +{ + List *result = NIL; + ListCell *l; + + foreach(l, restrictinfo_list) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + + Assert(IsA(rinfo, RestrictInfo)); + + result = lappend(result, rinfo->clause); + } + return result; +} + +#include "optimizer/var.h" + +static List * +make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list) +{ + List *result = NIL; + ListCell *l; + + foreach(l, clause_list) + { + Expr *clause = (Expr *) lfirst(l); + bool pseudoconstant; + RestrictInfo *rinfo; + + /* + * It's pseudoconstant if it contains no Vars and no volatile + * functions. We probably can't see any sublinks here, so + * contain_var_clause() would likely be enough, but for safety use + * contain_vars_of_level() instead. 
+ */ + pseudoconstant = + !contain_vars_of_level((Node *) clause, 0) && + !contain_volatile_functions((Node *) clause); + if (pseudoconstant) + { + /* tell createplan.c to check for gating quals */ + root->hasPseudoConstantQuals = true; + } + + rinfo = make_restrictinfo(clause, + true, + false, + pseudoconstant, + root->qual_security_level, + NULL, + NULL, + NULL); + result = lappend(result, rinfo); + } + return result; +} +#endif + /* * Creates child relation and adds it to root. * Returns child index in simple_rel_array. @@ -256,7 +318,11 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, root->simple_rte_array[childRTindex] = child_rte; /* Create RelOptInfo for this child (and make some estimates as well) */ +#if PG_VERSION_NUM >= 100000 + child_rel = build_simple_rel(root, childRTindex, parent_rel); +#else child_rel = build_simple_rel(root, childRTindex, RELOPT_OTHER_MEMBER_REL); +#endif /* Increase total_table_pages using the 'child_rel' */ root->total_table_pages += (double) child_rel->pages; @@ -699,10 +765,10 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) return (Node *) result; } else - return copyObject(wrap->orig); + return (Node *) copyObject(wrap->orig); } else - return copyObject(wrap->orig); + return (Node *) copyObject(wrap->orig); } @@ -1214,7 +1280,11 @@ extract_const(WalkerContext *wcxt, Param *param) { ExprState *estate = ExecInitExpr((Expr *) param, NULL); bool isnull; +#if PG_VERSION_NUM >= 100000 + Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull); +#else Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull, NULL); +#endif return makeConst(param->paramtype, param->paramtypmod, param->paramcollid, get_typlen(param->paramtype), @@ -1369,6 +1439,20 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, *cheapest_total; /* Locate the right paths, if they are available. 
*/ +#if PG_VERSION_NUM >= 100000 + cheapest_startup = + get_cheapest_path_for_pathkeys(childrel->pathlist, + pathkeys, + NULL, + STARTUP_COST, + true); + cheapest_total = + get_cheapest_path_for_pathkeys(childrel->pathlist, + pathkeys, + NULL, + TOTAL_COST, + true); +#else cheapest_startup = get_cheapest_path_for_pathkeys(childrel->pathlist, pathkeys, @@ -1379,6 +1463,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, pathkeys, NULL, TOTAL_COST); +#endif /* * If we can't find any paths with the right order just use the @@ -1453,6 +1538,21 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, else { /* ... and build the MergeAppend paths */ +#if PG_VERSION_NUM >= 100000 + add_path(rel, (Path *) create_merge_append_path(root, + rel, + startup_subpaths, + pathkeys, + NULL, + NULL)); + if (startup_neq_total) + add_path(rel, (Path *) create_merge_append_path(root, + rel, + total_subpaths, + pathkeys, + NULL, + NULL)); +#else add_path(rel, (Path *) create_merge_append_path(root, rel, startup_subpaths, @@ -1464,6 +1564,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, total_subpaths, pathkeys, NULL)); +#endif } } } @@ -1940,10 +2041,18 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * parameterization. If it has exactly the needed parameterization, we're * done. 
*/ +#if PG_VERSION_NUM >= 100000 + cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, + NIL, + required_outer, + TOTAL_COST, + true); +#else cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, NIL, required_outer, TOTAL_COST); +#endif Assert(cheapest != NULL); if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) return cheapest; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 61b77782..a3fc7a1d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -794,6 +794,87 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) * ------------------------ */ +#if PG_VERSION_NUM >= 100000 +#include "catalog/index.h" +static void +CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) +{ + int i; + int numIndexes; + RelationPtr relationDescs; + Relation heapRelation; + TupleTableSlot *slot; + IndexInfo **indexInfoArray; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + + /* HOT update does not require index inserts */ + if (HeapTupleIsHeapOnly(heapTuple)) + return; + + /* + * Get information from the state structure. Fall out if nothing to do. 
+ */ + numIndexes = indstate->ri_NumIndices; + if (numIndexes == 0) + return; + relationDescs = indstate->ri_IndexRelationDescs; + indexInfoArray = indstate->ri_IndexRelationInfo; + heapRelation = indstate->ri_RelationDesc; + + /* Need a slot to hold the tuple being examined */ + slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); + ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + + /* + * for each index, form and insert the index tuple + */ + for (i = 0; i < numIndexes; i++) + { + IndexInfo *indexInfo; + + indexInfo = indexInfoArray[i]; + + /* If the index is marked as read-only, ignore it */ + if (!indexInfo->ii_ReadyForInserts) + continue; + + /* + * Expressional and partial indexes on system catalogs are not + * supported, nor exclusion constraints, nor deferred uniqueness + */ + Assert(indexInfo->ii_Expressions == NIL); + Assert(indexInfo->ii_Predicate == NIL); + Assert(indexInfo->ii_ExclusionOps == NULL); + Assert(relationDescs[i]->rd_index->indimmediate); + + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. + */ + FormIndexDatum(indexInfo, + slot, + NULL, /* no expression eval to do */ + values, + isnull); + + /* + * The index AM does the rest. + */ + index_insert(relationDescs[i], /* index relation */ + values, /* array of index Datums */ + isnull, /* is-null flags */ + &(heapTuple->t_self), /* tid of heap tuple */ + heapRelation, + relationDescs[i]->rd_index->indisunique ? + UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, + indexInfo); + } + + ExecDropSingleTupleTableSlot(slot); +} +#endif + /* * Try to add previously partitioned table to PATHMAN_CONFIG. 
*/ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 49ffc166..ec743870 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -30,6 +30,12 @@ #include "utils/ruleutils.h" #include "utils/syscache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#include "utils/varlena.h" +#include +#endif + /* Function declarations */ @@ -1207,7 +1213,9 @@ drop_table_by_oid(Oid relid) n->removeType = OBJECT_TABLE; n->missing_ok = false; n->objects = list_make1(stringToQualifiedNameList(relname)); +#if PG_VERSION_NUM < 100000 n->arguments = NIL; +#endif n->behavior = DROP_RESTRICT; /* default behavior */ n->concurrent = false; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2f0b6fa6..4999cb0f 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -495,8 +495,13 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) PreventCommandIfReadOnly("PATHMAN COPY FROM"); PreventCommandIfParallelMode("PATHMAN COPY FROM"); +#if PG_VERSION_NUM >= 100000 + cstate = BeginCopyFrom(NULL, rel, stmt->filename, stmt->is_program, + NULL, stmt->attlist, stmt->options); +#else cstate = BeginCopyFrom(rel, stmt->filename, stmt->is_program, stmt->attlist, stmt->options); +#endif *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); EndCopyFrom(cstate); } @@ -514,7 +519,11 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) modified_copy_stmt.query = query; /* Call standard DoCopy using a new CopyStmt */ +#if PG_VERSION_NUM >= 100000 + DoCopy(NULL, &modified_copy_stmt, 0, 0, processed); +#else DoCopy(&modified_copy_stmt, queryString, processed); +#endif } /* @@ -552,10 +561,18 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tupDesc = RelationGetDescr(parent_rel); parent_result_rel = makeNode(ResultRelInfo); +#if PG_VERSION_NUM >= 100000 InitResultRelInfo(parent_result_rel, parent_rel, 1, /* dummy rangetable index */ + NULL, 0); +#else + 
InitResultRelInfo(parent_result_rel, + parent_rel, + 1, /* dummy rangetable index */ + 0); +#endif ExecOpenIndices(parent_result_rel, false); estate->es_result_relations = parent_result_rel; diff --git a/src/utils.c b/src/utils.c index 099f5a74..0461eeaf 100644 --- a/src/utils.c +++ b/src/utils.c @@ -32,6 +32,9 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif static bool clause_contains_params_walker(Node *node, void *context); From 9fff4848e91280ac34e4b41f5a79abc7c9b270d0 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Apr 2017 16:13:49 +0300 Subject: [PATCH 0407/1124] Refactor compat.c and compat.h to specify versions for each incompatible routines --- src/compat/pg_compat.c | 446 +++++++++++++++++++++------------ src/include/compat/pg_compat.h | 198 ++++++++------- 2 files changed, 396 insertions(+), 248 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index ea6804fe..d49f2003 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -27,19 +27,97 @@ #include - /* * ---------- * Variants * ---------- */ -#if PG_VERSION_NUM >= 90600 + +/* + * create_plain_partial_paths + * Build partial access paths for parallel scan of a plain relation + */ +#if PG_VERSION_NUM >= 100000 +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + parallel_workers = compute_parallel_worker(rel, rel->pages, -1); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. 
*/ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} +#elif PG_VERSION_NUM >= 90600 +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + /* + * If the user has set the parallel_workers reloption, use that; otherwise + * select a default number of workers. + */ + if (rel->rel_parallel_workers != -1) + parallel_workers = rel->rel_parallel_workers; + else + { + int parallel_threshold; + + /* + * If this relation is too small to be worth a parallel scan, just + * return without doing anything ... unless it's an inheritance child. + * In that case, we want to generate a parallel path here anyway. It + * might not be worthwhile just for this relation, but when combined + * with all of its inheritance siblings it may well pay off. + */ + if (rel->pages < (BlockNumber) min_parallel_relation_size && + rel->reloptkind == RELOPT_BASEREL) + return; + + /* + * Select the number of workers based on the log of the size of the + * relation. This probably needs to be a good deal more + * sophisticated, but we need something here for now. Note that the + * upper limit of the min_parallel_relation_size GUC is chosen to + * prevent overflow here. + */ + parallel_workers = 1; + parallel_threshold = Max(min_parallel_relation_size, 1); + while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) + { + parallel_workers++; + parallel_threshold *= 3; + if (parallel_threshold > INT_MAX / 3) + break; /* avoid overflow */ + } + } + + /* + * In no case use more than max_parallel_workers_per_gather workers. + */ + parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. 
*/ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} +#endif + /* * make_result * Build a Result plan node */ +#if PG_VERSION_NUM >= 90600 Result * make_result(List *tlist, Node *resconstantqual, @@ -56,12 +134,87 @@ make_result(List *tlist, return node; } +#endif + + +/* + * Examine contents of MemoryContext. + */ +#if PG_VERSION_NUM >= 90600 +void +McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals) +{ + MemoryContextCounters local_totals; + MemoryContext child; + + AssertArg(MemoryContextIsValid(context)); + + /* Examine the context itself */ + (*context->methods->stats) (context, level, false, totals); + + memset(&local_totals, 0, sizeof(local_totals)); + + if (!examine_children) + return; + + /* Examine children */ + for (child = context->firstchild; + child != NULL; + child = child->nextchild) + { + + McxtStatsInternal(child, level + 1, + examine_children, + &local_totals); + } + + /* Save children stats */ + totals->nblocks += local_totals.nblocks; + totals->freechunks += local_totals.freechunks; + totals->totalspace += local_totals.totalspace; + totals->freespace += local_totals.freespace; +} +#endif + + +/* + * set_dummy_rel_pathlist + * Build a dummy path for a relation that's been excluded by constraints + * + * Rather than inventing a special "dummy" path type, we represent this as an + * AppendPath with no members (see also IS_DUMMY_PATH/IS_DUMMY_REL macros). + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +void +set_dummy_rel_pathlist(RelOptInfo *rel) +{ + /* Set dummy size estimates --- we leave attr_widths[] as zeroes */ + rel->rows = 0; + rel->width = 0; + + /* Discard any pre-existing paths; no further need for them */ + rel->pathlist = NIL; + + add_path(rel, (Path *) create_append_path(rel, NIL, NULL)); + + /* + * We set the cheapest path immediately, to ensure that IS_DUMMY_REL() + * will recognize the relation as dummy if anyone asks. 
This is redundant + * when we're called from set_rel_size(), but not when called from + * elsewhere, and doing it twice is harmless anyway. + */ + set_cheapest(rel); +} +#endif /* * If this relation could possibly be scanned from within a worker, then set * its consider_parallel flag. */ +#if PG_VERSION_NUM >= 100000 void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) @@ -76,8 +229,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, Assert(root->glob->parallelModeOK); /* This should only be called for baserels and appendrel children. */ - Assert(rel->reloptkind == RELOPT_BASEREL || - rel->reloptkind == RELOPT_OTHER_MEMBER_REL); + Assert(IS_SIMPLE_REL(rel)); /* Assorted checks based on rtekind. */ switch (rte->rtekind) @@ -103,16 +255,11 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ if (rte->tablesample != NULL) { - Oid proparallel = func_parallel(rte->tablesample->tsmhandler); + char proparallel = func_parallel(rte->tablesample->tsmhandler); if (proparallel != PROPARALLEL_SAFE) return; -#if PG_VERSION_NUM >= 100000 if (!is_parallel_safe(root, (Node *) rte->tablesample->args)) -#else - if (has_parallel_hazard((Node *) rte->tablesample->args, - false)) -#endif return; } @@ -165,21 +312,17 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, case RTE_FUNCTION: /* Check for parallel-restricted functions. */ -#if PG_VERSION_NUM >= 100000 if (!is_parallel_safe(root, (Node *) rte->functions)) -#else - if (has_parallel_hazard((Node *) rte->functions, false)) -#endif return; break; + case RTE_TABLEFUNC: + /* not parallel safe */ + return; + case RTE_VALUES: /* Check for parallel-restricted functions. 
*/ -#if PG_VERSION_NUM >= 100000 if (!is_parallel_safe(root, (Node *) rte->values_lists)) -#else - if (has_parallel_hazard((Node *) rte->values_lists, false)) -#endif return; break; @@ -194,8 +337,12 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ return; - default: - ; + case RTE_NAMEDTUPLESTORE: + /* + * tuplestore cannot be shared, at least without more + * infrastructure to support that. + */ + return; } /* @@ -207,176 +354,164 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * outer join clauses work correctly. It would likely break equivalence * classes, too. */ -#if PG_VERSION_NUM >= 100000 if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo)) -#else - if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) -#endif return; /* * Likewise, if the relation's outputs are not parallel-safe, give up. * (Usually, they're just Vars, but sometimes they're not.) */ -#if PG_VERSION_NUM >= 100000 if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs)) -#else - if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) -#endif return; /* We have a winner. */ rel->consider_parallel = true; } - - -/* - * create_plain_partial_paths - * Build partial access paths for parallel scan of a plain relation - */ -#if PG_VERSION_NUM >= 100000 -void -create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) -{ - int parallel_workers; - - parallel_workers = compute_parallel_worker(rel, rel->pages, -1); - - /* If any limit was set to zero, the user doesn't want a parallel scan. */ - if (parallel_workers <= 0) - return; - - /* Add an unordered partial path based on a parallel sequential scan. 
*/ - add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); -} -#else +#elif PG_VERSION_NUM >= 90600 void -create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, + RangeTblEntry *rte) { - int parallel_workers; - /* - * If the user has set the parallel_workers reloption, use that; otherwise - * select a default number of workers. + * The flag has previously been initialized to false, so we can just + * return if it becomes clear that we can't safely set it. */ - if (rel->rel_parallel_workers != -1) - parallel_workers = rel->rel_parallel_workers; - else - { - int parallel_threshold; - - /* - * If this relation is too small to be worth a parallel scan, just - * return without doing anything ... unless it's an inheritance child. - * In that case, we want to generate a parallel path here anyway. It - * might not be worthwhile just for this relation, but when combined - * with all of its inheritance siblings it may well pay off. - */ - if (rel->pages < (BlockNumber) min_parallel_relation_size && - rel->reloptkind == RELOPT_BASEREL) - return; - - /* - * Select the number of workers based on the log of the size of the - * relation. This probably needs to be a good deal more - * sophisticated, but we need something here for now. Note that the - * upper limit of the min_parallel_relation_size GUC is chosen to - * prevent overflow here. - */ - parallel_workers = 1; - parallel_threshold = Max(min_parallel_relation_size, 1); - while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) - { - parallel_workers++; - parallel_threshold *= 3; - if (parallel_threshold > INT_MAX / 3) - break; /* avoid overflow */ - } - } + Assert(!rel->consider_parallel); - /* - * In no case use more than max_parallel_workers_per_gather workers. - */ - parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + /* Don't call this if parallelism is disallowed for the entire query. 
*/ + Assert(root->glob->parallelModeOK); - /* If any limit was set to zero, the user doesn't want a parallel scan. */ - if (parallel_workers <= 0) - return; + /* This should only be called for baserels and appendrel children. */ + Assert(rel->reloptkind == RELOPT_BASEREL || + rel->reloptkind == RELOPT_OTHER_MEMBER_REL); - /* Add an unordered partial path based on a parallel sequential scan. */ - add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); -} -#endif + /* Assorted checks based on rtekind. */ + switch (rte->rtekind) + { + case RTE_RELATION: -/* - * Examine contents of MemoryContext. - */ -void -McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals) -{ - MemoryContextCounters local_totals; - MemoryContext child; + /* + * Currently, parallel workers can't access the leader's temporary + * tables. We could possibly relax this if the wrote all of its + * local buffers at the start of the query and made no changes + * thereafter (maybe we could allow hint bit changes), and if we + * taught the workers to read them. Writing a large number of + * temporary buffers could be expensive, though, and we don't have + * the rest of the necessary infrastructure right now anyway. So + * for now, bail out if we see a temporary table. + */ + if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) + return; - AssertArg(MemoryContextIsValid(context)); + /* + * Table sampling can be pushed down to workers if the sample + * function and its arguments are safe. 
+ */ + if (rte->tablesample != NULL) + { + char proparallel = func_parallel(rte->tablesample->tsmhandler); - /* Examine the context itself */ - (*context->methods->stats) (context, level, false, totals); + if (proparallel != PROPARALLEL_SAFE) + return; + if (has_parallel_hazard((Node *) rte->tablesample->args, + false)) + return; + } - memset(&local_totals, 0, sizeof(local_totals)); + /* + * Ask FDWs whether they can support performing a ForeignScan + * within a worker. Most often, the answer will be no. For + * example, if the nature of the FDW is such that it opens a TCP + * connection with a remote server, each parallel worker would end + * up with a separate connection, and these connections might not + * be appropriately coordinated between workers and the leader. + */ + if (rte->relkind == RELKIND_FOREIGN_TABLE) + { + Assert(rel->fdwroutine); + if (!rel->fdwroutine->IsForeignScanParallelSafe) + return; + if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) + return; + } - if (!examine_children) - return; + /* + * There are additional considerations for appendrels, which we'll + * deal with in set_append_rel_size and set_append_rel_pathlist. + * For now, just set consider_parallel based on the rel's own + * quals and targetlist. + */ + break; - /* Examine children */ - for (child = context->firstchild; - child != NULL; - child = child->nextchild) - { + case RTE_SUBQUERY: - McxtStatsInternal(child, level + 1, - examine_children, - &local_totals); - } + /* + * There's no intrinsic problem with scanning a subquery-in-FROM + * (as distinct from a SubPlan or InitPlan) in a parallel worker. + * If the subquery doesn't happen to have any parallel-safe paths, + * then flagging it as consider_parallel won't change anything, + * but that's true for plain tables, too. 
We must set + * consider_parallel based on the rel's own quals and targetlist, + * so that if a subquery path is parallel-safe but the quals and + * projection we're sticking onto it are not, we correctly mark + * the SubqueryScanPath as not parallel-safe. (Note that + * set_subquery_pathlist() might push some of these quals down + * into the subquery itself, but that doesn't change anything.) + */ + break; - /* Save children stats */ - totals->nblocks += local_totals.nblocks; - totals->freechunks += local_totals.freechunks; - totals->totalspace += local_totals.totalspace; - totals->freespace += local_totals.freespace; -} + case RTE_JOIN: + /* Shouldn't happen; we're only considering baserels here. */ + Assert(false); + return; + case RTE_FUNCTION: + /* Check for parallel-restricted functions. */ + if (has_parallel_hazard((Node *) rte->functions, false)) + return; + break; -#else /* PG_VERSION_NUM >= 90500 */ + case RTE_VALUES: + /* Check for parallel-restricted functions. */ + if (has_parallel_hazard((Node *) rte->values_lists, false)) + return; + break; -/* - * set_dummy_rel_pathlist - * Build a dummy path for a relation that's been excluded by constraints - * - * Rather than inventing a special "dummy" path type, we represent this as an - * AppendPath with no members (see also IS_DUMMY_PATH/IS_DUMMY_REL macros). - */ -void -set_dummy_rel_pathlist(RelOptInfo *rel) -{ - /* Set dummy size estimates --- we leave attr_widths[] as zeroes */ - rel->rows = 0; - rel->width = 0; + case RTE_CTE: - /* Discard any pre-existing paths; no further need for them */ - rel->pathlist = NIL; + /* + * CTE tuplestores aren't shared among parallel workers, so we + * force all CTE scans to happen in the leader. Also, populating + * the CTE would require executing a subplan that's not available + * in the worker, might be parallel-restricted, and must get + * executed only once. 
+ */ + return; + } - add_path(rel, (Path *) create_append_path(rel, NIL, NULL)); + /* + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider + * instead postponing application of the restricted quals until we're + * above all the parallelism in the plan tree, but it's not clear that + * that would be a win in very many cases, and it might be tricky to make + * outer join clauses work correctly. It would likely break equivalence + * classes, too. + */ + if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) + return; /* - * We set the cheapest path immediately, to ensure that IS_DUMMY_REL() - * will recognize the relation as dummy if anyone asks. This is redundant - * when we're called from set_rel_size(), but not when called from - * elsewhere, and doing it twice is harmless anyway. + * Likewise, if the relation's outputs are not parallel-safe, give up. + * (Usually, they're just Vars, but sometimes they're not.) */ - set_cheapest(rel); + if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) + return; + + /* We have a winner. 
*/ + rel->consider_parallel = true; } +#endif /* @@ -384,6 +519,7 @@ set_dummy_rel_pathlist(RelOptInfo *rel) * * NOTE: this function is implemented in 9.6 */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 char get_rel_persistence(Oid relid) { @@ -401,9 +537,7 @@ get_rel_persistence(Oid relid) return result; } - - -#endif /* PG_VERSION_NUM >= 90600 */ +#endif diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e72948dd..6c9e82b9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -21,162 +21,176 @@ #include "optimizer/paths.h" #include "utils/memutils.h" -/* Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ -#if PG_VERSION_NUM < 90600 + +/* + * ---------- + * Variants + * ---------- + */ + + +/* + * adjust_appendrel_attrs() + */ +#if PG_VERSION_NUM >= 90600 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90500 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltargetlist = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ + } while (0) +#endif + + +/* + * Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 #define ALLOCSET_DEFAULT_SIZES \ ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE #endif /* - * ---------- - * Variants - * ---------- + * check_index_predicates() */ +#if PG_VERSION_NUM >= 90600 +#define check_index_predicates_compat(rool, rel) \ + check_index_predicates((root), (rel)) +#elif PG_VERSION_NUM >= 90500 +#define check_index_predicates_compat(rool, rel) \ + check_partial_indexes((root), (rel)) +#endif -/* create_append_path() */ +/* + * create_append_path() + */ #if 
PG_VERSION_NUM >= 100000 - #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NULL) - #elif PG_VERSION_NUM >= 90600 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) -#else +#else /* ifdef PGPRO_VERSION */ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), \ false, NIL, (parallel_workers)) -#endif - -#else /* for v9.5 */ +#endif /* PGPRO_VERSION */ +#elif PG_VERSION_NUM >= 90500 #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer)) - -#endif +#endif /* PG_VERSION_NUM */ +/* + * create_plain_partial_paths() + */ #if PG_VERSION_NUM >= 90600 - -/* adjust_appendrel_attrs() */ -#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ - do { \ - (dst_rel)->reltarget->exprs = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltarget->exprs, \ - (appinfo)); \ - } while (0) - - -/* check_index_predicates() */ -#define check_index_predicates_compat(rool, rel) \ - check_index_predicates((root), (rel)) - - -/* create_plain_partial_paths() */ extern void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); #define create_plain_partial_paths_compat(root, rel) \ create_plain_partial_paths((root), (rel)) +#endif -/* get_parameterized_joinrel_size() */ +/* + * get_parameterized_joinrel_size() + */ +#if PG_VERSION_NUM >= 90600 #define get_parameterized_joinrel_size_compat(root, rel, outer_path, \ inner_path, sjinfo, \ restrict_clauses) \ get_parameterized_joinrel_size((root), (rel), (outer_path), \ (inner_path), (sjinfo), \ (restrict_clauses)) +#elif PG_VERSION_NUM >= 90500 +#define get_parameterized_joinrel_size_compat(root, 
rel, \ + outer_path, \ + inner_path, \ + sjinfo, restrict_clauses) \ + get_parameterized_joinrel_size((root), (rel), \ + (outer_path)->rows, \ + (inner_path)->rows, \ + (sjinfo), (restrict_clauses)) +#endif -/* make_result() */ +/* + * make_result() + */ +#if PG_VERSION_NUM >= 90600 extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); #define make_result_compat(root, tlist, resconstantqual, subplan) \ make_result((tlist), (resconstantqual), (subplan)) +#elif PG_VERSION_NUM >= 90500 +#define make_result_compat(root, tlist, resconstantqual, subplan) \ + make_result((root), (tlist), (resconstantqual), (subplan)) +#endif -/* McxtStatsInternal() */ +/* + * McxtStatsInternal() + */ +#if PG_VERSION_NUM >= 90600 void McxtStatsInternal(MemoryContext context, int level, bool examine_children, MemoryContextCounters *totals); +#endif -/* pull_var_clause() */ +/* + * pull_var_clause() + */ +#if PG_VERSION_NUM >= 90600 #define pull_var_clause_compat(node, aggbehavior, phbehavior) \ pull_var_clause((node), (aggbehavior) | (phbehavior)) +#elif PG_VERSION_NUM >= 90500 +#define pull_var_clause_compat(node, aggbehavior, phbehavior) \ + pull_var_clause((node), (aggbehavior), (phbehavior)) +#endif + + +/* + * set_dummy_rel_pathlist() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +void set_dummy_rel_pathlist(RelOptInfo *rel); +#endif -/* set_rel_consider_parallel() */ +/* + * set_rel_consider_parallel() + */ +#if PG_VERSION_NUM >= 90600 extern void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); #define set_rel_consider_parallel_compat(root, rel, rte) \ set_rel_consider_parallel((root), (rel), (rte)) +#endif -#else /* PG_VERSION_NUM >= 90500 */ - -#define ALLOCSET_DEFAULT_SIZES \ - ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE - -/* adjust_appendrel_attrs() */ -#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ - do { \ - (dst_rel)->reltargetlist = (List *) \ - 
adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltargetlist, \ - (appinfo)); \ - } while (0) - - -/* check_partial_indexes() */ -#define check_index_predicates_compat(rool, rel) \ - check_partial_indexes((root), (rel)) - - -/* create_plain_partial_paths() */ -#define create_plain_partial_paths_compat(root, rel) ((void) true) - - -/* get_parameterized_joinrel_size() */ -#define get_parameterized_joinrel_size_compat(root, rel, \ - outer_path, \ - inner_path, \ - sjinfo, restrict_clauses) \ - get_parameterized_joinrel_size((root), (rel), \ - (outer_path)->rows, \ - (inner_path)->rows, \ - (sjinfo), (restrict_clauses)) - - -/* make_result() */ -#define make_result_compat(root, tlist, resconstantqual, subplan) \ - make_result((root), (tlist), (resconstantqual), (subplan)) - - -/* pull_var_clause() */ -#define pull_var_clause_compat(node, aggbehavior, phbehavior) \ - pull_var_clause((node), (aggbehavior), (phbehavior)) - - -/* set_rel_consider_parallel() */ -#define set_rel_consider_parallel_compat(root, rel, rte) ((void) true) - - -/* set_dummy_rel_pathlist() */ -void set_dummy_rel_pathlist(RelOptInfo *rel); - - -/* get_rel_persistence() */ +/* + * get_rel_persistence() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 char get_rel_persistence(Oid relid); - -#endif /* PG_VERSION_NUM */ +#endif /* From 5a1eb53d107e62f1f3c890866fade1449edfa61d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 17:05:48 +0300 Subject: [PATCH 0408/1124] Fix inserts --- src/partition_update.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 200badbb..a2e3cb24 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,8 +22,7 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, HeapTuple 
oldtuple, - EPQState *epqstate, EState *estate); +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate); void init_partition_update_static_data(void) @@ -44,7 +43,7 @@ init_partition_update_static_data(void) "Enables the planner's use of PartitionUpdate custom node.", NULL, &pg_pathman_enable_partition_update, - true, + false, PGC_USERSET, 0, NULL, @@ -111,13 +110,8 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - EState *estate = node->ss.ps.state; - TupleTableSlot *slot; - ResultRelInfo *saved_rel_info; - - /* save original ResultRelInfo */ - saved_rel_info = estate->es_result_relation_info; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + TupleTableSlot *slot; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -128,19 +122,21 @@ partition_update_exec(CustomScanState *node) Datum datum; bool isNull; ResultRelInfo *resultRelInfo; - HeapTuple oldtuple; ItemPointer tupleid; ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; AttrNumber ctid_attno; + PartitionFilterState *child_state = (PartitionFilterState *) child_ps; + EState *estate = node->ss.ps.state; + + resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); EvalPlanQualSetSlot(&epqstate, slot); - oldtuple = NULL; /* * extract the 'ctid' junk attribute. 
@@ -159,11 +155,11 @@ partition_update_exec(CustomScanState *node) tupleid = &tuple_ctid; /* delete old tuple */ - estate->es_result_relation_info = saved_rel_info; - ExecDeleteInternal(tupleid, oldtuple, &epqstate, estate); + estate->es_result_relation_info = child_state->result_parts.saved_rel_info; + ExecDeleteInternal(tupleid, &epqstate, estate); estate->es_result_relation_info = resultRelInfo; - /* we got the slot that can be inserted to child partition */ + /* we've got the slot that can be inserted to child partition */ return slot; } @@ -198,7 +194,6 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, EPQState *epqstate, EState *estate) { From d056e0283972c0ae1d212752a4649466b000ce9d Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Apr 2017 18:01:48 +0300 Subject: [PATCH 0409/1124] Add prototype of CatalogInsertIndex routine and stub of CatalogUpdateIndex into pg_pathman --- src/compat/pg_compat.c | 92 ++++++++++++++++++++++++++++++++++ src/include/compat/pg_compat.h | 19 +++++++ src/include/pathman.h | 7 --- src/partition_creation.c | 91 +-------------------------------- src/pl_funcs.c | 80 ----------------------------- 5 files changed, 112 insertions(+), 177 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index d49f2003..012ef741 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -34,6 +34,98 @@ */ +/* + * CatalogIndexInsert is the copy of static prototype having the same name from + * src/backend/catalog/indexing.c + * + * CatalogUpdateIndexes + */ +#if PG_VERSION_NUM >= 100000 +#include "catalog/index.h" +void +CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) +{ + int i; + int numIndexes; + RelationPtr relationDescs; + Relation heapRelation; + TupleTableSlot *slot; + IndexInfo **indexInfoArray; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + + 
/* HOT update does not require index inserts */ + if (HeapTupleIsHeapOnly(heapTuple)) + return; + + /* + * Get information from the state structure. Fall out if nothing to do. + */ + numIndexes = indstate->ri_NumIndices; + if (numIndexes == 0) + return; + relationDescs = indstate->ri_IndexRelationDescs; + indexInfoArray = indstate->ri_IndexRelationInfo; + heapRelation = indstate->ri_RelationDesc; + + /* Need a slot to hold the tuple being examined */ + slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); + ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + + /* + * for each index, form and insert the index tuple + */ + for (i = 0; i < numIndexes; i++) + { + IndexInfo *indexInfo; + + indexInfo = indexInfoArray[i]; + + /* If the index is marked as read-only, ignore it */ + if (!indexInfo->ii_ReadyForInserts) + continue; + + /* + * Expressional and partial indexes on system catalogs are not + * supported, nor exclusion constraints, nor deferred uniqueness + */ + Assert(indexInfo->ii_Expressions == NIL); + Assert(indexInfo->ii_Predicate == NIL); + Assert(indexInfo->ii_ExclusionOps == NULL); + Assert(relationDescs[i]->rd_index->indimmediate); + + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. + */ + FormIndexDatum(indexInfo, + slot, + NULL, /* no expression eval to do */ + values, + isnull); + + /* + * The index AM does the rest. + */ + index_insert(relationDescs[i], /* index relation */ + values, /* array of index Datums */ + isnull, /* is-null flags */ + &(heapTuple->t_self), /* tid of heap tuple */ + heapRelation, + relationDescs[i]->rd_index->indisunique ? 
+ UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, + indexInfo); + } + + ExecDropSingleTupleTableSlot(slot); +} +void +CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple) +{ +} +#endif + + /* * create_plain_partial_paths * Build partial access paths for parallel scan of a plain relation diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 6c9e82b9..a583a513 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -11,6 +11,14 @@ #ifndef PG_COMPAT_H #define PG_COMPAT_H +/* Check PostgreSQL version (9.5.4 contains an important fix for BGW) */ +#include "pg_config.h" +#if PG_VERSION_NUM < 90503 + #error "Cannot build pg_pathman with PostgreSQL version lower than 9.5.3" +#elif PG_VERSION_NUM < 90504 + #warning "It is STRONGLY recommended to use pg_pathman with PostgreSQL 9.5.4 since it contains important fixes" +#endif + #include "compat/debug_compat_features.h" #include "postgres.h" @@ -60,6 +68,17 @@ #endif +/* + * CatalogIndexInsert + * CatalogUpdateIndexes + */ +#if PG_VERSION_NUM >= 100000 +#include "catalog/indexing.h" +void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); +void CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple); +#endif + + /* * check_index_predicates() */ diff --git a/src/include/pathman.h b/src/include/pathman.h index 5911a7e1..fc870b8f 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -23,13 +23,6 @@ #include "parser/parsetree.h" -/* Check PostgreSQL version (9.5.4 contains an important fix for BGW) */ -#if PG_VERSION_NUM < 90503 - #error "Cannot build pg_pathman with PostgreSQL version lower than 9.5.3" -#elif PG_VERSION_NUM < 90504 - #warning "It is STRONGLY recommended to use pg_pathman with PostgreSQL 9.5.4 since it contains important fixes" -#endif - /* Get CString representation of Datum (simple wrapper) */ #ifdef USE_ASSERT_CHECKING #include "utils.h" diff --git a/src/partition_creation.c b/src/partition_creation.c index e01bd990..e056fcd3 
100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -13,6 +13,7 @@ #include "partition_filter.h" #include "pathman.h" #include "pathman_workers.h" +#include "compat/pg_compat.h" #include "xact_handling.h" #include "access/htup_details.h" @@ -891,96 +892,6 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) return table_addr; } -#if PG_VERSION_NUM >= 100000 -#include "catalog/index.h" -static void -CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) -{ - int i; - int numIndexes; - RelationPtr relationDescs; - Relation heapRelation; - TupleTableSlot *slot; - IndexInfo **indexInfoArray; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - - /* HOT update does not require index inserts */ - if (HeapTupleIsHeapOnly(heapTuple)) - return; - - /* - * Get information from the state structure. Fall out if nothing to do. - */ - numIndexes = indstate->ri_NumIndices; - if (numIndexes == 0) - return; - relationDescs = indstate->ri_IndexRelationDescs; - indexInfoArray = indstate->ri_IndexRelationInfo; - heapRelation = indstate->ri_RelationDesc; - - /* Need a slot to hold the tuple being examined */ - slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); - - /* - * for each index, form and insert the index tuple - */ - for (i = 0; i < numIndexes; i++) - { - IndexInfo *indexInfo; - - indexInfo = indexInfoArray[i]; - - /* If the index is marked as read-only, ignore it */ - if (!indexInfo->ii_ReadyForInserts) - continue; - - /* - * Expressional and partial indexes on system catalogs are not - * supported, nor exclusion constraints, nor deferred uniqueness - */ - Assert(indexInfo->ii_Expressions == NIL); - Assert(indexInfo->ii_Predicate == NIL); - Assert(indexInfo->ii_ExclusionOps == NULL); - Assert(relationDescs[i]->rd_index->indimmediate); - - /* - * FormIndexDatum fills in its values and isnull parameters with the - * appropriate values for the column(s) 
of the index. - */ - FormIndexDatum(indexInfo, - slot, - NULL, /* no expression eval to do */ - values, - isnull); - - /* - * The index AM does the rest. - */ - index_insert(relationDescs[i], /* index relation */ - values, /* array of index Datums */ - isnull, /* is-null flags */ - &(heapTuple->t_self), /* tid of heap tuple */ - heapRelation, - relationDescs[i]->rd_index->indisunique ? - UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, - indexInfo); - } - - ExecDropSingleTupleTableSlot(slot); -} -static void -CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple) -{ - CatalogIndexState indstate; - - indstate = CatalogOpenIndexes(heapRel); - CatalogIndexInsert(indstate, heapTuple); - CatalogCloseIndexes(indstate); -} -#endif - /* Copy ACL privileges of parent table and set "attislocal" = true */ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index a3fc7a1d..3b44fb5f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -794,86 +794,6 @@ build_check_constraint_name_attname(PG_FUNCTION_ARGS) * ------------------------ */ -#if PG_VERSION_NUM >= 100000 -#include "catalog/index.h" -static void -CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) -{ - int i; - int numIndexes; - RelationPtr relationDescs; - Relation heapRelation; - TupleTableSlot *slot; - IndexInfo **indexInfoArray; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - - /* HOT update does not require index inserts */ - if (HeapTupleIsHeapOnly(heapTuple)) - return; - - /* - * Get information from the state structure. Fall out if nothing to do. 
- */ - numIndexes = indstate->ri_NumIndices; - if (numIndexes == 0) - return; - relationDescs = indstate->ri_IndexRelationDescs; - indexInfoArray = indstate->ri_IndexRelationInfo; - heapRelation = indstate->ri_RelationDesc; - - /* Need a slot to hold the tuple being examined */ - slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); - - /* - * for each index, form and insert the index tuple - */ - for (i = 0; i < numIndexes; i++) - { - IndexInfo *indexInfo; - - indexInfo = indexInfoArray[i]; - - /* If the index is marked as read-only, ignore it */ - if (!indexInfo->ii_ReadyForInserts) - continue; - - /* - * Expressional and partial indexes on system catalogs are not - * supported, nor exclusion constraints, nor deferred uniqueness - */ - Assert(indexInfo->ii_Expressions == NIL); - Assert(indexInfo->ii_Predicate == NIL); - Assert(indexInfo->ii_ExclusionOps == NULL); - Assert(relationDescs[i]->rd_index->indimmediate); - - /* - * FormIndexDatum fills in its values and isnull parameters with the - * appropriate values for the column(s) of the index. - */ - FormIndexDatum(indexInfo, - slot, - NULL, /* no expression eval to do */ - values, - isnull); - - /* - * The index AM does the rest. - */ - index_insert(relationDescs[i], /* index relation */ - values, /* array of index Datums */ - isnull, /* is-null flags */ - &(heapTuple->t_self), /* tid of heap tuple */ - heapRelation, - relationDescs[i]->rd_index->indisunique ? - UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, - indexInfo); - } - - ExecDropSingleTupleTableSlot(slot); -} -#endif /* * Try to add previously partitioned table to PATHMAN_CONFIG. 
From 1999403fa5de6926889a7da6637b9d657c0bf8d5 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 18:47:12 +0300 Subject: [PATCH 0410/1124] Fix target list for INSERTs on creation stage --- src/partition_filter.c | 55 ++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 2f88ac09..2f491830 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -86,7 +86,7 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List * pfilter_build_tlist(Relation parent_rel, List *tlist); +static List * pfilter_build_tlist(Relation parent_rel, Plan *subplan); static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -486,7 +486,7 @@ make_partition_filter(Plan *subplan, Oid parent_relid, /* Build an appropriate target list using a cached Relation entry */ parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan->targetlist); + cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan); RelationClose(parent_rel); /* No physical relation will be scanned */ @@ -665,35 +665,44 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e * Build partition filter's target list pointing to subplan tuple's elements. 
*/ static List * -pfilter_build_tlist(Relation parent_rel, List *tlist) +pfilter_build_tlist(Relation parent_rel, Plan *subplan) { List *result_tlist = NIL; ListCell *lc; - foreach (lc, tlist) + foreach (lc, subplan->targetlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - TargetEntry *newtle; + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; + + if (IsA(tle->expr, Const)) + newtle = makeTargetEntry(copyObject(tle->expr), tle->resno, tle->resname, + tle->resjunk); - if (tle->expr != NULL && IsA(tle->expr, Var)) - { - Var *var = (Var *) palloc(sizeof(Var)); - *var = *((Var *)(tle->expr)); - var->varno = INDEX_VAR; - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } else { - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - tle->resno, - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); + if (tle->expr != NULL && IsA(tle->expr, Var)) + { + Var *var = (Var *) palloc(sizeof(Var)); + *var = *((Var *)(tle->expr)); + var->varno = INDEX_VAR; + var->varattno = tle->resno; + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } + else + { + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, + tle->resjunk); + } } result_tlist = lappend(result_tlist, newtle); From c2bb1d8bf3adc047db8f91d3458277d462892ea2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Apr 2017 18:59:16 +0300 Subject: [PATCH 0411/1124] Fix clang warning --- src/partition_update.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index a2e3cb24..842e1287 100644 --- a/src/partition_update.c 
+++ b/src/partition_update.c @@ -118,7 +118,6 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { - char relkind; Datum datum; bool isNull; ResultRelInfo *resultRelInfo; @@ -131,7 +130,6 @@ partition_update_exec(CustomScanState *node) PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; - resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); @@ -141,8 +139,7 @@ partition_update_exec(CustomScanState *node) /* * extract the 'ctid' junk attribute. */ - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - Assert(relkind == RELKIND_RELATION); + Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); /* shouldn't ever get a null result... */ From c436b29cc9f9e2db950ea62b77458c7ad37cb6ff Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Apr 2017 19:29:22 +0300 Subject: [PATCH 0412/1124] Combine simple_heap_update and CatalogUpdateIndexes routines in pg version 9.6 and below into one routine CatalogTupleUpdate as it is in pg10 --- src/compat/pg_compat.c | 6 ------ src/include/compat/pg_compat.h | 16 +++++++++++++--- src/partition_creation.c | 14 ++++---------- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 012ef741..c50b1c26 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -37,8 +37,6 @@ /* * CatalogIndexInsert is the copy of static prototype having the same name from * src/backend/catalog/indexing.c - * - * CatalogUpdateIndexes */ #if PG_VERSION_NUM >= 100000 #include "catalog/index.h" @@ -119,10 +117,6 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) ExecDropSingleTupleTableSlot(slot); } -void -CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple) -{ -} #endif diff 
--git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index a583a513..d7d81679 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -69,13 +69,23 @@ /* - * CatalogIndexInsert - * CatalogUpdateIndexes + * CatalogIndexInsert() */ #if PG_VERSION_NUM >= 100000 #include "catalog/indexing.h" void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); -void CatalogUpdateIndexes(Relation heapRel, HeapTuple heapTuple); +#endif + + +/* + * CatalogTupleUpdate() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +#define CatalogTupleUpdate(heapRel, updTid, heapTuple) \ + do { \ + simple_heap_update((heapRel), (updTid), (heapTuple)); \ + CatalogUpdateIndexes((heapRel), (heapTuple)); \ + } while (0) #endif diff --git a/src/partition_creation.c b/src/partition_creation.c index e056fcd3..0648ff29 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -981,11 +981,8 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Build new tuple with parent's ACL */ htup = heap_modify_tuple(htup, pg_class_desc, values, nulls, replaces); - /* Update child's tuple */ - simple_heap_update(pg_class_rel, &iptr, htup); - - /* Don't forget to update indexes */ - CatalogUpdateIndexes(pg_class_rel, htup); + /* Update child's tuple with related indexes */ + CatalogTupleUpdate(pg_class_rel, &iptr, htup); } systable_endscan(scan); @@ -1085,11 +1082,8 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) subhtup = heap_modify_tuple(subhtup, pg_attribute_desc, values, nulls, replaces); - /* Update child's tuple */ - simple_heap_update(pg_attribute_rel, &iptr, subhtup); - - /* Don't forget to update indexes */ - CatalogUpdateIndexes(pg_attribute_rel, subhtup); + /* Update child's tuple and related indexes */ + CatalogTupleUpdate(pg_attribute_rel, &iptr, subhtup); } systable_endscan(subscan); From b19f72cf82ccfc9d0d54f4ec0772c3d0801ebd51 Mon Sep 17 00:00:00 2001 From: Maksim 
Milyutin Date: Mon, 24 Apr 2017 20:14:13 +0300 Subject: [PATCH 0413/1124] Define compatible version for RelationDefine routine --- src/include/compat/pg_compat.h | 15 +++++++++++++++ src/partition_creation.c | 7 ++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index d7d81679..57301e9a 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -68,6 +68,21 @@ #endif +/* + * DefineRelation() + * + * for v10 set NULL on 'queryString' parameter as it uses only under vanilla + * partition creating + */ +#if PG_VERSION_NUM >= 100000 +#define DefineRelationCompat(createstmt, relkind, ownerId, typaddress) \ + DefineRelation((createstmt), (relkind), (ownerId), (typaddress), NULL) +#elif PG_VERSION_NUM >= 90500 +#define DefineRelationCompat(createstmt, relkind, ownerId, typaddress) \ + DefineRelation((createstmt), (relkind), (ownerId), (typaddress)) +#endif + + /* * CatalogIndexInsert() */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 0648ff29..13b131c3 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -858,11 +858,8 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) GUC_ACTION_SAVE, true, 0, false); /* Create new partition owned by parent's posessor */ -#if PG_VERSION_NUM >= 100000 - table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL, NULL); -#else - table_addr = DefineRelation(create_stmt, RELKIND_RELATION, relowner, NULL); -#endif + table_addr = DefineRelationCompat(create_stmt, RELKIND_RELATION, relowner, + NULL); /* Save data about a simple DDL command that was just executed */ EventTriggerCollectSimpleCommand(table_addr, From c9f5368a15c24c52d83c23b3a82ddc5445db1abf Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 24 Apr 2017 20:58:04 +0300 Subject: [PATCH 0414/1124] Define compatible version for InitResultRelInfo routine --- src/include/compat/pg_compat.h | 21 
++++++++++++++++++++- src/partition_filter.c | 17 +++++------------ src/utility_stmt_hooking.c | 17 +++++------------ 3 files changed, 30 insertions(+), 25 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 57301e9a..3b3f14bc 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -71,7 +71,7 @@ /* * DefineRelation() * - * for v10 set NULL on 'queryString' parameter as it uses only under vanilla + * for v10 set NULL into 'queryString' argument as it's used only under vanilla * partition creating */ #if PG_VERSION_NUM >= 100000 @@ -172,6 +172,25 @@ extern void create_plain_partial_paths(PlannerInfo *root, #endif +/* + * InitResultRelInfo + * + * for v10 set NULL into 'partition_root' argument to specify that result + * relation is not vanilla partition + */ +#if PG_VERSION_NUM >= 100000 +#define InitResultRelInfoCompat(resultRelInfo, resultRelationDesc, \ + resultRelationIndex, instrument_options) \ + InitResultRelInfo((resultRelInfo), (resultRelationDesc), \ + (resultRelationIndex), NULL, (instrument_options)) +#elif PG_VERSION_NUM >= 90500 +#define InitResultRelInfoCompat(resultRelInfo, resultRelationDesc, \ + resultRelationIndex, instrument_options) \ + InitResultRelInfo((resultRelInfo), (resultRelationDesc), \ + (resultRelationIndex), (instrument_options)) +#endif + + /* * make_result() */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 0269077b..6d3eb6b9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -8,6 +8,7 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "init.h" #include "nodes_common.h" #include "pathman.h" @@ -295,18 +296,10 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!parts_storage->saved_rel_info) elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); -#if PG_VERSION_NUM >= 100000 - InitResultRelInfo(child_result_rel_info, - 
child_rel, - child_rte_idx, - parent_rel, - parts_storage->estate->es_instrument); -#else - InitResultRelInfo(child_result_rel_info, - child_rel, - child_rte_idx, - parts_storage->estate->es_instrument); -#endif + InitResultRelInfoCompat(child_result_rel_info, + child_rel, + child_rte_idx, + parts_storage->estate->es_instrument); if (parts_storage->command_type != CMD_DELETE) ExecOpenIndices(child_result_rel_info, parts_storage->speculative_inserts); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 4999cb0f..0e837dcf 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -12,6 +12,7 @@ */ #include "compat/debug_compat_features.h" +#include "compat/pg_compat.h" #include "init.h" #include "utility_stmt_hooking.h" #include "partition_filter.h" @@ -561,18 +562,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tupDesc = RelationGetDescr(parent_rel); parent_result_rel = makeNode(ResultRelInfo); -#if PG_VERSION_NUM >= 100000 - InitResultRelInfo(parent_result_rel, - parent_rel, - 1, /* dummy rangetable index */ - NULL, - 0); -#else - InitResultRelInfo(parent_result_rel, - parent_rel, - 1, /* dummy rangetable index */ - 0); -#endif + InitResultRelInfoCompat(parent_result_rel, + parent_rel, + 1, /* dummy rangetable index */ + 0); ExecOpenIndices(parent_result_rel, false); estate->es_result_relations = parent_result_rel; From 76da280eb18cf89ee23fce863ab2d9a062cbd76f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Apr 2017 13:28:06 +0300 Subject: [PATCH 0415/1124] Add delete support in FDW tables --- src/partition_update.c | 211 ++++++++++++++++++++++++++++------------- 1 file changed, 147 insertions(+), 64 deletions(-) diff --git a/src/partition_update.c b/src/partition_update.c index 842e1287..fab7f6f2 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -14,7 +14,10 @@ #include "partition_update.h" #include "access/xact.h" +#include "access/htup_details.h" +#include "commands/trigger.h" #include 
"executor/nodeModifyTable.h" +#include "foreign/fdwapi.h" #include "utils/guc.h" bool pg_pathman_enable_partition_update = true; @@ -22,7 +25,11 @@ bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate); +static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, + HeapTuple oldtuple, + TupleTableSlot *planSlot, + EPQState *epqstate, + EState *estate); void init_partition_update_static_data(void) @@ -120,12 +127,15 @@ partition_update_exec(CustomScanState *node) { Datum datum; bool isNull; + char relkind; ResultRelInfo *resultRelInfo; - ItemPointer tupleid; + ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; AttrNumber ctid_attno; + HeapTupleData oldtupdata; + HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; @@ -136,24 +146,59 @@ partition_update_exec(CustomScanState *node) EvalPlanQualSetSlot(&epqstate, slot); - /* - * extract the 'ctid' junk attribute. - */ - Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; + oldtuple = NULL; + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + /* + * extract the 'ctid' junk attribute. 
+ */ + Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ + tupleid = &tuple_ctid; + } + else if (relkind == RELKIND_FOREIGN_TABLE) + { + if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + { + datum = ExecGetJunkAttribute(slot, + junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "wholerow is NULL"); + + oldtupdata.t_data = DatumGetHeapTupleHeader(datum); + oldtupdata.t_len = + HeapTupleHeaderGetDatumLength(oldtupdata.t_data); + ItemPointerSetInvalid(&(oldtupdata.t_self)); + + /* Historically, view triggers see invalid t_tableOid. */ + oldtupdata.t_tableOid =RelationGetRelid(resultRelInfo->ri_RelationDesc); + + oldtuple = &oldtupdata; + } + } + else + elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); /* delete old tuple */ estate->es_result_relation_info = child_state->result_parts.saved_rel_info; - ExecDeleteInternal(tupleid, &epqstate, estate); + + /* + * We have two cases here: + * normal relations - tupleid points to actual tuple + * foreign tables - tupleid is invalid, slot is required + */ + ExecDeleteInternal(tupleid, oldtuple, slot, &epqstate, estate); estate->es_result_relation_info = resultRelInfo; /* we've got the slot that can be inserted to child partition */ @@ -191,8 +236,10 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate) + HeapTuple oldtuple, + TupleTableSlot *planSlot, + EPQState *epqstate, + EState *estate) { ResultRelInfo *resultRelInfo; Relation resultRelationDesc; @@ -205,56 
+252,92 @@ ExecDeleteInternal(ItemPointer tupleid, resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; -ldelete:; - result = heap_delete(resultRelationDesc, tupleid, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); - switch (result) + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_delete_before_row) { - case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) - ereport(ERROR, - (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), - errmsg("tuple to be updated was already modified by an operation triggered by the current command"), - errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - - /* Else, already deleted by self; nothing to do */ - return NULL; - - case HeapTupleMayBeUpdated: - break; - - case HeapTupleUpdated: - if (IsolationUsesXactSnapshot()) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - if (!ItemPointerEquals(tupleid, &hufd.ctid)) - { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - epqstate, - resultRelationDesc, - resultRelInfo->ri_RangeTableIndex, - LockTupleExclusive, - &hufd.ctid, - hufd.xmax); - if (!TupIsNull(epqslot)) + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, + tupleid, oldtuple); + + if (!dodelete) + elog(ERROR, "In partitioned tables the old row always should be deleted"); + } + + if (resultRelInfo->ri_FdwRoutine) + { + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(resultRelationDesc)); + + /* + * delete from foreign table: let the FDW do it + */ + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, + resultRelInfo, + slot, + planSlot); + + /* we don't need slot anymore */ + 
ExecDropSingleTupleTableSlot(slot); + } + else + { + /* delete the tuple */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Else, already deleted by self; nothing to do */ + return NULL; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { - *tupleid = hufd.ctid; - goto ldelete; + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + resultRelationDesc, + resultRelInfo->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = hufd.ctid; + goto ldelete; + } } - } - /* tuple already deleted; nothing to do */ - return NULL; + /* tuple already deleted; nothing to do */ + return NULL; - default: - elog(ERROR, "unrecognized heap_delete status: %u", result); - return NULL; + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return NULL; + } } + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple); + return NULL; } From 2b91a499e1e02206b98edae4a77407fc33d1109a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 13:07:06 +0300 Subject: [PATCH 0416/1124] Make changes by review --- expected/pathman_basic.out | 16 ++++----- expected/pathman_calamity.out | 35 +++++++++++++++--- expected/pathman_column_type.out 
| 16 ++++----- expected/pathman_interval.out | 24 ++++++------- expected/pathman_permissions.out | 6 ++-- hash.sql | 11 ++++++ init.sql | 5 ++- sql/pathman_calamity.sql | 17 ++++++--- sql/pathman_column_type.sql | 4 +-- src/include/pathman.h | 3 +- src/include/relation_info.h | 3 +- src/init.c | 7 ++-- src/pl_funcs.c | 3 -- src/pl_hash_funcs.c | 42 ++++++++++++++++++++++ src/relation_info.c | 62 ++++++++------------------------ 15 files changed, 148 insertions(+), 106 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 995ca0d9..2c96c7bc 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1461,16 +1461,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr -----------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+---------+---------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} | 1114 | f + partrel | attname | parttype | range_interval | expression_p | atttype +----------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} | 1114 (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 20 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr ----------+---------+----------+----------------+--------------+---------+---------- + 
partrel | attname | parttype | range_interval | expression_p | atttype +---------+---------+----------+----------------+--------------+--------- (0 rows) /* Check overlaps */ @@ -1632,9 +1632,9 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 5 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr ---------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------+---------+---------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 23 | f + partrel | attname | parttype | range_interval | expression_p | atttype +--------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------+--------- + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 23 (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 2267030d..8cac52b6 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -221,6 +221,31 @@ SELECT drop_partitions('calamity.part_test', true); (1 row) DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + 
public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test /* check function build_range_condition() */ SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ ERROR: 'partition_relid' should not be NULL @@ -245,15 +270,15 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ ERROR: 'atttype' should not be NULL -SELECT validate_interval_value(1186, NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('interval'::regtype, NULL, '1 mon'); /* not ok */ ERROR: 'parttype' should not be NULL -SELECT validate_interval_value(23, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('int4'::regtype, 2, '1 mon'); /* not ok */ ERROR: invalid input syntax for integer: "1 mon" -SELECT validate_interval_value(1186, 1, '1 mon'); /* not ok */ +SELECT validate_interval_value('interval'::regtype, 1, '1 mon'); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value(1186, 2, NULL); /* OK */ +SELECT validate_interval_value('interval'::regtype, 2, NULL); /* OK */ validate_interval_value ------------------------- t diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 4996af26..b89bc448 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -31,11 +31,11 @@ SELECT context, entries FROM pathman_cache_stats 
ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that upd_expr is true */ +/* check that parsed expression was cleared */ SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr ------------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+---------+---------- - test_column_type.test | val | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 | t + partrel | attname | parttype | range_interval | expression_p | atttype +-----------------------+---------+----------+----------------+--------------+--------- + test_column_type.test | val | 2 | 10 | | (1 row) /* make sure that everything works properly */ @@ -44,11 +44,11 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -/* check that expression, atttype is changed and upd_expr is false */ +/* check that expression, atttype is changed */ SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr ------------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+---------+---------- - test_column_type.test | val | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1700 | f + partrel | attname | parttype | range_interval | expression_p | atttype +-----------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- + test_column_type.test | val | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 
:varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1700 (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 819fbbb9..9a66e947 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -38,9 +38,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+---------+---------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 21 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 21 | f + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 21 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 21 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -81,9 +81,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+---------+---------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 | f + partrel | attname | 
parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -124,9 +124,9 @@ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+---------+---------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 20 | f + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 20 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -157,9 +157,9 @@ SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); (1 row) SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr --------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+---------+---------- - test_interval.abc | dt | 2 | @ 1 mon | {VAR :varno 1 :varattno 1 :vartype 1082 
:vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1082 | f + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- + test_interval.abc | dt | 2 | @ 1 mon | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1082 (1 row) DROP TABLE test_interval.abc CASCADE; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 24775fd4..6814c442 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -32,9 +32,9 @@ NOTICE: sequence "user1_table_seq" does not exist, skipping /* Should be able to see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype | upd_expr --------------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+---------+---------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 | f + partrel | attname | parttype | range_interval | expression_p | atttype +-------------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- + permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 (1 row) SELECT * FROM pathman_config_params; diff --git a/hash.sql b/hash.sql index 8ff5cc54..fbdcd97a 100644 --- a/hash.sql +++ b/hash.sql @@ -173,3 +173,14 @@ LANGUAGE C; CREATE OR REPLACE 
FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partition_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; diff --git a/init.sql b/init.sql index dde86cae..f58c0a75 100644 --- a/init.sql +++ b/init.sql @@ -36,9 +36,8 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( attname TEXT NOT NULL, /* expression */ parttype INTEGER NOT NULL, range_interval TEXT, - expression_p TEXT NOT NULL, /* parsed expression (until plan) */ - atttype OID NOT NULL, /* expression type */ - upd_expr BOOL DEFAULT FALSE, /* update expression on next refresh? */ + expression_p TEXT, /* parsed expression (until plan) */ + atttype OID, /* expression type */ /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 593d19b7..8ccb1723 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -110,6 +110,13 @@ SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ SELECT drop_partitions('calamity.part_test', true); DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); +SELECT build_hash_condition('text', 'val', 10, 1); +SELECT build_hash_condition('int4', 'val', 1, 1); +SELECT build_hash_condition('int4', 'val', 10, 20); +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); /* check function build_range_condition() */ SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ @@ -119,11 +126,11 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT 
build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ -SELECT validate_interval_value(1186, NULL, '1 mon'); /* not ok */ -SELECT validate_interval_value(23, 2, '1 mon'); /* not ok */ -SELECT validate_interval_value(1186, 1, '1 mon'); /* not ok */ -SELECT validate_interval_value(1186, 2, NULL); /* OK */ +SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('interval'::regtype, NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('int4'::regtype, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('interval'::regtype, 1, '1 mon'); /* not ok */ +SELECT validate_interval_value('interval'::regtype, 2, NULL); /* OK */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 6284ae35..94609a2a 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -20,13 +20,13 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that upd_expr is true */ +/* check that parsed expression was cleared */ SELECT * FROM pathman_config; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -/* check that expression, atttype is changed and upd_expr is false */ +/* check that expression, atttype is changed */ SELECT * FROM pathman_config; SELECT context, entries FROM pathman_cache_stats ORDER BY context; diff --git a/src/include/pathman.h b/src/include/pathman.h index 98e42824..090b2176 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -44,14 +44,13 @@ * Definitions for the "pathman_config" table. 
*/ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 7 +#define Natts_pathman_config 6 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_expression 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ #define Anum_pathman_config_expression_p 5 /* parsed partition expression (text) */ #define Anum_pathman_config_atttype 6 /* partitioned atttype */ -#define Anum_pathman_config_upd_expression 7 /* expression needs update */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index e572dbab..a926b96f 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -237,11 +237,10 @@ PrelLastChild(const PartRelationInfo *prel) } -PartRelationInfo *create_pathman_relation_info(Oid relid); const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, bool allow_incomplete); -void invalidate_pathman_relation_info(Oid relid, bool *found); +PartRelationInfo * invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, diff --git a/src/init.c b/src/init.c index d33f4d05..98504343 100644 --- a/src/init.c +++ b/src/init.c @@ -644,7 +644,6 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Perform checks for non-NULL columns */ Assert(!isnull[Anum_pathman_config_partrel - 1]); Assert(!isnull[Anum_pathman_config_expression - 1]); - Assert(!isnull[Anum_pathman_config_expression_p - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); } @@ -763,12 +762,10 @@ read_pathman_config(void) Assert(!isnull[Anum_pathman_config_partrel - 
1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); Assert(!isnull[Anum_pathman_config_expression - 1]); - Assert(!isnull[Anum_pathman_config_expression_p - 1]); - Assert(!isnull[Anum_pathman_config_upd_expression - 1]); /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); + upd_expr = isnull[Anum_pathman_config_expression_p - 1]; /* Check that relation 'relid' exists */ if (get_rel_type_id(relid) == InvalidOid) @@ -781,7 +778,7 @@ read_pathman_config(void) } if (upd_expr) - create_pathman_relation_info(relid); + invalidate_pathman_relation_info(relid, NULL); else refresh_pathman_relation_info(relid, values, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 788d59ae..1d23ea59 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -726,9 +726,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); isnull[Anum_pathman_config_atttype - 1] = false; - values[Anum_pathman_config_upd_expression - 1] = BoolGetDatum(false); - isnull[Anum_pathman_config_upd_expression - 1] = false; - if (parttype == PT_RANGE) { values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 9383074c..7b056f3b 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -26,6 +26,7 @@ PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); PG_FUNCTION_INFO_V1( get_hash_part_idx ); +PG_FUNCTION_INFO_V1( build_hash_condition ); /* @@ -113,3 +114,44 @@ get_hash_part_idx(PG_FUNCTION_ARGS) PG_RETURN_UINT32(hash_to_part_index(value, part_count)); } + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +Datum +build_hash_condition(PG_FUNCTION_ARGS) +{ + Oid atttype = PG_GETARG_OID(0); + text *attname = PG_GETARG_TEXT_P(1); + uint32 part_count = PG_GETARG_UINT32(2), + part_idx = PG_GETARG_UINT32(3); + + TypeCacheEntry *tce; + char 
*attname_cstring = text_to_cstring(attname); + + char *result; + + if (part_idx >= part_count) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_index' must be lower than 'partitions_count'"))); + + tce = lookup_type_cache(atttype, TYPECACHE_HASH_PROC); + + /* Check that HASH function exists */ + if (!OidIsValid(tce->hash_proc)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no hash function for type %s", + format_type_be(atttype)))); + + /* Create hash condition CSTRING */ + result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", + get_namespace_name(get_pathman_schema()), + get_func_name(tce->hash_proc), + attname_cstring, + part_count, + part_idx); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} diff --git a/src/relation_info.c b/src/relation_info.c index 2b28a243..e7f7bcb5 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -116,43 +116,6 @@ init_relation_info_static_data(void) NULL); } -/* Create or update PartRelationInfo in local cache. Might emit ERROR. */ -PartRelationInfo * -create_pathman_relation_info(Oid relid) -{ - PartRelationInfo *prel; - bool found_entry; - - AssertTemporaryContext(); - prel = (PartRelationInfo *) pathman_cache_search_relid(partitioned_rels, - relid, HASH_ENTER, - &found_entry); - - elog(DEBUG2, - found_entry ? - "Refreshing record for relation %u in pg_pathman's cache [%u]" : - "Creating new record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); - - /* - * NOTE: Trick clang analyzer (first access without NULL pointer check). - * Access to field 'valid' results in a dereference of a null pointer. 
- */ - prel->cmp_proc = InvalidOid; - - /* Clear outdated resources */ - if (found_entry && PrelIsValid(prel)) - { - FreeChildrenArray(prel); - FreeRangesArray(prel); - FreeIfNotNull(prel->attname); - } - - /* First we assume that this entry is invalid */ - prel->valid = false; - return prel; -} - /* * refresh\invalidate\get\remove PartRelationInfo functions. */ @@ -175,7 +138,7 @@ refresh_pathman_relation_info(Oid relid, MemoryContext oldcontext; AssertTemporaryContext(); - prel = create_pathman_relation_info(relid); + prel = invalidate_pathman_relation_info(relid, NULL); /* Try locking parent, exit fast if 'allow_incomplete' */ if (allow_incomplete) @@ -358,7 +321,7 @@ fill_part_expression_vars(PartRelationInfo *prel) } /* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ -void +PartRelationInfo * invalidate_pathman_relation_info(Oid relid, bool *found) { bool prel_found; @@ -393,6 +356,8 @@ invalidate_pathman_relation_info(Oid relid, bool *found) elog(DEBUG2, "Invalidating record for relation %u in pg_pathman's cache [%u]", relid, MyProcPid); + + return prel; } /* Update expression in pathman_config */ @@ -408,6 +373,7 @@ update_parsed_expression(Oid relid, HeapTuple tuple, Datum *values, bool *nulls) /* get and parse expression */ expression = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + Assert(nulls[Anum_pathman_config_expression_p - 1]); expr_info = get_part_expression_info(relid, expression, false, true); Assert(expr_info->expr_datum != (Datum) 0); pfree(expression); @@ -419,13 +385,9 @@ update_parsed_expression(Oid relid, HeapTuple tuple, Datum *values, bool *nulls) values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); nulls[Anum_pathman_config_atttype - 1] = false; - values[Anum_pathman_config_upd_expression - 1] = BoolGetDatum(false); - nulls[Anum_pathman_config_upd_expression - 1] = false; - MemSet(replaces, false, sizeof(replaces)); replaces[Anum_pathman_config_expression_p 
- 1] = true; replaces[Anum_pathman_config_atttype - 1] = true; - replaces[Anum_pathman_config_upd_expression - 1] = true; /* update row */ rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -451,11 +413,15 @@ mark_pathman_expression_for_update(Oid relid) HeapTuple newtuple; bool replaces[Natts_pathman_config]; - values[Anum_pathman_config_upd_expression - 1] = BoolGetDatum(true); - nulls[Anum_pathman_config_upd_expression - 1] = false; + values[Anum_pathman_config_expression_p - 1] = (Datum) 0; + nulls[Anum_pathman_config_expression_p - 1] = true; + + values[Anum_pathman_config_atttype - 1] = (Datum) 0; + nulls[Anum_pathman_config_atttype - 1] = true; MemSet(replaces, false, sizeof(replaces)); - replaces[Anum_pathman_config_upd_expression - 1] = true; + replaces[Anum_pathman_config_expression_p - 1] = true; + replaces[Anum_pathman_config_atttype - 1] = true; /* update row */ rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -484,7 +450,7 @@ get_pathman_relation_info(Oid relid) /* Check that PATHMAN_CONFIG table contains this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &tuple)) { - bool upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); + bool upd_expr = isnull[Anum_pathman_config_expression_p - 1]; if (upd_expr) update_parsed_expression(relid, tuple, values, isnull); @@ -997,7 +963,7 @@ try_perform_parent_refresh(Oid parent) if (pathman_config_contains_relation(parent, values, isnull, NULL, &tuple)) { - bool upd_expr = DatumGetBool(values[Anum_pathman_config_upd_expression - 1]); + bool upd_expr = isnull[Anum_pathman_config_expression_p - 1]; if (upd_expr) update_parsed_expression(parent, tuple, values, isnull); From 453eb2745b6d957f0780dada03bd7388a9830c96 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Apr 2017 15:49:44 +0300 Subject: [PATCH 0417/1124] restore lazy loading in read_pathman_config() --- src/init.c | 12 +++--------- src/relation_info.c | 1 + 2 
files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/init.c b/src/init.c index 98504343..cabffc98 100644 --- a/src/init.c +++ b/src/init.c @@ -751,8 +751,7 @@ read_pathman_config(void) while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { Datum values[Natts_pathman_config]; - bool upd_expr, - isnull[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; Oid relid; /* partitioned table */ /* Extract Datums from tuple 'htup' */ @@ -765,7 +764,6 @@ read_pathman_config(void) /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - upd_expr = isnull[Anum_pathman_config_expression_p - 1]; /* Check that relation 'relid' exists */ if (get_rel_type_id(relid) == InvalidOid) @@ -777,12 +775,8 @@ read_pathman_config(void) errhint(INIT_ERROR_HINT))); } - if (upd_expr) - invalidate_pathman_relation_info(relid, NULL); - else - refresh_pathman_relation_info(relid, - values, - true); /* allow lazy prel loading */ + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(relid, NULL); } /* Clean resources */ diff --git a/src/relation_info.c b/src/relation_info.c index e7f7bcb5..068568e8 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -139,6 +139,7 @@ refresh_pathman_relation_info(Oid relid, AssertTemporaryContext(); prel = invalidate_pathman_relation_info(relid, NULL); + Assert(prel); /* Try locking parent, exit fast if 'allow_incomplete' */ if (allow_incomplete) From d3658f2f6f0b51892f709e551cc16d90be6f446c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 15:51:20 +0300 Subject: [PATCH 0418/1124] Change supported version to python3 in tests --- tests/python/partitioning_test.py | 151 ++++++++++++++++-------------- 1 file changed, 82 insertions(+), 69 deletions(-) mode change 100644 => 100755 tests/python/partitioning_test.py diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py old mode 100644 new mode 100755 index 
6ca18970..cda00c62 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,20 +1,23 @@ +#!/usr/bin/env python3 # coding: utf-8 + """ concurrent_partitioning_test.py Tests concurrent partitioning worker with simultaneous update queries - Copyright (c) 2015-2016, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ import unittest import math -from testgres import get_new_node, stop_all import time import os import re import subprocess import threading +from testgres import get_new_node, stop_all + # Helper function for json equality def ordered(obj): @@ -25,6 +28,7 @@ def ordered(obj): else: return obj + def if_fdw_enabled(func): """To run tests with FDW support set environment variable TEST_FDW=1""" def wrapper(*args, **kwargs): @@ -110,7 +114,7 @@ def test_concurrent(self): self.assertEqual(data[0][0], 300000) node.stop() - except Exception, e: + except Exception as e: self.printlog(node.logs_dir + '/postgresql.log') raise e @@ -175,7 +179,7 @@ def test_replication(self): node.execute('postgres', 'select count(*) from abc')[0][0], 0 ) - except Exception, e: + except Exception as e: self.printlog(node.logs_dir + '/postgresql.log') self.printlog(replica.logs_dir + '/postgresql.log') raise e @@ -199,7 +203,7 @@ def get(self): # There is one flag for each thread which shows if thread have done # its work - flags = [Flag(False) for i in xrange(3)] + flags = [Flag(False) for i in range(3)] # All threads synchronizes though this lock lock = threading.Lock() @@ -275,9 +279,9 @@ def add_partition(node, flag, query): 'postgres', 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' ), - '6\n' + b'6\n' ) - except Exception, e: + except Exception as e: self.printlog(node.logs_dir + '/postgresql.log') raise e @@ -422,14 +426,14 @@ def test_foreign_table(self): # Check that table attached to partitioned table self.assertEqual( master.safe_psql('postgres', 'select * from ftable'), - '25|foreign\n' + 
b'25|foreign\n' ) # Check that we can successfully insert new data into foreign partition master.safe_psql('postgres', 'insert into abc values (26, \'part\')') self.assertEqual( master.safe_psql('postgres', 'select * from ftable order by id'), - '25|foreign\n26|part\n' + b'25|foreign\n26|part\n' ) # Testing drop partitions (including foreign partitions) @@ -459,7 +463,7 @@ def test_foreign_table(self): self.assertEqual( master.safe_psql('postgres', 'select * from hash_test'), - '1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' ) master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') @@ -851,63 +855,72 @@ def turnon_pathman(node): "--dbname=copy"], cmp_full), # dump in archive format ] - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data + try: FNULL = open(os.devnull, 'w') - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - p2 = subprocess.Popen(pg_restore_params, stdin=p1.stdout, stdout=FNULL, stderr=FNULL) - p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. 
- p2.communicate() - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') + + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' 
'.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, + stdout=FNULL, stderr=FNULL) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # check validity of data + with node.connect('initial') as con1, node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual(cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) + self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear 
copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + except: + raise + finally: + FNULL.close() # Stop instance and finish work node.stop() @@ -958,24 +971,24 @@ def test_concurrent_detach(self): "-T", "%i" % (test_interval+inserts_advance) ]) time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ + detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ "-D", "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, "-T", "%i" % test_interval ]) # Wait for completion of processes - inserts.wait() + _, stderrdata = inserts.communicate() detachs.wait() # Obtain error log from inserts process - inserts_errors = inserts.stderr.read() - self.assertIsNone(re.search("ERROR|FATAL|PANIC", inserts_errors), + self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), msg="Race condition between detach and concurrent inserts with append partition is expired") # Stop instance and finish work node.stop() node.cleanup() + FNULL.close() if __name__ == "__main__": From edc7a1f5d760303553603f1c81fec7b273df0046 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Apr 2017 16:05:01 +0300 Subject: [PATCH 0419/1124] replace check_relation_exists() with syscache search --- src/init.c | 2 +- src/pl_funcs.c | 21 +++++++-------------- src/pl_range_funcs.c | 9 +-------- 3 files changed, 9 insertions(+), 23 deletions(-) diff --git a/src/init.c b/src/init.c index cabffc98..27ec63f4 100644 --- a/src/init.c +++ b/src/init.c @@ -766,7 +766,7 @@ read_pathman_config(void) relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); /* Check that relation 'relid' exists */ - if (get_rel_type_id(relid) == InvalidOid) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) { DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 
1d23ea59..187ea11f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -105,13 +105,6 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); -/* Extracted common check */ -static inline bool -check_relation_exists(Oid relid) -{ - return get_rel_type_id(relid) != InvalidOid; -} - /* * ------------------------ @@ -538,7 +531,7 @@ validate_relname(PG_FUNCTION_ARGS) /* Fetch relation's Oid */ relid = PG_GETARG_OID(0); - if (!check_relation_exists(relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", relid), errdetail("triggered in function " @@ -600,7 +593,7 @@ build_update_trigger_name(PG_FUNCTION_ARGS) const char *result; /* Check that relation exists */ - if (!check_relation_exists(relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", relid))); @@ -618,7 +611,7 @@ build_update_trigger_func_name(PG_FUNCTION_ARGS) *func_name; /* Check that relation exists */ - if (!check_relation_exists(relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", relid))); @@ -638,7 +631,7 @@ build_check_constraint_name(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0); const char *result; - if (!check_relation_exists(relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", relid))); @@ -681,7 +674,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) errmsg("'parent_relid' should not be NULL"))); /* Check that relation exists */ - if (!check_relation_exists(relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), 
errmsg("relation \"%u\" does not exist", relid))); @@ -827,7 +820,7 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) partrel = DatumGetObjectId(partrel_datum); /* Finally trigger pg_pathman's cache invalidation event */ - if (check_relation_exists(partrel)) + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) CacheInvalidateRelcacheByRelid(partrel); pathman_config_params_trigger_func_return: @@ -1383,7 +1376,7 @@ has_update_trigger(PG_FUNCTION_ARGS) Oid parent_relid = PG_GETARG_OID(0); /* Check that relation exists */ - if (!check_relation_exists(parent_relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", parent_relid))); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 04e3c480..12a34666 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -70,13 +70,6 @@ static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); -/* Extracted common check */ -static inline bool -check_relation_exists(Oid relid) -{ - return get_rel_type_id(relid) != InvalidOid; -} - /* * ----------------------------- @@ -580,7 +573,7 @@ build_sequence_name(PG_FUNCTION_ARGS) Oid parent_nsp; char *result; - if (!check_relation_exists(parent_relid)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); parent_nsp = get_rel_namespace(parent_relid); From ec3ef23666467f6f1776c99f542e6fb9c64b1fae Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 16:19:22 +0300 Subject: [PATCH 0420/1124] Fix travis configuration for python3 --- Makefile | 2 ++ travis/apt.postgresql.org.sh | 1 + travis/pg-travis-test.sh | 4 +--- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 07afeddc..98a0d891 100644 --- a/Makefile +++ b/Makefile @@ -74,3 +74,5 @@ isolationcheck: | submake-isolation 
--temp-config=$(top_srcdir)/$(subdir)/conf.add \ --outputdir=./isolation_output \ $(ISOLATIONCHECKS) +partitioning_tests: + $(MAKE) -C tests/python partitioning_tests diff --git a/travis/apt.postgresql.org.sh b/travis/apt.postgresql.org.sh index 22814fa7..a157f290 100644 --- a/travis/apt.postgresql.org.sh +++ b/travis/apt.postgresql.org.sh @@ -126,6 +126,7 @@ EOF echo "Running apt-get update ..." apt-get update +apt-get install python3 cat < Date: Wed, 26 Apr 2017 16:19:42 +0300 Subject: [PATCH 0421/1124] Add Makefile for python tests too --- tests/python/Makefile | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 tests/python/Makefile diff --git a/tests/python/Makefile b/tests/python/Makefile new file mode 100644 index 00000000..cb2bc50d --- /dev/null +++ b/tests/python/Makefile @@ -0,0 +1,2 @@ +partitioning_tests: + python3 -m unittest partitioning_test.py From bcc374439d3925aed1f28b12481deab13ccd98f7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Apr 2017 16:20:00 +0300 Subject: [PATCH 0422/1124] reuse invalidate_pathman_relation_info() in remove_pathman_relation_info() --- src/relation_info.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 068568e8..b865c5dd 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -508,19 +508,14 @@ get_pathman_relation_info_after_lock(Oid relid, void remove_pathman_relation_info(Oid relid) { - PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, - relid, HASH_FIND, - NULL); - if (PrelIsValid(prel)) - { - FreeChildrenArray(prel); - FreeRangesArray(prel); - FreeIfNotNull(prel->attname); - } + bool found; + + /* Free resources */ + invalidate_pathman_relation_info(relid, &found); /* Now let's remove the entry completely */ - pathman_cache_search_relid(partitioned_rels, relid, - HASH_REMOVE, NULL); + if (found) + pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL); elog(DEBUG2, "Removing 
record for relation %u in pg_pathman's cache [%u]", From 694dd1823b96ddd93d3df641680e1ee6211ae3de Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 16:30:42 +0300 Subject: [PATCH 0423/1124] Use pip3 in travis config --- travis/apt.postgresql.org.sh | 1 - travis/pg-travis-test.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/travis/apt.postgresql.org.sh b/travis/apt.postgresql.org.sh index a157f290..22814fa7 100644 --- a/travis/apt.postgresql.org.sh +++ b/travis/apt.postgresql.org.sh @@ -126,7 +126,6 @@ EOF echo "Running apt-get update ..." apt-get update -apt-get install python3 cat < Date: Wed, 26 Apr 2017 16:41:10 +0300 Subject: [PATCH 0424/1124] remove useless includes in debug_print.c --- src/debug_print.c | 2 -- src/include/relation_info.h | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 03d28d53..36016861 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -11,11 +11,9 @@ #include "rangeset.h" #include "postgres.h" -#include "fmgr.h" #include "nodes/bitmapset.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" -#include "utils/lsyscache.h" /* diff --git a/src/include/relation_info.h b/src/include/relation_info.h index a926b96f..91678175 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -142,6 +142,7 @@ typedef struct Node *expr; /* planned expression */ List *expr_vars; /* vars from expression, lazy */ Bitmapset *expr_atts; /* set with attnums from expression */ + Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ bool attbyval; /* is partitioned column stored by value? 
*/ From 536ef572fe2d5934adf6457b62de2f7ce2ff0e91 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 16:46:31 +0300 Subject: [PATCH 0425/1124] Fix README for python tests --- tests/python/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/README.md b/tests/python/README.md index 8d07cc44..4e065e11 100644 --- a/tests/python/README.md +++ b/tests/python/README.md @@ -9,13 +9,13 @@ First of all you need to install `testgres` python module which contains useful functions to start postgres clusters and make queries: ``` -pip install testgres +pip3 install testgres ``` To run tests execute: ``` -python -m unittest partitioning_test +python3 -m unittest partitioning_test ``` from current directory. If you want to run a specific postgres build then From aa45c1e12d15826707039f0ae2ac0e2f17bdbc60 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 26 Apr 2017 17:33:33 +0300 Subject: [PATCH 0426/1124] Define compatible version of creating CreateStmt for new partition --- src/compat/pg_compat.c | 46 +++++++++++++++++++++ src/include/compat/pg_compat.h | 3 ++ src/partition_creation.c | 73 +++++++++++----------------------- 3 files changed, 73 insertions(+), 49 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index c50b1c26..86f060ac 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -19,6 +19,7 @@ #include "optimizer/clauses.h" #include "optimizer/pathnode.h" #include "optimizer/prep.h" +#include "parser/parse_utilcmd.h" #include "port.h" #include "utils.h" #include "utils/lsyscache.h" @@ -681,3 +682,48 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) rel->tuples = parent_rows; } + +/* + * Construct the sequence of utility statements to create a new partition + */ +List * +init_createstmts_for_partition(RangeVar *parent_rv, + RangeVar *partition_rv, + char *tablespace) +{ + TableLikeClause like_clause; + CreateStmt create_stmt; + List *result; + + /* 
Initialize TableLikeClause structure */ + NodeSetTag(&like_clause, T_TableLikeClause); + like_clause.relation = copyObject(parent_rv); + like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | + CREATE_TABLE_LIKE_INDEXES | + CREATE_TABLE_LIKE_STORAGE; + + /* Initialize CreateStmt structure */ + NodeSetTag(&create_stmt, T_CreateStmt); + create_stmt.relation = copyObject(partition_rv); + create_stmt.tableElts = list_make1(copyObject(&like_clause)); + create_stmt.inhRelations = list_make1(copyObject(parent_rv)); + create_stmt.ofTypename = NULL; + create_stmt.constraints = NIL; + create_stmt.options = NIL; + create_stmt.oncommit = ONCOMMIT_NOOP; + create_stmt.tablespacename = tablespace; + create_stmt.if_not_exists = false; + +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 + create_stmt.partition_info = NULL; +#endif + +#if PG_VERSION_NUM >= 100000 + create_stmt.partbound = NULL; + create_stmt.partspec = NULL; +#endif + + result = transformCreateStmt(&create_stmt, NULL); + + return result; +} diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 3b3f14bc..14a07da1 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -263,6 +263,9 @@ char get_rel_persistence(Oid relid); */ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +List *init_createstmts_for_partition(RangeVar *parent_rv, + RangeVar *partition_rv, + char *tablespace); #endif /* PG_COMPAT_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 13b131c3..d514ef60 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -34,7 +34,6 @@ #include "miscadmin.h" #include "parser/parse_func.h" #include "parser/parse_relation.h" -#include "parser/parse_utilcmd.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/datum.h" @@ -681,8 +680,6 @@ create_single_partition_internal(Oid parent_relid, /* Elements of the "CREATE TABLE" query tree */ RangeVar *parent_rv; - TableLikeClause 
like_clause; - CreateStmt create_stmt; List *create_stmts; ListCell *lc; @@ -702,6 +699,27 @@ create_single_partition_internal(Oid parent_relid, elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); + /* Do we have to escalate privileges? */ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Check that user's allowed to spawn partitions */ + if (ACLCHECK_OK != pg_class_aclcheck(parent_relid, save_userid, + ACL_SPAWN_PARTITIONS)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for parent relation \"%s\"", + get_rel_name_or_relid(parent_relid)), + errdetail("user is not allowed to create new partitions"), + errhint("consider granting INSERT privilege"))); + + /* Become superuser in order to bypass various ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + /* Cache parent's namespace and name */ parent_name = get_rel_name(parent_relid); parent_nsp = get_rel_namespace(parent_relid); @@ -725,52 +743,9 @@ create_single_partition_internal(Oid parent_relid, if (!tablespace) tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); - /* Initialize TableLikeClause structure */ - NodeSetTag(&like_clause, T_TableLikeClause); - like_clause.relation = copyObject(parent_rv); - like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | - CREATE_TABLE_LIKE_INDEXES | - CREATE_TABLE_LIKE_STORAGE; - - /* Initialize CreateStmt structure */ - NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = copyObject(partition_rv); - create_stmt.tableElts = list_make1(copyObject(&like_clause)); - create_stmt.inhRelations = list_make1(copyObject(parent_rv)); - create_stmt.ofTypename = NULL; - create_stmt.constraints = NIL; - create_stmt.options = NIL; - create_stmt.oncommit = ONCOMMIT_NOOP; - create_stmt.tablespacename = tablespace; - 
create_stmt.if_not_exists = false; - -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 - create_stmt.partition_info = NULL; -#endif - - /* Do we have to escalate privileges? */ - if (need_priv_escalation) - { - /* Get current user's Oid and security context */ - GetUserIdAndSecContext(&save_userid, &save_sec_context); - - /* Check that user's allowed to spawn partitions */ - if (ACLCHECK_OK != pg_class_aclcheck(parent_relid, save_userid, - ACL_SPAWN_PARTITIONS)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for parent relation \"%s\"", - get_rel_name_or_relid(parent_relid)), - errdetail("user is not allowed to create new partitions"), - errhint("consider granting INSERT privilege"))); - - /* Become superuser in order to bypass various ACL checks */ - SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, - save_sec_context | SECURITY_LOCAL_USERID_CHANGE); - } - - /* Generate columns using the parent table */ - create_stmts = transformCreateStmt(&create_stmt, NULL); + /* Obtain the sequence of Stmts to create partition and link it to parent */ + create_stmts = init_createstmts_for_partition(parent_rv, partition_rv, + tablespace); /* Create the partition and all required relations */ foreach (lc, create_stmts) From d1557d21b60ff202de364966f041a0100895cc1a Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 26 Apr 2017 18:43:06 +0300 Subject: [PATCH 0427/1124] Make compatible version of ProcessUtility routine --- src/include/compat/pg_compat.h | 26 ++++++++++++++++++++++++++ src/partition_creation.c | 22 ++++++---------------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 14a07da1..edb4176f 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -216,6 +216,32 @@ void McxtStatsInternal(MemoryContext context, int level, #endif +/* + * ProcessUtility + * + * for v10 set NULL into 'queryEnv' argument + */ +#if 
PG_VERSION_NUM >= 100000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 90500 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + ProcessUtility((parsetree), (queryString), (context), (params), \ + (dest), (completionTag)) +#endif + + /* * pull_var_clause() */ diff --git a/src/partition_creation.c b/src/partition_creation.c index d514ef60..7c8eca9b 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -786,22 +786,12 @@ create_single_partition_internal(Oid parent_relid, * call will stash the objects so created into our * event trigger context. 
*/ -#if PG_VERSION_NUM >= 100000 - ProcessUtility(NULL, - "we have to provide a query string", - PROCESS_UTILITY_SUBCOMMAND, - NULL, - NULL, - None_Receiver, - NULL); -#else - ProcessUtility(cur_stmt, - "we have to provide a query string", - PROCESS_UTILITY_SUBCOMMAND, - NULL, - None_Receiver, - NULL); -#endif + ProcessUtilityCompat(cur_stmt, + "we have to provide a query string", + PROCESS_UTILITY_SUBCOMMAND, + NULL, + None_Receiver, + NULL); } /* Update config one more time */ From 5d314ae240f2f33252bceebe8cab4914e25ba62c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 26 Apr 2017 19:25:34 +0300 Subject: [PATCH 0428/1124] Move some logic from PartitionUpdate to PartitionFilter --- Makefile | 1 + expected/pathman_update_node.out | 279 +++++++++++++++++++++++++++++++ sql/pathman_update_node.sql | 162 ++++++++++++++++++ src/include/partition_filter.h | 6 +- src/partition_filter.c | 48 +++++- src/partition_update.c | 19 +-- src/planner_tree_modification.c | 3 +- 7 files changed, 497 insertions(+), 21 deletions(-) create mode 100644 expected/pathman_update_node.out create mode 100644 sql/pathman_update_node.sql diff --git a/Makefile b/Makefile index 6d7d56a4..52c0652b 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,7 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ + pathman_update_node \ pathman_updates \ pathman_utility_stmt diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out new file mode 100644 index 00000000..e15d04f9 --- /dev/null +++ b/expected/pathman_update_node.out @@ -0,0 +1,279 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; +SET pg_pathman.enable_partitionupdate=on; +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT 
create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +NOTICE: sequence "test_range_seq" does not exist, skipping + create_range_partitions +------------------------- + 10 +(1 row) + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_1 | 5 | 1 + test_update_trigger.test_range_1 | 5 | 10 + test_update_trigger.test_range_1 | 5 | 2 + test_update_trigger.test_range_1 | 5 | 3 + test_update_trigger.test_range_1 | 5 | 4 + test_update_trigger.test_range_1 | 5 | 5 + test_update_trigger.test_range_1 | 5 | 6 + test_update_trigger.test_range_1 | 5 | 7 + test_update_trigger.test_range_1 | 5 | 8 + test_update_trigger.test_range_1 | 5 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_3 | 21 | 11 + test_update_trigger.test_range_3 | 22 | 12 + test_update_trigger.test_range_3 | 23 | 13 + test_update_trigger.test_range_3 | 24 | 14 + test_update_trigger.test_range_3 | 25 | 15 + test_update_trigger.test_range_3 | 26 | 16 + test_update_trigger.test_range_3 | 27 | 17 + test_update_trigger.test_range_3 | 28 | 18 + test_update_trigger.test_range_3 | 29 | 19 + test_update_trigger.test_range_3 | 30 | 20 + test_update_trigger.test_range_3 | 21 | 21 + test_update_trigger.test_range_3 | 22 | 22 + 
test_update_trigger.test_range_3 | 23 | 23 + test_update_trigger.test_range_3 | 24 | 24 + test_update_trigger.test_range_3 | 25 | 25 + test_update_trigger.test_range_3 | 26 | 26 + test_update_trigger.test_range_3 | 27 | 27 + test_update_trigger.test_range_3 | 28 | 28 + test_update_trigger.test_range_3 | 29 | 29 + test_update_trigger.test_range_3 | 30 | 30 +(20 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_9 | 90 | 80 + test_update_trigger.test_range_9 | 90 | 90 +(2 rows) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = -1 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_11 | -1 | 50 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + tableoid | val | comment +-----------------------------------+-----+--------- + test_update_trigger.test_range_10 | 100 | test! 
+(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 100 +(1 row) + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +ERROR: cannot spawn a partition +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 70 | 70 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + tableoid | val | comment +----------------------------------+-----+--------- + test_update_trigger.test_range_7 | 65 | 65 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); + attach_range_partition +------------------------------------ + test_update_trigger.test_range_inv +(1 row) + +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + tableoid | val | comment +------------------------------------+-----+--------- + test_update_trigger.test_range_inv | 105 | 60 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment 
CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); + append_range_partition +----------------------------------- + test_update_trigger.test_range_12 +(1 row) + +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + tableoid | val +-----------------------------------+----- + test_update_trigger.test_range_12 | 115 +(1 row) + +SELECT count(*) FROM test_update_trigger.test_range; + count +------- + 90 +(1 row) + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + tableoid | val | comment +---------------------------------+-----+--------- + test_update_trigger.test_hash_2 | 1 | 1 + test_update_trigger.test_hash_2 | 1 | 10 + test_update_trigger.test_hash_2 | 1 | 2 + test_update_trigger.test_hash_2 | 1 | 3 + test_update_trigger.test_hash_2 | 1 | 4 + test_update_trigger.test_hash_2 | 1 | 5 + test_update_trigger.test_hash_2 | 1 | 6 + test_update_trigger.test_hash_2 | 1 | 7 + test_update_trigger.test_hash_2 | 1 | 8 + test_update_trigger.test_hash_2 | 1 | 9 +(10 rows) + +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + tableoid | val | comment +----------+-----+--------- +(0 rows) 
+ +SELECT count(*) FROM test_update_trigger.test_hash; + count +------- + 10 +(1 row) + +DROP SCHEMA test_update_trigger CASCADE; +NOTICE: drop cascades to 18 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql new file mode 100644 index 00000000..c3cc8d4d --- /dev/null +++ b/sql/pathman_update_node.sql @@ -0,0 +1,162 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_trigger; +SET pg_pathman.enable_partitionupdate=on; + + +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); + + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val < 10 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single row */ +UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; + +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 90 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Move single row (create new partition) */ +UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; + +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range 
+WHERE val = -1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Update non-key column */ +UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; + +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 100 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_trigger.test_range_4; +UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; + +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 70 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test trivial move (same key) */ +UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; + +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 65 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_trigger.test_range', + 'test_update_trigger.test_range_inv', + 101::NUMERIC, 111::NUMERIC); +UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; + +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 105 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_range; + + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_trigger.test_range'); +UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; + +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_range +WHERE val = 115; + +SELECT count(*) FROM test_update_trigger.test_range; + + + +/* Partition table 
by HASH (INT4) */ +CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); + + +/* Move all rows into single partition */ +UPDATE test_update_trigger.test_hash SET val = 1; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 1 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + +/* Don't move any rows */ +UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_trigger.test_hash +WHERE val = 3 +ORDER BY comment; + +SELECT count(*) FROM test_update_trigger.test_hash; + + + +DROP SCHEMA test_update_trigger CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 893200af..c6792451 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -94,6 +94,9 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + ItemPointer ctid; /* ctid of rubuilt tuple + if there any, or NULL */ + bool keep_ctid; /* if false ctid will not filled */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; @@ -140,7 +143,8 @@ ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, - List *returning_list); + List *returning_list, + bool keep_ctid); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/partition_filter.c b/src/partition_filter.c index 2f491830..94826129 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -463,7 +463,8 @@ select_partition_for_insert(Datum value, Oid value_type, Plan * 
make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, - List *returning_list) + List *returning_list, + bool keep_ctid) { CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; @@ -494,9 +495,10 @@ make_partition_filter(Plan *subplan, Oid parent_relid, cscan->custom_scan_tlist = subplan->targetlist; /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make3(makeInteger(parent_relid), + cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), - returning_list); + returning_list, + makeInteger((int) keep_ctid)); return &cscan->scan.plan; } @@ -517,6 +519,7 @@ partition_filter_create_scan_state(CustomScan *node) state->partitioned_table = intVal(linitial(node->custom_private)); state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = lthird(node->custom_private); + state->keep_ctid = (bool) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -556,6 +559,9 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + /* clean ctid for old slot */ + state->ctid = NULL; + slot = ExecProcNode(child_ps); /* Save original ResultRelInfo */ @@ -569,6 +575,7 @@ partition_filter_exec(CustomScanState *node) ResultRelInfoHolder *rri_holder; bool isnull; Datum value; + ResultRelInfo *resultRelInfo; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -601,14 +608,45 @@ partition_filter_exec(CustomScanState *node) ResetExprContext(econtext); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = rri_holder->result_rel_info; + resultRelInfo = rri_holder->result_rel_info; + estate->es_result_relation_info = resultRelInfo; + + if (state->keep_ctid) + { + JunkFilter *junkfilter; + Datum datum; + char 
relkind; + + /* + * extract `ctid` junk attribute and save it in state, + * we need this step because if there will be conversion + * junk attributes will be removed from slot + */ + junkfilter = resultRelInfo->ri_junkFilter; + Assert(junkfilter != NULL); + + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + AttrNumber ctid_attno; + bool isNull; + + ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); + datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + state->ctid = (ItemPointer) DatumGetPointer(datum); + } + } /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { HeapTuple htup_old, htup_new; - Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; + Relation child_rel = resultRelInfo->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); diff --git a/src/partition_update.c b/src/partition_update.c index fab7f6f2..b95b7c43 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -77,7 +77,7 @@ make_partition_update(Plan *subplan, /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, - returning_list); + returning_list, true); cscan->custom_plans = list_make1(pfilter); cscan->scan.plan.targetlist = pfilter->targetlist; @@ -133,13 +133,14 @@ partition_update_exec(CustomScanState *node) ItemPointerData tuple_ctid; JunkFilter *junkfilter; EPQState epqstate; - AttrNumber ctid_attno; HeapTupleData oldtupdata; HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; EState *estate = node->ss.ps.state; + Assert(child_state->keep_ctid); + resultRelInfo = estate->es_result_relation_info; junkfilter = resultRelInfo->ri_junkFilter; Assert(junkfilter != NULL); @@ -148,19 +149,9 @@ 
partition_update_exec(CustomScanState *node) oldtuple = NULL; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) + if (relkind == RELKIND_RELATION && child_state->ctid != NULL) { - /* - * extract the 'ctid' junk attribute. - */ - Assert(resultRelInfo->ri_RelationDesc->rd_rel->relkind == RELKIND_RELATION); - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); + tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! */ tupleid = &tuple_ctid; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d4558c4b..5522dfaa 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -403,7 +403,8 @@ partition_filter_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->onConflictAction, - returning_list); + returning_list, + false); } } } From 11b1d5e626ddf7ad0aed6e863f3882127111a831 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Apr 2017 10:59:15 +0300 Subject: [PATCH 0429/1124] light refactoring (utils.c etc) --- src/include/partition_creation.h | 14 +++-- src/include/relation_info.h | 18 ++++++- src/include/utils.h | 4 +- src/init.c | 2 +- src/nodes_common.c | 4 +- src/partition_creation.c | 89 ++++++++++++++------------------ src/pg_pathman.c | 6 +-- src/pl_funcs.c | 4 +- src/pl_range_funcs.c | 14 ++--- src/utils.c | 64 +++++++++-------------- 10 files changed, 105 insertions(+), 114 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index ed66dec9..b1fd86ac 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -19,7 +19,7 @@ /* ACL privilege for partition creation */ -#define ACL_SPAWN_PARTITIONS 
ACL_INSERT +#define ACL_SPAWN_PARTITIONS ACL_INSERT /* Create RANGE partitions to store some value */ @@ -87,10 +87,14 @@ typedef struct /* Expression parsing functions */ PartExpressionInfo *get_part_expression_info(Oid relid, - const char *expr_string, bool check_hash_func, bool make_plan); - -Node *get_raw_expression(Oid relid, const char *expr, char **query_string_out, - Node **parsetree); + const char *expr_string, + bool check_hash_func, + bool make_plan); + +Node *parse_partitioning_expression(Oid relid, + const char *expression, + char **query_string_out, + Node **parsetree_out); /* Update triggers */ void create_single_update_trigger_internal(Oid partition_relid, diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 91678175..1d0f3254 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -18,10 +18,11 @@ #include "nodes/bitmapset.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" +#include "nodes/value.h" #include "port/atomics.h" #include "storage/lock.h" #include "utils/datum.h" -#include "nodes/primnodes.h" +#include "utils/lsyscache.h" /* Range bound */ @@ -237,6 +238,21 @@ PrelLastChild(const PartRelationInfo *prel) return PrelChildrenCount(prel) - 1; /* last partition */ } +static inline List * +PrelExpressionColumnNames(const PartRelationInfo *prel) +{ + List *columns = NIL; + int j = -1; + + while ((j = bms_next_member(prel->expr_atts, j)) >= 0) + { + char *attname = get_attname(prel->key, j); + columns = lappend(columns, makeString(attname)); + } + + return columns; +} + const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, diff --git a/src/include/utils.h b/src/include/utils.h index cd622840..8da250f8 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -28,7 +28,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); -bool match_expr_to_operand(Node *operand, Node *expr); 
+bool expr_matches_operand(Node *operand, Node *expr); /* * Misc. @@ -68,6 +68,4 @@ RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); AttrNumber *get_pathman_attributes_map(const PartRelationInfo *prel, Relation child); -List *get_part_expression_columns(const PartRelationInfo *prel); - #endif /* PATHMAN_UTILS_H */ diff --git a/src/init.c b/src/init.c index 27ec63f4..632190b8 100644 --- a/src/init.c +++ b/src/init.c @@ -1037,7 +1037,7 @@ validate_hash_constraint(const Expr *expr, hash_arg = (Node *) linitial(type_hash_proc_expr->args); /* Check arg of TYPE_HASH_PROC() */ - if (!match_expr_to_operand(prel->expr, hash_arg)) + if (!expr_matches_operand(prel->expr, hash_arg)) return false; /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 760ccb39..3c217f50 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -335,10 +335,10 @@ check_clause_for_expression(Node *node, struct check_clause_context *ctx) Node *left = linitial(expr->args), *right = lsecond(expr->args); - if (match_expr_to_operand(left, ctx->prel_expr)) + if (expr_matches_operand(left, ctx->prel_expr)) ctx->count += 1; - if (match_expr_to_operand(right, ctx->prel_expr)) + if (expr_matches_operand(right, ctx->prel_expr)) ctx->count += 1; return false; diff --git a/src/partition_creation.c b/src/partition_creation.c index c4eb5f74..2aeb9163 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -84,11 +84,13 @@ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_rel static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -static Node *get_constraint_expression(Oid parent_relid, - Oid *expr_type, List **columns); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); +static Node *get_partitioning_expression(Oid parent_relid, + Oid *expr_type, 
+ List **columns); + /* * --------------------------------------- * Public interface (partition creation) @@ -123,7 +125,7 @@ create_single_range_partition_internal(Oid parent_relid, } /* check pathman config and fill variables */ - expr = get_constraint_expression(parent_relid, NULL, &trigger_columns); + expr = get_partitioning_expression(parent_relid, NULL, &trigger_columns); /* Create a partition & get 'partitioning expression' */ partition_relid = create_single_partition_internal(parent_relid, @@ -163,7 +165,7 @@ create_single_hash_partition_internal(Oid parent_relid, char *tablespace) { Oid partition_relid, - value_type; + expr_type; Constraint *check_constr; Node *expr; init_callback_params callback_params; @@ -187,14 +189,14 @@ create_single_hash_partition_internal(Oid parent_relid, tablespace); /* check pathman config and fill variables */ - expr = get_constraint_expression(parent_relid, &value_type, &trigger_columns); + expr = get_partitioning_expression(parent_relid, &expr_type, &trigger_columns); /* Build check constraint for HASH partition */ check_constr = build_hash_check_constraint(partition_relid, expr, part_idx, part_count, - value_type); + expr_type); /* Cook args for init_callback */ MakeInitCallbackHashParams(&callback_params, @@ -1710,39 +1712,33 @@ validate_part_expression(Node *node, void *context) return expression_tree_walker(node, validate_part_expression, context); } -/* Wraps expression by SELECT query and returns parsed tree */ +/* Wraps expression by SELECT query and returns parse tree */ Node * -get_raw_expression(Oid relid, const char *expr, char **query_string_out, - Node **parsetree) +parse_partitioning_expression(Oid relid, + const char *expression, + char **query_string_out, + Node **parsetree_out) { - Node *result; - SelectStmt *select_stmt; - ResTarget *target; + SelectStmt *select_stmt; + List *parsetree_list; - char *fmt = "SELECT (%s) FROM ONLY %s.\"%s\""; - char *relname = get_rel_name(relid), - *namespace_name = 
get_namespace_name(get_rel_namespace(relid)); - List *parsetree_list; - char *query_string = psprintf(fmt, expr, namespace_name, relname); + char *sql = "SELECT (%s) FROM ONLY %s.\"%s\""; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, expression, nspname, relname); parsetree_list = raw_parser(query_string); Assert(list_length(parsetree_list) == 1); + select_stmt = (SelectStmt *) linitial(parsetree_list); + if (query_string_out) - { *query_string_out = query_string; - } - select_stmt = (SelectStmt *) linitial(parsetree_list); + if (parsetree_out) + *parsetree_out = (Node *) select_stmt; - if (parsetree) - { - *parsetree = (Node *) select_stmt; - } - - target = (ResTarget *) linitial(select_stmt->targetList); - result = (Node *) target->val; - return result; + return ((ResTarget *) linitial(select_stmt->targetList))->val; } /* @@ -1751,7 +1747,7 @@ get_raw_expression(Oid relid, const char *expr, char **query_string_out, */ PartExpressionInfo * get_part_expression_info(Oid relid, const char *expr_string, - bool check_hash_func, bool make_plan) + bool check_hash_func, bool make_plan) { Node *expr_node, *parsetree; @@ -1766,12 +1762,12 @@ get_part_expression_info(Oid relid, const char *expr_string, expr_info = palloc(sizeof(PartExpressionInfo)); pathman_parse_context = AllocSetContextCreate(TopPathmanContext, - "pathman parse context", - ALLOCSET_DEFAULT_SIZES); + "pathman parse context", + ALLOCSET_DEFAULT_SIZES); /* Keep raw expression */ - expr_info->raw_expr = get_raw_expression(relid, expr_string, - &query_string, &parsetree); + expr_info->raw_expr = parse_partitioning_expression(relid, expr_string, + &query_string, &parsetree); /* If expression is just column we check that is not null */ if (IsA(expr_info->raw_expr, ColumnRef)) @@ -1892,12 +1888,11 @@ extract_column_names(Node *node, struct extract_column_names_context *ctx) return raw_expression_tree_walker(node, 
extract_column_names, ctx); } -/* - * Returns raw partitioning expression, and if specified returns - * columns from expression and its type - */ +/* Returns raw partitioning expression + expr_type + columns */ static Node * -get_constraint_expression(Oid parent_relid, Oid *expr_type, List **columns) +get_partitioning_expression(Oid parent_relid, + Oid *expr_type, /* ret val #1 */ + List **columns) /* ret val #2 */ { /* Values extracted from PATHMAN_CONFIG */ Datum config_values[Natts_pathman_config]; @@ -1906,28 +1901,24 @@ get_constraint_expression(Oid parent_relid, Oid *expr_type, List **columns) char *expr_string; /* Check that table is registered in PATHMAN_CONFIG */ - if (!pathman_config_contains_relation(parent_relid, - config_values, config_nulls, NULL, NULL)) + if (!pathman_config_contains_relation(parent_relid, config_values, + config_nulls, NULL, NULL)) elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); - /* - * We need expression type for hash functions. Range functions don't need - * this feature. 
- */ + /* We need expression type for hash functions */ if (expr_type) *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); expr_string = TextDatumGetCString(config_values[Anum_pathman_config_expression - 1]); - expr = get_raw_expression(parent_relid, expr_string, NULL, NULL); + expr = parse_partitioning_expression(parent_relid, expr_string, NULL, NULL); pfree(expr_string); if (columns) { - struct extract_column_names_context ctx; - ctx.columns = NIL; - extract_column_names(expr, &ctx); - *columns = ctx.columns; + struct extract_column_names_context context = { NIL }; + extract_column_names(expr, &context); + *columns = context.columns; } return expr; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index fde8fcbe..9bf4837a 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -868,7 +868,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) Assert(exprnode != NULL); - if (!match_expr_to_operand(context->prel_expr, exprnode)) + if (!expr_matches_operand(context->prel_expr, exprnode)) goto handle_arrexpr_return; if (arraynode && IsA(arraynode, Const) && @@ -1146,14 +1146,14 @@ pull_var_param(const WalkerContext *ctx, Node *left = linitial(expr->args), *right = lsecond(expr->args); - if (match_expr_to_operand(left, ctx->prel_expr)) + if (expr_matches_operand(left, ctx->prel_expr)) { *var_ptr = left; *param_ptr = right; return true; } - if (match_expr_to_operand(right, ctx->prel_expr)) + if (expr_matches_operand(right, ctx->prel_expr)) { *var_ptr = right; *param_ptr = left; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 187ea11f..6951d79a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1332,7 +1332,7 @@ create_update_triggers(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Create trigger for parent */ - columns = get_part_expression_columns(prel); + columns = PrelExpressionColumnNames(prel); create_single_update_trigger_internal(parent, trigname, columns); /* Fetch children array */ 
@@ -1363,7 +1363,7 @@ create_single_update_trigger(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Generate list of columns used in expression */ - columns = get_part_expression_columns(prel); + columns = PrelExpressionColumnNames(prel); create_single_update_trigger_internal(child, trigname, columns); PG_RETURN_VOID(); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 12a34666..e08c2c93 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -60,8 +60,8 @@ static void merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts); static void modify_range_constraint(Oid child_relid, - const char *attname, - Oid atttype, + const char *expression, + Oid expression_type, const Bound *lower, const Bound *upper); static char *get_qualified_rel_name(Oid relid); @@ -554,7 +554,7 @@ build_range_condition(PG_FUNCTION_ARGS) MakeBoundInf(PLUS_INFINITY) : MakeBound(PG_GETARG_DATUM(3)); - expr = get_raw_expression(partition_relid, expression, NULL, NULL); + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); con = build_range_check_constraint(partition_relid, expr, &min, &max, @@ -1016,8 +1016,8 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) */ static void modify_range_constraint(Oid child_relid, - const char *attname, - Oid atttype, + const char *expression, + Oid expression_type, const Bound *lower, const Bound *upper) { @@ -1029,14 +1029,14 @@ modify_range_constraint(Oid child_relid, drop_check_constraint(child_relid); /* Parse expression */ - expr = get_raw_expression(child_relid, attname, NULL, NULL); + expr = parse_partitioning_expression(child_relid, expression, NULL, NULL); /* Build a new one */ constraint = build_range_check_constraint(child_relid, expr, lower, upper, - atttype); + expression_type); /* Open the relation and add new check constraint */ partition_rel = heap_open(child_relid, AccessExclusiveLock); diff --git a/src/utils.c b/src/utils.c index 
99089ae8..251f5757 100644 --- a/src/utils.c +++ b/src/utils.c @@ -105,6 +105,20 @@ check_security_policy_internal(Oid relid, Oid role) return true; } +/* Compare clause operand with expression */ +bool +expr_matches_operand(Node *operand, Node *expr) +{ + /* strip relabeling for both operand and expr */ + if (operand && IsA(operand, RelabelType)) + operand = (Node *) ((RelabelType *) operand)->arg; + + if (expr && IsA(expr, RelabelType)) + expr = (Node *) ((RelabelType *) expr)->arg; + + /* compare expressions and return result right away */ + return equal(expr, operand); +} /* @@ -199,8 +213,8 @@ get_rel_name_or_relid(Oid relid) char *relname = get_rel_name(relid); if (!relname) - return DatumGetCString(DirectFunctionCall1(oidout, - ObjectIdGetDatum(relid))); + return DatumGetCString(DirectFunctionCall1(oidout, ObjectIdGetDatum(relid))); + return relname; } @@ -315,8 +329,8 @@ fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) void extract_op_func_and_ret_type(char *opname, Oid type1, Oid type2, - Oid *op_func, /* returned value #1 */ - Oid *op_ret_type) /* returned value #2 */ + Oid *op_func, /* ret value #1 */ + Oid *op_ret_type) /* ret value #2 */ { Operator op; @@ -428,7 +442,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) Datum extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ Oid part_atttype, /* expression type */ - Oid *interval_type) /* returned value */ + Oid *interval_type) /* ret value #1 */ { Datum interval_binary; const char *interval_cstring; @@ -480,23 +494,6 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ return interval_binary; } -/* - * Compare clause operand with expression - */ -bool -match_expr_to_operand(Node *operand, Node *expr) -{ - /* strip relabeling for both operand and expr */ - if (operand && IsA(operand, RelabelType)) - operand = (Node *) ((RelabelType *) operand)->arg; - - if (expr && IsA(expr, RelabelType)) - expr = (Node *) 
((RelabelType *) expr)->arg; - - /* compare expressions and return result right away */ - return equal(expr, operand); -} - /* Convert Datum into CSTRING array */ char ** deconstruct_text_array(Datum array, int *array_size) @@ -594,13 +591,13 @@ get_pathman_attributes_map(const PartRelationInfo *prel, Relation child) while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { - int j; - char *attname = get_attname(parent_relid, i); + int j; + char *attname = get_attname(parent_relid, i); for (j = 0; j < natts; j++) { - Form_pg_attribute att = childDesc->attrs[j]; - char *child_attname; + Form_pg_attribute att = childDesc->attrs[j]; + char *child_attname; if (att->attisdropped) continue; /* attrMap[i] is already 0 */ @@ -619,18 +616,3 @@ get_pathman_attributes_map(const PartRelationInfo *prel, Relation child) return attrMap; } - -List * -get_part_expression_columns(const PartRelationInfo *prel) -{ - List *columns = NIL; - int j = -1; - - while ((j = bms_next_member(prel->expr_atts, j)) >= 0) - { - char *attname = get_attname(prel->key, j); - columns = lappend(columns, makeString(attname)); - } - - return columns; -} From 595617d6be8f23d345a94cfcd10d3d2797b22a9c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 27 Apr 2017 18:55:07 +0300 Subject: [PATCH 0430/1124] Fix update node when columns have different order --- src/hooks.c | 33 +++++++++++++++++++ src/include/hooks.h | 3 ++ src/include/partition_filter.h | 14 +++++--- src/include/partition_update.h | 1 + src/partition_filter.c | 57 ++++++++++++++++++++++++++------- src/partition_update.c | 31 ++++++++++-------- src/pg_pathman.c | 1 + src/planner_tree_modification.c | 2 +- src/utility_stmt_hooking.c | 3 +- src/utils.c | 8 ++--- 10 files changed, 116 insertions(+), 37 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 00e8ff37..ac2674e5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -16,6 +16,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" +#include "partition_update.h" #include 
"pathman_workers.h" #include "planner_tree_modification.h" #include "runtimeappend.h" @@ -766,3 +767,35 @@ pathman_process_utility_hook(Node *parsetree, context, params, dest, completionTag); } + + +void +pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, + uint64 count) +{ + PlanState *state = (PlanState *) queryDesc->planstate; + + if (IsA(state, ModifyTableState)) + { + int i; + ModifyTableState *mt_state = (ModifyTableState *) state; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; + + if (IsA(subplanstate, CustomScanState)) + { + if (strcmp(subplanstate->methods->CustomName, "PrepareInsert") == 0) + { + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + cstate->parent_state = mt_state; + cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; + mt_state->resultRelInfo->ri_junkFilter = NULL; + } + } + } + } + + standard_ExecutorRun(queryDesc, direction, count); +} diff --git a/src/include/hooks.h b/src/include/hooks.h index 95400fe2..15fa9906 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -13,6 +13,7 @@ #include "postgres.h" +#include "executor/executor.h" #include "optimizer/planner.h" #include "optimizer/paths.h" #include "parser/analyze.h" @@ -60,5 +61,7 @@ void pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag); +void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, + uint64 count); #endif /* PATHMAN_HOOKS_H */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index c6792451..ae73d589 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -39,6 +39,8 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ + JunkFilter *orig_junkFilter; /* we keep original JunkFilter from + 
ResultRelInfo here */ } ResultRelInfoHolder; @@ -94,9 +96,10 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ItemPointer ctid; /* ctid of rubuilt tuple - if there any, or NULL */ - bool keep_ctid; /* if false ctid will not filled */ + ItemPointer ctid; /* ctid of scanned tuple + if there any, or NULL, + filled when command_type == CMD_UPDATE*/ + CmdType command_type; ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; @@ -118,7 +121,8 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, bool speculative_inserts, Size table_entry_size, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg); + void *on_new_rri_holder_cb_arg, + CmdType cmd_type); void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels); @@ -144,7 +148,7 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, List *returning_list, - bool keep_ctid); + CmdType command_type); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/include/partition_update.h b/src/include/partition_update.h index b9607c5c..fc0c0033 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,6 +29,7 @@ typedef struct PartitionUpdateState Oid partitioned_table; List *returning_list; ModifyTableState *parent_state; + JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 94826129..76d62fe4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -145,7 +145,8 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, bool speculative_inserts, Size table_entry_size, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg) + void *on_new_rri_holder_cb_arg, + CmdType cmd_type) { HASHCTL 
*result_rels_table_config = &parts_storage->result_rels_table_config; @@ -168,7 +169,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->callback_arg = on_new_rri_holder_cb_arg; /* Currenly ResultPartsStorage is used only for INSERTs */ - parts_storage->command_type = CMD_INSERT; + parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; /* Partitions must remain locked till transaction's end */ @@ -311,12 +312,42 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); + if (parts_storage->command_type == CMD_UPDATE) + { + /* For UPDATE/DELETE, find the appropriate junk attr now */ + char relkind; + JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; + + relkind = child_result_rel_info->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) + { + junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); + if (!AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + { + /* + * When there is an AFTER trigger, there should be a + * wholerow attribute. 
+ */ + junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "wholerow"); + } + else + elog(ERROR, "wrong type of relation"); + + } + /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; + rri_holder->orig_junkFilter = child_result_rel_info->ri_junkFilter; + + if (parts_storage->command_type == CMD_UPDATE) + child_result_rel_info->ri_junkFilter = NULL; /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); @@ -464,7 +495,7 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, OnConflictAction conflict_action, List *returning_list, - bool keep_ctid) + CmdType command_type) { CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; @@ -498,7 +529,7 @@ make_partition_filter(Plan *subplan, Oid parent_relid, cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), returning_list, - makeInteger((int) keep_ctid)); + makeInteger(command_type)); return &cscan->scan.plan; } @@ -519,7 +550,7 @@ partition_filter_create_scan_state(CustomScan *node) state->partitioned_table = intVal(linitial(node->custom_private)); state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = lthird(node->custom_private); - state->keep_ctid = (bool) intVal(lfourth(node->custom_private)); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -544,7 +575,8 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, ResultPartsStorageStandard, prepare_rri_for_insert, - (void *) state); + (void *) state, + state->command_type); state->warning_triggered = false; } @@ -607,11 +639,12 @@ 
partition_filter_exec(CustomScanState *node) MemoryContextSwitchTo(old_cxt); ResetExprContext(econtext); - /* Magic: replace parent's ResultRelInfo with ours */ resultRelInfo = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - if (state->keep_ctid) + if (state->command_type == CMD_UPDATE) { JunkFilter *junkfilter; Datum datum; @@ -622,17 +655,15 @@ partition_filter_exec(CustomScanState *node) * we need this step because if there will be conversion * junk attributes will be removed from slot */ - junkfilter = resultRelInfo->ri_junkFilter; + junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { - AttrNumber ctid_attno; bool isNull; - ctid_attno = ExecFindJunkAttribute(junkfilter, "ctid"); - datum = ExecGetJunkAttribute(slot, ctid_attno, &isNull); + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... 
*/ if (isNull) elog(ERROR, "ctid is NULL"); @@ -661,6 +692,8 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } + else if (rri_holder->orig_junkFilter) + slot = ExecFilterJunk(rri_holder->orig_junkFilter, slot); return slot; } diff --git a/src/partition_update.c b/src/partition_update.c index b95b7c43..7923792d 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -77,7 +77,7 @@ make_partition_update(Plan *subplan, /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, - returning_list, true); + returning_list, CMD_UPDATE); cscan->custom_plans = list_make1(pfilter); cscan->scan.plan.targetlist = pfilter->targetlist; @@ -117,8 +117,13 @@ partition_update_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_update_exec(CustomScanState *node) { - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - TupleTableSlot *slot; + EState *estate = node->ss.ps.state; + PlanState *child_ps = (PlanState *) linitial(node->custom_ps); + TupleTableSlot *slot; + PartitionUpdateState *state = (PartitionUpdateState *) node; + + /* restore junkfilter in parent node */ + state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -137,26 +142,25 @@ partition_update_exec(CustomScanState *node) HeapTuple oldtuple; PartitionFilterState *child_state = (PartitionFilterState *) child_ps; - EState *estate = node->ss.ps.state; - - Assert(child_state->keep_ctid); - - resultRelInfo = estate->es_result_relation_info; - junkfilter = resultRelInfo->ri_junkFilter; - Assert(junkfilter != NULL); + Assert(child_state->command_type == CMD_UPDATE); EvalPlanQualSetSlot(&epqstate, slot); + resultRelInfo = estate->es_result_relation_info; oldtuple = NULL; + junkfilter = 
resultRelInfo->ri_junkFilter; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION && child_state->ctid != NULL) + + if (relkind == RELKIND_RELATION) { + Assert(child_state->ctid != NULL); + tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! */ tupleid = &tuple_ctid; } - else if (relkind == RELKIND_FOREIGN_TABLE) + else if (junkfilter != NULL && relkind == RELKIND_FOREIGN_TABLE) { if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) { @@ -173,8 +177,7 @@ partition_update_exec(CustomScanState *node) ItemPointerSetInvalid(&(oldtupdata.t_self)); /* Historically, view triggers see invalid t_tableOid. */ - oldtupdata.t_tableOid =RelationGetRelid(resultRelInfo->ri_RelationDesc); - + oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7061fe9f..ee0f7066 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -155,6 +155,7 @@ _PG_init(void) planner_hook = pathman_planner_hook; process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + ExecutorRun_hook = pathman_executor_hook; /* Initialize PgPro-specific subsystems */ init_expand_rte_hook(); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 5522dfaa..f863c863 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -404,7 +404,7 @@ partition_filter_visitor(Plan *plan, void *context) relid, modify_table->onConflictAction, returning_list, - false); + CMD_INSERT); } } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2f0b6fa6..c38c3cb8 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -566,7 +566,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, estate, false, ResultPartsStorageStandard, - 
prepare_rri_for_copy, NULL); + prepare_rri_for_copy, NULL, + CMD_INSERT); parts_storage.saved_rel_info = parent_result_rel; /* Set up a tuple slot too */ diff --git a/src/utils.c b/src/utils.c index 099f5a74..480f7ed7 100644 --- a/src/utils.c +++ b/src/utils.c @@ -118,20 +118,20 @@ get_pathman_schema(void) SysScanDesc scandesc; HeapTuple tuple; ScanKeyData entry[1]; - Oid ext_schema; + Oid ext_oid; /* It's impossible to fetch pg_pathman's schema now */ if (!IsTransactionState()) return InvalidOid; - ext_schema = get_extension_oid("pg_pathman", true); - if (ext_schema == InvalidOid) + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) return InvalidOid; /* exit if pg_pathman does not exist */ ScanKeyInit(&entry[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_schema)); + ObjectIdGetDatum(ext_oid)); rel = heap_open(ExtensionRelationId, AccessShareLock); scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, From 987d30d674d973567cf0270a4b500f60e115e751 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Apr 2017 12:39:16 +0300 Subject: [PATCH 0431/1124] Make little changes in code and add more comments --- src/partition_filter.c | 3 +-- src/partition_update.c | 16 +++++++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 76d62fe4..84c7287f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -314,7 +314,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (parts_storage->command_type == CMD_UPDATE) { - /* For UPDATE/DELETE, find the appropriate junk attr now */ char relkind; JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; @@ -692,7 +691,7 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } - else if (rri_holder->orig_junkFilter) + else if (state->command_type == CMD_UPDATE) slot = 
ExecFilterJunk(rri_holder->orig_junkFilter, slot); return slot; diff --git a/src/partition_update.c b/src/partition_update.c index 7923792d..27d0b300 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -122,7 +122,13 @@ partition_update_exec(CustomScanState *node) TupleTableSlot *slot; PartitionUpdateState *state = (PartitionUpdateState *) node; - /* restore junkfilter in parent node */ + /* + * Restore junkfilter in base resultRelInfo, + * we do it because child's RelResultInfo expects its existence + * for proper initialization. + * Alsowe change junk attribute number in JunkFilter, because + * it wasn't set in ModifyTable node initialization + */ state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; /* execute PartitionFilter child node */ @@ -136,7 +142,6 @@ partition_update_exec(CustomScanState *node) ResultRelInfo *resultRelInfo; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; - JunkFilter *junkfilter; EPQState epqstate; HeapTupleData oldtupdata; HeapTuple oldtuple; @@ -148,7 +153,6 @@ partition_update_exec(CustomScanState *node) resultRelInfo = estate->es_result_relation_info; oldtuple = NULL; - junkfilter = resultRelInfo->ri_junkFilter; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) @@ -160,9 +164,11 @@ partition_update_exec(CustomScanState *node) * ctid!! 
*/ tupleid = &tuple_ctid; } - else if (junkfilter != NULL && relkind == RELKIND_FOREIGN_TABLE) + else if (relkind == RELKIND_FOREIGN_TABLE) { - if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + JunkFilter *junkfilter = resultRelInfo->ri_junkFilter; + + if (junkfilter != NULL && AttributeNumberIsValid(junkfilter->jf_junkAttNo)) { datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, From 870d953e7eab2bb9e3e64b81e8b509c17617c32f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Apr 2017 16:45:02 +0300 Subject: [PATCH 0432/1124] Use tuple buffer in show_partition_list --- expected/pathman_callbacks.out | 85 ++++++++++- sql/pathman_callbacks.sql | 44 ++++++ src/pl_funcs.c | 250 +++++++++++++++++++-------------- 3 files changed, 275 insertions(+), 104 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index e0343526..b9a13456 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -198,6 +198,89 @@ INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 4 other objects +/* more complex test using rotation of tables */ +CREATE TABLE callbacks.abc(a INT4 NOT NULL); +INSERT INTO callbacks.abc + SELECT a FROM generate_series(1, 100) a; +SELECT create_range_partitions('callbacks.abc', 'a', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) +RETURNS VOID AS +$$ +DECLARE + relation regclass; + parent_rel regclass; +BEGIN + parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; + + -- drop "old" partitions + FOR relation IN (SELECT partition FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY 
range_min::INT4 DESC + OFFSET 4) -- remain 4 last partitions + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + parent | partition | parttype | partattr | range_min | range_max +---------------+------------------+----------+----------+-----------+----------- + callbacks.abc | callbacks.abc_1 | 2 | a | 1 | 11 + callbacks.abc | callbacks.abc_2 | 2 | a | 11 | 21 + callbacks.abc | callbacks.abc_3 | 2 | a | 21 | 31 + callbacks.abc | callbacks.abc_4 | 2 | a | 31 | 41 + callbacks.abc | callbacks.abc_5 | 2 | a | 41 | 51 + callbacks.abc | callbacks.abc_6 | 2 | a | 51 | 61 + callbacks.abc | callbacks.abc_7 | 2 | a | 61 | 71 + callbacks.abc | callbacks.abc_8 | 2 | a | 71 | 81 + callbacks.abc | callbacks.abc_9 | 2 | a | 81 | 91 + callbacks.abc | callbacks.abc_10 | 2 | a | 91 | 101 +(10 rows) + +SELECT set_init_callback('callbacks.abc', + 'callbacks.rotation_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO callbacks.abc VALUES (110); +NOTICE: dropping partition callbacks.abc_7 +NOTICE: dropping partition callbacks.abc_6 +NOTICE: dropping partition callbacks.abc_5 +NOTICE: dropping partition callbacks.abc_4 +NOTICE: dropping partition callbacks.abc_3 +NOTICE: dropping partition callbacks.abc_2 +NOTICE: dropping partition callbacks.abc_1 +INSERT INTO callbacks.abc VALUES (120); +INSERT INTO callbacks.abc VALUES (130); +NOTICE: dropping partition callbacks.abc_8 +INSERT INTO callbacks.abc VALUES (140); +NOTICE: dropping partition callbacks.abc_9 +INSERT INTO callbacks.abc VALUES (150); +NOTICE: dropping partition callbacks.abc_10 +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + parent | partition | parttype | partattr | range_min | range_max 
+---------------+------------------+----------+----------+-----------+----------- + callbacks.abc | callbacks.abc_11 | 2 | a | 101 | 111 + callbacks.abc | callbacks.abc_12 | 2 | a | 111 | 121 + callbacks.abc | callbacks.abc_13 | 2 | a | 121 | 131 + callbacks.abc | callbacks.abc_14 | 2 | a | 131 | 141 + callbacks.abc | callbacks.abc_15 | 2 | a | 141 | 151 +(5 rows) + +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 5 other objects DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index a97d3f57..202f4b60 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -97,6 +97,50 @@ INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ DROP TABLE callbacks.abc CASCADE; +/* more complex test using rotation of tables */ +CREATE TABLE callbacks.abc(a INT4 NOT NULL); +INSERT INTO callbacks.abc + SELECT a FROM generate_series(1, 100) a; +SELECT create_range_partitions('callbacks.abc', 'a', 1, 10, 10); + +CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) +RETURNS VOID AS +$$ +DECLARE + relation regclass; + parent_rel regclass; +BEGIN + parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; + + -- drop "old" partitions + FOR relation IN (SELECT partition FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY range_min::INT4 DESC + OFFSET 4) -- remain 4 last partitions + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; +END +$$ LANGUAGE plpgsql; + +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + +SELECT set_init_callback('callbacks.abc', + 'callbacks.rotation_callback(jsonb)'); + +INSERT INTO callbacks.abc VALUES (110); +INSERT INTO callbacks.abc VALUES (120); +INSERT INTO callbacks.abc VALUES (130); 
+INSERT INTO callbacks.abc VALUES (140); +INSERT INTO callbacks.abc VALUES (150); +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + +DROP TABLE callbacks.abc CASCADE; DROP SCHEMA callbacks CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 65251435..eff88115 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -87,7 +87,8 @@ typedef struct const PartRelationInfo *current_prel; /* selected PartRelationInfo */ - uint32 child_number; /* child we're looking at */ + Size child_number; /* child we're looking at */ + SPITupleTable *tuptable; /* buffer for tuples */ } show_partition_list_cxt; /* User context for function show_pathman_cache_stats_internal() */ @@ -341,16 +342,18 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) Datum show_partition_list_internal(PG_FUNCTION_ARGS) { - show_partition_list_cxt *usercxt; - FuncCallContext *funccxt; + show_partition_list_cxt *usercxt; + FuncCallContext *funccxt; + MemoryContext old_mcxt; + SPITupleTable *tuptable; /* * Initialize tuple descriptor & function call context. 
*/ if (SRF_IS_FIRSTCALL()) { - TupleDesc tupdesc; - MemoryContext old_mcxt; + TupleDesc tupdesc; + MemoryContext tuptabcxt; funccxt = SRF_FIRSTCALL_INIT(); @@ -386,137 +389,178 @@ show_partition_list_internal(PG_FUNCTION_ARGS) funccxt->tuple_desc = BlessTupleDesc(tupdesc); funccxt->user_fctx = (void *) usercxt; - MemoryContextSwitchTo(old_mcxt); - } - - funccxt = SRF_PERCALL_SETUP(); - usercxt = (show_partition_list_cxt *) funccxt->user_fctx; + /* initialize tuple table context */ + tuptabcxt = AllocSetContextCreate(CurrentMemoryContext, + "pg_pathman TupTable", + ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(tuptabcxt); - /* Iterate through pathman cache */ - for (;;) - { - const PartRelationInfo *prel; - HeapTuple htup; - Datum values[Natts_pathman_partition_list]; - bool isnull[Natts_pathman_partition_list] = { 0 }; - char *partattr_cstr; + /* initialize tuple table for partitions list, we use it as buffer */ + tuptable = (SPITupleTable *) palloc0(sizeof(SPITupleTable)); + usercxt->tuptable = tuptable; - /* Fetch next PartRelationInfo if needed */ - if (usercxt->current_prel == NULL) - { - HeapTuple pathman_config_htup; - Datum parent_table; - bool parent_table_isnull; - Oid parent_table_oid; + tuptable->tuptabcxt = tuptabcxt; - pathman_config_htup = heap_getnext(usercxt->pathman_config_scan, - ForwardScanDirection); - if (!HeapTupleIsValid(pathman_config_htup)) - break; + /* set up initial allocations */ + tuptable->alloced = tuptable->free = 128; + tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); - parent_table = heap_getattr(pathman_config_htup, - Anum_pathman_config_partrel, - RelationGetDescr(usercxt->pathman_config), - &parent_table_isnull); + MemoryContextSwitchTo(old_mcxt); - Assert(parent_table_isnull == false); - parent_table_oid = DatumGetObjectId(parent_table); + /* Iterate through pathman cache */ + for (;;) + { + const PartRelationInfo *prel; + HeapTuple htup; + Datum values[Natts_pathman_partition_list]; + bool 
isnull[Natts_pathman_partition_list] = { 0 }; + char *partattr_cstr; - usercxt->current_prel = get_pathman_relation_info(parent_table_oid); + /* Fetch next PartRelationInfo if needed */ if (usercxt->current_prel == NULL) - continue; + { + HeapTuple pathman_config_htup; + Datum parent_table; + bool parent_table_isnull; + Oid parent_table_oid; - usercxt->child_number = 0; - } + pathman_config_htup = heap_getnext(usercxt->pathman_config_scan, + ForwardScanDirection); + if (!HeapTupleIsValid(pathman_config_htup)) + break; - /* Alias to 'usercxt->current_prel' */ - prel = usercxt->current_prel; + parent_table = heap_getattr(pathman_config_htup, + Anum_pathman_config_partrel, + RelationGetDescr(usercxt->pathman_config), + &parent_table_isnull); - /* If we've run out of partitions, switch to the next 'prel' */ - if (usercxt->child_number >= PrelChildrenCount(prel)) - { - usercxt->current_prel = NULL; - usercxt->child_number = 0; + Assert(parent_table_isnull == false); + parent_table_oid = DatumGetObjectId(parent_table); - continue; - } - - partattr_cstr = get_attname(PrelParentRelid(prel), prel->attnum); - if (!partattr_cstr) - { - /* Parent does not exist, go to the next 'prel' */ - usercxt->current_prel = NULL; - continue; - } + usercxt->current_prel = get_pathman_relation_info(parent_table_oid); + if (usercxt->current_prel == NULL) + continue; - /* Fill in common values */ - values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); - values[Anum_pathman_pl_parttype - 1] = prel->parttype; - values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(partattr_cstr); + usercxt->child_number = 0; + } - switch (prel->parttype) - { - case PT_HASH: - { - Oid *children = PrelGetChildrenArray(prel), - child_oid = children[usercxt->child_number]; + /* Alias to 'usercxt->current_prel' */ + prel = usercxt->current_prel; - values[Anum_pathman_pl_partition - 1] = child_oid; - isnull[Anum_pathman_pl_range_min - 1] = true; - isnull[Anum_pathman_pl_range_max - 1] = true; - } - break; 
+ /* If we've run out of partitions, switch to the next 'prel' */ + if (usercxt->child_number >= PrelChildrenCount(prel)) + { + usercxt->current_prel = NULL; + usercxt->child_number = 0; - case PT_RANGE: - { - RangeEntry *re; + continue; + } - re = &PrelGetRangesArray(prel)[usercxt->child_number]; + partattr_cstr = get_attname(PrelParentRelid(prel), prel->attnum); + if (!partattr_cstr) + { + /* Parent does not exist, go to the next 'prel' */ + usercxt->current_prel = NULL; + continue; + } - values[Anum_pathman_pl_partition - 1] = re->child_oid; + /* Fill in common values */ + values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); + values[Anum_pathman_pl_parttype - 1] = prel->parttype; + values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(partattr_cstr); - /* Lower bound text */ - if (!IsInfinite(&re->min)) + switch (prel->parttype) + { + case PT_HASH: { - Datum rmin = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->min), - prel->atttype)); + Oid *children = PrelGetChildrenArray(prel), + child_oid = children[usercxt->child_number]; - values[Anum_pathman_pl_range_min - 1] = rmin; + values[Anum_pathman_pl_partition - 1] = child_oid; + isnull[Anum_pathman_pl_range_min - 1] = true; + isnull[Anum_pathman_pl_range_max - 1] = true; } - else isnull[Anum_pathman_pl_range_min - 1] = true; + break; - /* Upper bound text */ - if (!IsInfinite(&re->max)) + case PT_RANGE: { - Datum rmax = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->max), - prel->atttype)); + RangeEntry *re; + + re = &PrelGetRangesArray(prel)[usercxt->child_number]; + + values[Anum_pathman_pl_partition - 1] = re->child_oid; + + /* Lower bound text */ + if (!IsInfinite(&re->min)) + { + Datum rmin = CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->min), + prel->atttype)); + + values[Anum_pathman_pl_range_min - 1] = rmin; + } + else isnull[Anum_pathman_pl_range_min - 1] = true; - values[Anum_pathman_pl_range_max - 1] = rmax; + /* Upper bound text */ + if 
(!IsInfinite(&re->max)) + { + Datum rmax = CStringGetTextDatum( + datum_to_cstring(BoundGetValue(&re->max), + prel->atttype)); + + values[Anum_pathman_pl_range_max - 1] = rmax; + } + else isnull[Anum_pathman_pl_range_max - 1] = true; } - else isnull[Anum_pathman_pl_range_max - 1] = true; - } - break; + break; + + default: + elog(ERROR, "Unknown partitioning type %u", prel->parttype); + } + + /* Fill tuptable */ + old_mcxt = MemoryContextSwitchTo(tuptable->tuptabcxt); - default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); + /* Form output tuple */ + htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); + + if (tuptable->free == 0) + { + /* Double the size of the pointer array */ + tuptable->free = tuptable->alloced; + tuptable->alloced += tuptable->free; + tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, + tuptable->alloced * sizeof(HeapTuple)); + } + + tuptable->vals[tuptable->alloced - tuptable->free] = htup; + (tuptable->free)--; + + MemoryContextSwitchTo(old_mcxt); + + /* Switch to the next child */ + usercxt->child_number++; } - /* Switch to the next child */ - usercxt->child_number++; + /* Clean resources */ + heap_endscan(usercxt->pathman_config_scan); + UnregisterSnapshot(usercxt->snapshot); + heap_close(usercxt->pathman_config, AccessShareLock); - /* Form output tuple */ - htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); + usercxt->child_number = 0; + } + funccxt = SRF_PERCALL_SETUP(); + usercxt = (show_partition_list_cxt *) funccxt->user_fctx; + tuptable = usercxt->tuptable; + + if (usercxt->child_number < (tuptable->alloced - tuptable->free)) + { + HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number]; + usercxt->child_number++; SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(htup)); } - /* Clean resources */ - heap_endscan(usercxt->pathman_config_scan); - UnregisterSnapshot(usercxt->snapshot); - heap_close(usercxt->pathman_config, AccessShareLock); - SRF_RETURN_DONE(funccxt); } From 
72f59d4a7d860ac7f79a0b698d729fa6eef06c0e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Apr 2017 17:22:14 +0300 Subject: [PATCH 0433/1124] Improve tests --- expected/pathman_callbacks.out | 182 ++++++++++++++++++++++++++++----- sql/pathman_callbacks.sql | 17 ++- 2 files changed, 166 insertions(+), 33 deletions(-) diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index b9a13456..a178a972 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -218,10 +218,12 @@ BEGIN parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; -- drop "old" partitions - FOR relation IN (SELECT partition FROM pathman_partition_list - WHERE parent = parent_rel - ORDER BY range_min::INT4 DESC - OFFSET 4) -- remain 4 last partitions + FOR relation IN (SELECT partition FROM + (SELECT partition, range_min::INT4 FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY range_min::INT4 DESC + OFFSET 4) t -- remain 4 last partitions + ORDER BY range_min) LOOP RAISE NOTICE 'dropping partition %', relation; PERFORM drop_range_partition(relation); @@ -252,35 +254,167 @@ SELECT set_init_callback('callbacks.abc', (1 row) -INSERT INTO callbacks.abc VALUES (110); -NOTICE: dropping partition callbacks.abc_7 -NOTICE: dropping partition callbacks.abc_6 -NOTICE: dropping partition callbacks.abc_5 -NOTICE: dropping partition callbacks.abc_4 -NOTICE: dropping partition callbacks.abc_3 -NOTICE: dropping partition callbacks.abc_2 +INSERT INTO callbacks.abc VALUES (1000); NOTICE: dropping partition callbacks.abc_1 -INSERT INTO callbacks.abc VALUES (120); -INSERT INTO callbacks.abc VALUES (130); +NOTICE: dropping partition callbacks.abc_2 +NOTICE: dropping partition callbacks.abc_3 +NOTICE: dropping partition callbacks.abc_4 +NOTICE: dropping partition callbacks.abc_5 +NOTICE: dropping partition callbacks.abc_6 +NOTICE: dropping partition callbacks.abc_7 NOTICE: dropping partition callbacks.abc_8 -INSERT INTO 
callbacks.abc VALUES (140); NOTICE: dropping partition callbacks.abc_9 -INSERT INTO callbacks.abc VALUES (150); NOTICE: dropping partition callbacks.abc_10 +NOTICE: dropping partition callbacks.abc_11 +NOTICE: dropping partition callbacks.abc_12 +NOTICE: dropping partition callbacks.abc_13 +NOTICE: dropping partition callbacks.abc_14 +NOTICE: dropping partition callbacks.abc_15 +NOTICE: dropping partition callbacks.abc_16 +NOTICE: dropping partition callbacks.abc_17 +NOTICE: dropping partition callbacks.abc_18 +NOTICE: dropping partition callbacks.abc_19 +NOTICE: dropping partition callbacks.abc_20 +NOTICE: dropping partition callbacks.abc_21 +NOTICE: dropping partition callbacks.abc_22 +NOTICE: dropping partition callbacks.abc_23 +NOTICE: dropping partition callbacks.abc_24 +NOTICE: dropping partition callbacks.abc_25 +NOTICE: dropping partition callbacks.abc_26 +NOTICE: dropping partition callbacks.abc_27 +NOTICE: dropping partition callbacks.abc_28 +NOTICE: dropping partition callbacks.abc_29 +NOTICE: dropping partition callbacks.abc_30 +NOTICE: dropping partition callbacks.abc_31 +NOTICE: dropping partition callbacks.abc_32 +NOTICE: dropping partition callbacks.abc_33 +NOTICE: dropping partition callbacks.abc_34 +NOTICE: dropping partition callbacks.abc_35 +NOTICE: dropping partition callbacks.abc_36 +NOTICE: dropping partition callbacks.abc_37 +NOTICE: dropping partition callbacks.abc_38 +NOTICE: dropping partition callbacks.abc_39 +NOTICE: dropping partition callbacks.abc_40 +NOTICE: dropping partition callbacks.abc_41 +NOTICE: dropping partition callbacks.abc_42 +NOTICE: dropping partition callbacks.abc_43 +NOTICE: dropping partition callbacks.abc_44 +NOTICE: dropping partition callbacks.abc_45 +NOTICE: dropping partition callbacks.abc_46 +NOTICE: dropping partition callbacks.abc_47 +NOTICE: dropping partition callbacks.abc_48 +NOTICE: dropping partition callbacks.abc_49 +NOTICE: dropping partition callbacks.abc_50 +NOTICE: dropping partition 
callbacks.abc_51 +NOTICE: dropping partition callbacks.abc_52 +NOTICE: dropping partition callbacks.abc_53 +NOTICE: dropping partition callbacks.abc_54 +NOTICE: dropping partition callbacks.abc_55 +NOTICE: dropping partition callbacks.abc_56 +NOTICE: dropping partition callbacks.abc_57 +NOTICE: dropping partition callbacks.abc_58 +NOTICE: dropping partition callbacks.abc_59 +NOTICE: dropping partition callbacks.abc_60 +NOTICE: dropping partition callbacks.abc_61 +NOTICE: dropping partition callbacks.abc_62 +NOTICE: dropping partition callbacks.abc_63 +NOTICE: dropping partition callbacks.abc_64 +NOTICE: dropping partition callbacks.abc_65 +NOTICE: dropping partition callbacks.abc_66 +NOTICE: dropping partition callbacks.abc_67 +NOTICE: dropping partition callbacks.abc_68 +NOTICE: dropping partition callbacks.abc_69 +NOTICE: dropping partition callbacks.abc_70 +NOTICE: dropping partition callbacks.abc_71 +NOTICE: dropping partition callbacks.abc_72 +NOTICE: dropping partition callbacks.abc_73 +NOTICE: dropping partition callbacks.abc_74 +NOTICE: dropping partition callbacks.abc_75 +NOTICE: dropping partition callbacks.abc_76 +NOTICE: dropping partition callbacks.abc_77 +NOTICE: dropping partition callbacks.abc_78 +NOTICE: dropping partition callbacks.abc_79 +NOTICE: dropping partition callbacks.abc_80 +NOTICE: dropping partition callbacks.abc_81 +NOTICE: dropping partition callbacks.abc_82 +NOTICE: dropping partition callbacks.abc_83 +NOTICE: dropping partition callbacks.abc_84 +NOTICE: dropping partition callbacks.abc_85 +NOTICE: dropping partition callbacks.abc_86 +NOTICE: dropping partition callbacks.abc_87 +NOTICE: dropping partition callbacks.abc_88 +NOTICE: dropping partition callbacks.abc_89 +NOTICE: dropping partition callbacks.abc_90 +NOTICE: dropping partition callbacks.abc_91 +NOTICE: dropping partition callbacks.abc_92 +NOTICE: dropping partition callbacks.abc_93 +NOTICE: dropping partition callbacks.abc_94 +NOTICE: dropping partition callbacks.abc_95 
+NOTICE: dropping partition callbacks.abc_96 +INSERT INTO callbacks.abc VALUES (1500); +NOTICE: dropping partition callbacks.abc_97 +NOTICE: dropping partition callbacks.abc_98 +NOTICE: dropping partition callbacks.abc_99 +NOTICE: dropping partition callbacks.abc_100 +NOTICE: dropping partition callbacks.abc_101 +NOTICE: dropping partition callbacks.abc_102 +NOTICE: dropping partition callbacks.abc_103 +NOTICE: dropping partition callbacks.abc_104 +NOTICE: dropping partition callbacks.abc_105 +NOTICE: dropping partition callbacks.abc_106 +NOTICE: dropping partition callbacks.abc_107 +NOTICE: dropping partition callbacks.abc_108 +NOTICE: dropping partition callbacks.abc_109 +NOTICE: dropping partition callbacks.abc_110 +NOTICE: dropping partition callbacks.abc_111 +NOTICE: dropping partition callbacks.abc_112 +NOTICE: dropping partition callbacks.abc_113 +NOTICE: dropping partition callbacks.abc_114 +NOTICE: dropping partition callbacks.abc_115 +NOTICE: dropping partition callbacks.abc_116 +NOTICE: dropping partition callbacks.abc_117 +NOTICE: dropping partition callbacks.abc_118 +NOTICE: dropping partition callbacks.abc_119 +NOTICE: dropping partition callbacks.abc_120 +NOTICE: dropping partition callbacks.abc_121 +NOTICE: dropping partition callbacks.abc_122 +NOTICE: dropping partition callbacks.abc_123 +NOTICE: dropping partition callbacks.abc_124 +NOTICE: dropping partition callbacks.abc_125 +NOTICE: dropping partition callbacks.abc_126 +NOTICE: dropping partition callbacks.abc_127 +NOTICE: dropping partition callbacks.abc_128 +NOTICE: dropping partition callbacks.abc_129 +NOTICE: dropping partition callbacks.abc_130 +NOTICE: dropping partition callbacks.abc_131 +NOTICE: dropping partition callbacks.abc_132 +NOTICE: dropping partition callbacks.abc_133 +NOTICE: dropping partition callbacks.abc_134 +NOTICE: dropping partition callbacks.abc_135 +NOTICE: dropping partition callbacks.abc_136 +NOTICE: dropping partition callbacks.abc_137 +NOTICE: dropping partition 
callbacks.abc_138 +NOTICE: dropping partition callbacks.abc_139 +NOTICE: dropping partition callbacks.abc_140 +NOTICE: dropping partition callbacks.abc_141 +NOTICE: dropping partition callbacks.abc_142 +NOTICE: dropping partition callbacks.abc_143 +NOTICE: dropping partition callbacks.abc_144 +NOTICE: dropping partition callbacks.abc_145 +NOTICE: dropping partition callbacks.abc_146 SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS ORDER BY range_min::INT4; - parent | partition | parttype | partattr | range_min | range_max ----------------+------------------+----------+----------+-----------+----------- - callbacks.abc | callbacks.abc_11 | 2 | a | 101 | 111 - callbacks.abc | callbacks.abc_12 | 2 | a | 111 | 121 - callbacks.abc | callbacks.abc_13 | 2 | a | 121 | 131 - callbacks.abc | callbacks.abc_14 | 2 | a | 131 | 141 - callbacks.abc | callbacks.abc_15 | 2 | a | 141 | 151 -(5 rows) + parent | partition | parttype | partattr | range_min | range_max +---------------+-------------------+----------+----------+-----------+----------- + callbacks.abc | callbacks.abc_147 | 2 | a | 1461 | 1471 + callbacks.abc | callbacks.abc_148 | 2 | a | 1471 | 1481 + callbacks.abc | callbacks.abc_149 | 2 | a | 1481 | 1491 + callbacks.abc | callbacks.abc_150 | 2 | a | 1491 | 1501 +(4 rows) DROP TABLE callbacks.abc CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 4 other objects DROP SCHEMA callbacks CASCADE; NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 202f4b60..79325a2c 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -113,10 +113,12 @@ BEGIN parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; -- drop "old" partitions - FOR relation IN (SELECT partition FROM pathman_partition_list - WHERE parent = parent_rel - ORDER BY range_min::INT4 DESC - OFFSET 4) -- remain 4 last 
partitions + FOR relation IN (SELECT partition FROM + (SELECT partition, range_min::INT4 FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY range_min::INT4 DESC + OFFSET 4) t -- remain 4 last partitions + ORDER BY range_min) LOOP RAISE NOTICE 'dropping partition %', relation; PERFORM drop_range_partition(relation); @@ -131,11 +133,8 @@ ORDER BY range_min::INT4; SELECT set_init_callback('callbacks.abc', 'callbacks.rotation_callback(jsonb)'); -INSERT INTO callbacks.abc VALUES (110); -INSERT INTO callbacks.abc VALUES (120); -INSERT INTO callbacks.abc VALUES (130); -INSERT INTO callbacks.abc VALUES (140); -INSERT INTO callbacks.abc VALUES (150); +INSERT INTO callbacks.abc VALUES (1000); +INSERT INTO callbacks.abc VALUES (1500); SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS From ffa33e7c5e744b8f65987328c7bed1c0c28bfec8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Apr 2017 19:00:46 +0300 Subject: [PATCH 0434/1124] WIP refactoring, move functions to reasonable files --- expected/pathman_calamity.out | 2 +- expected/pathman_utility_stmt.out | 2 +- src/hooks.c | 8 +- src/include/init.h | 10 +- src/include/partition_creation.h | 17 -- src/include/partition_filter.h | 11 +- src/include/pathman.h | 4 +- src/include/relation_info.h | 52 ++--- src/include/utils.h | 2 - src/init.c | 97 +++++++-- src/partition_creation.c | 220 ++------------------ src/pl_funcs.c | 250 ++++++++++++++--------- src/pl_hash_funcs.c | 15 +- src/pl_range_funcs.c | 18 +- src/relation_info.c | 328 ++++++++++++++++++------------ src/utility_stmt_hooking.c | 15 +- src/utils.c | 42 ---- 17 files changed, 529 insertions(+), 564 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 8cac52b6..4f1ea10a 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -556,7 +556,7 @@ ERROR: relation "0" does not exist SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ 
ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ -ERROR: Cannot find type name for attribute "v_a_l" of relation "part_test" +ERROR: cannot find type name for attribute "v_a_l" of relation "part_test" SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ add_to_pathman_config ----------------------- diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 5575965b..8bed134e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -159,7 +159,7 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; /* COPY FROM (partitioned column is not specified) */ COPY copy_stmt_hooking.test(comment) FROM stdin; -ERROR: partition expression's value should not be NULL +ERROR: partitioning expression's value should not be NULL /* COPY FROM (we don't support FREEZE) */ COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); ERROR: freeze is not supported for partitioned tables diff --git a/src/hooks.c b/src/hooks.c index d9c9e51a..4877ce0b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -800,8 +800,10 @@ pathman_process_utility_hook(Node *parsetree, /* Override standard RENAME statement if needed */ else if (is_pathman_related_table_rename(parsetree, &relation_oid)) + { PathmanRenameConstraint(relation_oid, (const RenameStmt *) parsetree); + } /* Override standard ALTER COLUMN TYPE statement if needed */ else if (is_pathman_related_alter_column_type(parsetree, @@ -810,15 +812,15 @@ pathman_process_utility_hook(Node *parsetree, &part_type)) { if (part_type == PT_HASH) - { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot change type of column \"%s\"" " of table \"%s\" partitioned by HASH", get_attname(relation_oid, attr_number), get_rel_name(relation_oid)))); - } - mark_pathman_expression_for_update(relation_oid); + + /* Don't forget to invalidate parsed partitioning expression */ + 
pathman_config_invalidate_parsed_expression(relation_oid); } } diff --git a/src/include/init.h b/src/include/init.h index f6d97aae..cfe503b2 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -204,7 +204,15 @@ bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, TransactionId *xmin, - HeapTuple *tuple); + ItemPointerData *iptr); + +void pathman_config_invalidate_parsed_expression(Oid relid); + +void pathman_config_refresh_parsed_expression(Oid relid, + Datum *values, + bool *isnull, + ItemPointer iptr); + bool read_pathman_params(Oid relid, Datum *values, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index b1fd86ac..a194c165 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -78,23 +78,6 @@ Node * build_raw_hash_check_tree(Node *raw_expression, void drop_check_constraint(Oid relid); -typedef struct -{ - Oid expr_type; - Datum expr_datum; - Node *raw_expr; -} PartExpressionInfo; - -/* Expression parsing functions */ -PartExpressionInfo *get_part_expression_info(Oid relid, - const char *expr_string, - bool check_hash_func, - bool make_plan); - -Node *parse_partitioning_expression(Oid relid, - const char *expression, - char **query_string_out, - Node **parsetree_out); /* Update triggers */ void create_single_update_trigger_internal(Oid partition_relid, diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 5608432d..cccacf2f 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -25,12 +25,11 @@ #endif -#define ERR_PART_ATTR_NULL "partition expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS \ - "partition expression's value should be single, not set" -#define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" -#define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" -#define ERR_PART_DESC_CONVERT "could not convert row type for partition" +#define 
ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" +#define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" +#define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" +#define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" +#define ERR_PART_DESC_CONVERT "could not convert row type for partition" /* diff --git a/src/include/pathman.h b/src/include/pathman.h index 090b2176..d35de076 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -49,8 +49,8 @@ #define Anum_pathman_config_expression 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ -#define Anum_pathman_config_expression_p 5 /* parsed partition expression (text) */ -#define Anum_pathman_config_atttype 6 /* partitioned atttype */ +#define Anum_pathman_config_expression_p 5 /* parsed partitioning expression (text) */ +#define Anum_pathman_config_atttype 6 /* partitioned atttype (oid) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 1d0f3254..d5e81b28 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -14,6 +14,7 @@ #include "postgres.h" #include "access/attnum.h" +#include "access/sysattr.h" #include "fmgr.h" #include "nodes/bitmapset.h" #include "nodes/nodes.h" @@ -86,7 +87,10 @@ FreeBound(Bound *bound, bool byval) } inline static int -cmp_bounds(FmgrInfo *cmp_func, const Oid collid, const Bound *b1, const Bound *b2) +cmp_bounds(FmgrInfo *cmp_func, + const Oid collid, + const Bound *b1, + const Bound *b2) { if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) return -1; @@ -139,10 +143,10 @@ typedef struct Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ - const char 
*attname; /* original expression */ + const char *expr_cstr; /* original expression */ Node *expr; /* planned expression */ List *expr_vars; /* vars from expression, lazy */ - Bitmapset *expr_atts; /* set with attnums from expression */ + Bitmapset *expr_atts; /* attnums from expression */ Oid atttype; /* expression type */ int32 atttypmod; /* expression type modifier */ @@ -155,6 +159,8 @@ typedef struct hash_proc; /* hash function for 'atttype' */ } PartRelationInfo; +#define PART_EXPR_VARNO ( 1 ) + /* * PartParentInfo * Cached parent of the specified partition. @@ -166,18 +172,6 @@ typedef struct Oid parent_rel; } PartParentInfo; -/* - * CustomConst - * Modified Const that also stores 'varattno' attribute from some Var - * We can check that is CustomConst by checking `location` attrubute. - * It should be equal -2 - */ -typedef struct -{ - Const cns; - AttrNumber varattno; -} CustomConst; - /* * PartBoundInfo * Cached bounds of the specified partition. @@ -242,11 +236,13 @@ static inline List * PrelExpressionColumnNames(const PartRelationInfo *prel) { List *columns = NIL; - int j = -1; + int i = -1; - while ((j = bms_next_member(prel->expr_atts, j)) >= 0) + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { - char *attname = get_attname(prel->key, j); + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(PrelParentRelid(prel), attnum); + columns = lappend(columns, makeString(attname)); } @@ -257,15 +253,22 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, bool allow_incomplete); -PartRelationInfo * invalidate_pathman_relation_info(Oid relid, bool *found); +PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, bool 
unlock_if_not_found, LockAcquireResult *lock_result); -/* Expression related routines */ -void mark_pathman_expression_for_update(Oid relid); +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expression, + char **query_string_out, + Node **parsetree_out); + +Datum plan_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); /* Global invalidation routines */ void delay_pathman_shutdown(void); @@ -280,12 +283,12 @@ Oid get_parent_of_partition(Oid partition, PartParentSearch *status); /* Bounds cache */ void forget_bounds_of_partition(Oid partition); -PartBoundInfo * get_bounds_of_partition(Oid partition, - const PartRelationInfo *prel); +PartBoundInfo *get_bounds_of_partition(Oid partition, + const PartRelationInfo *prel); /* Safe casts for PartType */ PartType DatumGetPartType(Datum datum); -char * PartTypeToCString(PartType parttype); +char *PartTypeToCString(PartType parttype); /* PartRelationInfo checker */ void shout_if_prel_is_invalid(const Oid parent_oid, @@ -367,5 +370,6 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); + #endif /* RELATION_INFO_H */ diff --git a/src/include/utils.h b/src/include/utils.h index 8da250f8..bf061f45 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -65,7 +65,5 @@ Datum extract_binary_interval_from_text(Datum interval_text, char ** deconstruct_text_array(Datum array, int *array_size); RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); -AttrNumber *get_pathman_attributes_map(const PartRelationInfo *prel, - Relation child); #endif /* PATHMAN_UTILS_H */ diff --git a/src/init.c b/src/init.c index 632190b8..4a6d8de2 100644 --- a/src/init.c +++ b/src/init.c @@ -599,12 +599,11 @@ build_update_trigger_func_name_internal(Oid relid) /* * Check that relation 'relid' is partitioned by pg_pathman. - * - * Extract tuple into 'values' and 'isnull' if they're provided. 
+ * Extract tuple into 'values', 'isnull', 'xmin', 'iptr' if they're provided. */ bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, - TransactionId *xmin, HeapTuple *tuple) + TransactionId *xmin, ItemPointerData* iptr) { Relation rel; HeapScanDesc scan; @@ -662,8 +661,9 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, *xmin = DatumGetTransactionId(value); } - if (tuple) - *tuple = heap_copytuple(htup); + /* Set ItemPointer if necessary */ + if (iptr) + *iptr = htup->t_self; } /* Clean resources */ @@ -677,9 +677,78 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, return contains_rel; } +/* Invalidate parsed partitioning expression in PATHMAN_CONFIG */ +void +pathman_config_invalidate_parsed_expression(Oid relid) +{ + ItemPointerData iptr; /* pointer to tuple */ + Datum values[Natts_pathman_config]; + bool nulls[Natts_pathman_config]; + + /* Check that PATHMAN_CONFIG table contains this relation */ + if (pathman_config_contains_relation(relid, values, nulls, NULL, &iptr)) + { + Relation rel; + HeapTuple new_htup; + + /* Reset parsed expression */ + values[Anum_pathman_config_expression_p - 1] = (Datum) 0; + nulls[Anum_pathman_config_expression_p - 1] = true; + + /* Reset expression type */ + values[Anum_pathman_config_atttype - 1] = (Datum) 0; + nulls[Anum_pathman_config_atttype - 1] = true; + + rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + + /* Form new tuple and perform an update */ + new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls); + simple_heap_update(rel, &iptr, new_htup); + CatalogUpdateIndexes(rel, new_htup); + + heap_close(rel, RowExclusiveLock); + } +} + +/* Refresh parsed partitioning expression in PATHMAN_CONFIG */ +void +pathman_config_refresh_parsed_expression(Oid relid, + Datum *values, + bool *isnull, + ItemPointer iptr) +{ + char *expr_cstr; + Oid expr_type; + Datum expr_datum; + + Relation rel; + HeapTuple htup_new; + + /* get 
and parse expression */ + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + expr_datum = plan_partitioning_expression(relid, expr_cstr, &expr_type); + pfree(expr_cstr); + + /* prepare tuple values */ + values[Anum_pathman_config_expression_p - 1] = expr_datum; + isnull[Anum_pathman_config_expression_p - 1] = false; + + values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); + isnull[Anum_pathman_config_atttype - 1] = false; + + rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + + htup_new = heap_form_tuple(RelationGetDescr(rel), values, isnull); + simple_heap_update(rel, iptr, htup_new); + CatalogUpdateIndexes(rel, htup_new); + + heap_close(rel, RowExclusiveLock); +} + + /* - * Loads additional pathman parameters like 'enable_parent' or 'auto' - * from PATHMAN_CONFIG_PARAMS. + * Loads additional pathman parameters like 'enable_parent' + * or 'auto' from PATHMAN_CONFIG_PARAMS. */ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) @@ -722,6 +791,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) return row_found; } + /* * Go through the PATHMAN_CONFIG table and create PartRelationInfo entries. */ @@ -788,10 +858,9 @@ read_pathman_config(void) /* * Validates range constraint. It MUST have one of the following formats: - * - * EXPRESSION >= CONST AND EXPRESSION < CONST - * EXPRESSION >= CONST - * EXPRESSION < CONST + * 1) EXPRESSION >= CONST AND EXPRESSION < CONST + * 2) EXPRESSION >= CONST + * 3) EXPRESSION < CONST * * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. 
*/ @@ -838,7 +907,11 @@ validate_range_constraint(const Expr *expr, lower, upper, lower_null, upper_null); } -/* Validates a single expression of kind EXPRESSION >= CONST | EXPRESSION < CONST */ +/* + * Validates a single expression of kind: + * 1) EXPRESSION >= CONST + * 2) EXPRESSION < CONST + */ static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, diff --git a/src/partition_creation.c b/src/partition_creation.c index 2aeb9163..14d4c6d3 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -39,7 +39,6 @@ #include "parser/parse_relation.h" #include "parser/parse_utilcmd.h" #include "parser/analyze.h" -#include "tcop/tcopprot.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/datum.h" @@ -87,9 +86,9 @@ static Constraint *make_constraint_common(char *name, Node *raw_expr); static Value make_string_value_struct(char *str); static Value make_int_value_struct(int int_val); -static Node *get_partitioning_expression(Oid parent_relid, - Oid *expr_type, - List **columns); +static Node *build_partitioning_expression(Oid parent_relid, + Oid *expr_type, + List **columns); /* * --------------------------------------- @@ -108,7 +107,7 @@ create_single_range_partition_internal(Oid parent_relid, { Oid partition_relid; Constraint *check_constr; - Node *expr; + Node *part_expr; init_callback_params callback_params; List *trigger_columns; @@ -124,8 +123,8 @@ create_single_range_partition_internal(Oid parent_relid, partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); } - /* check pathman config and fill variables */ - expr = get_partitioning_expression(parent_relid, NULL, &trigger_columns); + /* Check pathman config anld fill variables */ + part_expr = build_partitioning_expression(parent_relid, NULL, &trigger_columns); /* Create a partition & get 'partitioning expression' */ partition_relid = create_single_partition_internal(parent_relid, @@ -134,7 +133,7 @@ create_single_range_partition_internal(Oid 
parent_relid, /* Build check constraint for RANGE partition */ check_constr = build_range_check_constraint(partition_relid, - expr, + part_expr, start_value, end_value, value_type); @@ -189,7 +188,7 @@ create_single_hash_partition_internal(Oid parent_relid, tablespace); /* check pathman config and fill variables */ - expr = get_partitioning_expression(parent_relid, &expr_type, &trigger_columns); + expr = build_partitioning_expression(parent_relid, &expr_type, &trigger_columns); /* Build check constraint for HASH partition */ check_constr = build_hash_check_constraint(partition_relid, @@ -237,11 +236,11 @@ create_single_partition_common(Oid parent_relid, /* Create trigger if needed */ if (has_update_trigger_internal(parent_relid)) { - const char *trigname; + const char *trigger_name; - trigname = build_update_trigger_name_internal(parent_relid); + trigger_name = build_update_trigger_name_internal(parent_relid); create_single_update_trigger_internal(partition_relid, - trigname, + trigger_name, trigger_columns); } @@ -1687,192 +1686,14 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } -/* - * Checks that columns are from partitioning relation - * Maybe there will be more checks later. 
- */ -static bool -validate_part_expression(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, Var)) - { - Var *var = (Var *) node; - if (var->varno != 1) - elog(ERROR, "Columns used in expression should only be related" - " with partitioning relation"); - return false; - } - - if (IsA(node, Param)) - elog(ERROR, "Partitioning expression should not contain parameters"); - - return expression_tree_walker(node, validate_part_expression, context); -} - -/* Wraps expression by SELECT query and returns parse tree */ -Node * -parse_partitioning_expression(Oid relid, - const char *expression, - char **query_string_out, - Node **parsetree_out) -{ - SelectStmt *select_stmt; - List *parsetree_list; - - char *sql = "SELECT (%s) FROM ONLY %s.\"%s\""; - char *relname = get_rel_name(relid), - *nspname = get_namespace_name(get_rel_namespace(relid)); - char *query_string = psprintf(sql, expression, nspname, relname); - - parsetree_list = raw_parser(query_string); - Assert(list_length(parsetree_list) == 1); - - select_stmt = (SelectStmt *) linitial(parsetree_list); - - if (query_string_out) - *query_string_out = query_string; - - if (parsetree_out) - *parsetree_out = (Node *) select_stmt; - - return ((ResTarget *) linitial(select_stmt->targetList))->val; -} - -/* - * Parses expression related to 'relid', and returns its type, - * raw expression tree, and if specified returns its plan - */ -PartExpressionInfo * -get_part_expression_info(Oid relid, const char *expr_string, - bool check_hash_func, bool make_plan) -{ - Node *expr_node, - *parsetree; - Query *query; - char *query_string, *out_string; - PartExpressionInfo *expr_info; - List *querytree_list; - PlannedStmt *plan; - TargetEntry *target_entry; - MemoryContext pathman_parse_context, oldcontext; - - expr_info = palloc(sizeof(PartExpressionInfo)); - - pathman_parse_context = AllocSetContextCreate(TopPathmanContext, - "pathman parse context", - ALLOCSET_DEFAULT_SIZES); - - /* Keep raw expression 
*/ - expr_info->raw_expr = parse_partitioning_expression(relid, expr_string, - &query_string, &parsetree); - - /* If expression is just column we check that is not null */ - if (IsA(expr_info->raw_expr, ColumnRef)) - { - ColumnRef *col = (ColumnRef *) expr_info->raw_expr; - if (list_length(col->fields) == 1) - { - HeapTuple tp; - bool result; - char *attname = strVal(linitial(col->fields)); - - /* check if attribute is nullable */ - tp = SearchSysCacheAttName(relid, attname); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = !att_tup->attnotnull; - ReleaseSysCache(tp); - } - else - elog(ERROR, "Cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - attname, get_rel_name_or_relid(relid)); - - if (result) - elog(ERROR, "partitioning key \"%s\" must be marked NOT NULL", attname); - } - } - expr_info->expr_datum = (Datum) 0; - - /* We don't need pathman activity initialization for this relation yet */ - pathman_hooks_enabled = false; - - /* - * We use separate memory context here, just to make sure we don't leave - * anything behind after analyze and planning. 
- * Parsed raw expression will stay in context of caller - */ - oldcontext = MemoryContextSwitchTo(pathman_parse_context); - - /* This will fail with elog in case of wrong expression - * with more or less understable text */ - querytree_list = pg_analyze_and_rewrite(parsetree, - query_string, NULL, 0); - query = (Query *) linitial(querytree_list); - - /* expr_node is node that we need for further use */ - target_entry = linitial(query->targetList); - expr_node = (Node *) target_entry->expr; - - /* Now we have node and can determine type of that node */ - expr_info->expr_type = exprType(expr_node); - - if (check_hash_func) - { - TypeCacheEntry *tce; - - tce = lookup_type_cache(expr_info->expr_type, TYPECACHE_HASH_PROC); - if (tce->hash_proc == InvalidOid) - elog(ERROR, "Expression should be hashable"); - } - - if (!make_plan) - goto end; - - /* Plan this query. We reuse 'expr_node' here */ - plan = pg_plan_query(query, 0, NULL); - if (IsA(plan->planTree, IndexOnlyScan)) - /* we get IndexOnlyScan in targetlist if expression is primary key */ - target_entry = linitial(((IndexOnlyScan *) plan->planTree)->indextlist); - else - target_entry = linitial(plan->planTree->targetlist); - - expr_node = (Node *) target_entry->expr; - - expr_node = eval_const_expressions(NULL, expr_node); - validate_part_expression(expr_node, NULL); - if (contain_mutable_functions(expr_node)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in partitioning expression must be marked IMMUTABLE"))); - - out_string = nodeToString(expr_node); - - MemoryContextSwitchTo(oldcontext); - - /* Save expression string as datum and free memory from planning stage */ - expr_info->expr_datum = CStringGetTextDatum(out_string); - MemoryContextReset(pathman_parse_context); - -end: - /* Enable pathman hooks */ - pathman_hooks_enabled = true; - - return expr_info; -} - -struct extract_column_names_context +typedef struct { List *columns; -}; +} extract_column_names_cxt; /* Extract 
column names from raw expression */ static bool -extract_column_names(Node *node, struct extract_column_names_context *ctx) +extract_column_names(Node *node, extract_column_names_cxt *cxt) { if (node == NULL) return false; @@ -1880,19 +1701,20 @@ extract_column_names(Node *node, struct extract_column_names_context *ctx) if (IsA(node, ColumnRef)) { ListCell *lc; + foreach(lc, ((ColumnRef *) node)->fields) if (IsA(lfirst(lc), String)) - ctx->columns = lappend(ctx->columns, lfirst(lc)); + cxt->columns = lappend(cxt->columns, lfirst(lc)); } - return raw_expression_tree_walker(node, extract_column_names, ctx); + return raw_expression_tree_walker(node, extract_column_names, cxt); } /* Returns raw partitioning expression + expr_type + columns */ static Node * -get_partitioning_expression(Oid parent_relid, - Oid *expr_type, /* ret val #1 */ - List **columns) /* ret val #2 */ +build_partitioning_expression(Oid parent_relid, + Oid *expr_type, /* ret val #1 */ + List **columns) /* ret val #2 */ { /* Values extracted from PATHMAN_CONFIG */ Datum config_values[Natts_pathman_config]; @@ -1916,7 +1738,7 @@ get_partitioning_expression(Oid parent_relid, if (columns) { - struct extract_column_names_context context = { NIL }; + extract_column_names_cxt context = { NIL }; extract_column_names(expr, &context); *columns = context.columns; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 6951d79a..d8aa8e28 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -100,6 +100,14 @@ typedef struct } show_cache_stats_cxt; +static AttrNumber *pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, + Relation child_rel); + +static ExprState *pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, + Relation source_rel, + HeapTuple new_tuple, + Oid *expr_type); + static void pathman_update_trigger_func_move_tuple(Relation source_rel, Relation target_rel, HeapTuple old_tuple, @@ -435,9 +443,9 @@ show_partition_list_internal(PG_FUNCTION_ARGS) } /* Fill in common values */ 
- values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); - values[Anum_pathman_pl_parttype - 1] = prel->parttype; - values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(prel->attname); + values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); + values[Anum_pathman_pl_parttype - 1] = prel->parttype; + values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(prel->expr_cstr); switch (prel->parttype) { @@ -660,11 +668,12 @@ add_to_pathman_config(PG_FUNCTION_ARGS) bool isnull[Natts_pathman_config]; bool refresh_part_info; HeapTuple htup; - CatalogIndexState indstate; - PathmanInitState init_state; - PartExpressionInfo *expr_info; - MemoryContext old_mcxt = CurrentMemoryContext; + Oid expr_type; + Datum expr_datum; + + PathmanInitState init_state; + MemoryContext old_mcxt = CurrentMemoryContext; if (!PG_ARGISNULL(0)) { @@ -698,8 +707,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; /* Parse and check expression */ - expr_info = get_part_expression_info(relid, expression, (parttype == PT_HASH), true); - Assert(expr_info->expr_datum != (Datum) 0); + expr_datum = plan_partitioning_expression(relid, expression, &expr_type); /* * Initialize columns (partrel, attname, parttype, range_interval). 
@@ -713,10 +721,10 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_expression - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expression - 1] = false; - values[Anum_pathman_config_expression_p - 1] = expr_info->expr_datum; + values[Anum_pathman_config_expression_p - 1] = expr_datum; isnull[Anum_pathman_config_expression_p - 1] = false; - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); + values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); isnull[Anum_pathman_config_atttype - 1] = false; if (parttype == PT_RANGE) @@ -732,14 +740,16 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); simple_heap_insert(pathman_config, htup); - indstate = CatalogOpenIndexes(pathman_config); - CatalogIndexInsert(indstate, htup); - CatalogCloseIndexes(indstate); + CatalogUpdateIndexes(pathman_config, htup); + heap_close(pathman_config, RowExclusiveLock); + /* FIXME: check pg_inherits instead of this argument */ refresh_part_info = PG_GETARG_BOOL(3); + if (refresh_part_info) { /* Now try to create a PartRelationInfo */ @@ -1012,83 +1022,6 @@ is_operator_supported(PG_FUNCTION_ARGS) PG_RETURN_BOOL(OidIsValid(opid)); } -struct change_vars_context -{ - HeapTuple tuple; - TupleDesc tuple_desc; - AttrNumber *attributes_map; -}; - -/* - * To prevent calculation of Vars in expression, we change them with - * Const, and fill them with values from current tuple - */ -static Node * -change_vars_to_consts(Node *node, struct change_vars_context *ctx) -{ - const TypeCacheEntry *typcache; - - if (IsA(node, Var)) - { - Var *var = (Var *) node; - AttrNumber varattno = ctx->attributes_map[var->varattno - 1]; - Oid atttype; - Const *new_const = makeNode(Const); - HeapTuple tp; - - Assert(var->varno == 1); - if (varattno == 0) - 
elog(ERROR, "Couldn't find attribute used in expression in child relation"); - - /* we suppose that type can be different from parent */ - atttype = ctx->tuple_desc->attrs[varattno - 1]->atttypid; - - tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(atttype)); - if (HeapTupleIsValid(tp)) - { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); - new_const->consttypmod = typtup->typtypmod; - new_const->constcollid = typtup->typcollation; - ReleaseSysCache(tp); - } - else - elog(ERROR, "Something went wrong while getting type information"); - - typcache = lookup_type_cache(atttype, 0); - new_const->constbyval = typcache->typbyval; - new_const->constlen = typcache->typlen; - new_const->consttype = atttype; - new_const->location = -1; - - /* extract value from NEW tuple */ - new_const->constvalue = heap_getattr(ctx->tuple, - varattno, - ctx->tuple_desc, - &new_const->constisnull); - return (Node *) new_const; - } - return expression_tree_mutator(node, change_vars_to_consts, (void *) ctx); -} - -static ExprState * -prepare_expr_for_execution(const PartRelationInfo *prel, Relation source_rel, - HeapTuple tuple, Oid *value_type) -{ - struct change_vars_context ctx; - Node *expr; - ExprState *expr_state; - - Assert(value_type); - - ctx.tuple = tuple; - ctx.attributes_map = get_pathman_attributes_map(prel, source_rel); - ctx.tuple_desc = RelationGetDescr(source_rel); - expr = change_vars_to_consts(prel->expr, &ctx); - *value_type = exprType(expr); - expr_state = ExecInitExpr((Expr *) expr, NULL); - - return expr_state; -} /* * -------------------------- @@ -1121,7 +1054,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) ExprContext *econtext; ExprState *expr_state; - MemoryContext old_cxt; + MemoryContext old_mcxt; PartParentSearch parent_search; const PartRelationInfo *prel; @@ -1158,11 +1091,13 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* Execute partitioning expression */ econtext = CreateStandaloneExprContext(); - old_cxt = 
MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); - expr_state = prepare_expr_for_execution(prel, source_rel, new_tuple, - &value_type); + old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + expr_state = pathman_update_trigger_build_expr_state(prel, + source_rel, + new_tuple, + &value_type); value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - MemoryContextSwitchTo(old_cxt); + MemoryContextSwitchTo(old_mcxt); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); @@ -1181,7 +1116,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) else if (nparts == 0) { target_relid = create_partitions_for_value(PrelParentRelid(prel), - value, prel->atttype); + value, value_type); /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); @@ -1219,6 +1154,127 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_POINTER(new_tuple); } +struct replace_vars_cxt +{ + HeapTuple new_tuple; + TupleDesc tuple_desc; + AttrNumber *attributes_map; +}; + +/* Replace Vars with values from 'new_tuple' (Consts) */ +static Node * +replace_vars_with_consts(Node *node, struct replace_vars_cxt *ctx) +{ + const TypeCacheEntry *typcache; + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + AttrNumber varattno = ctx->attributes_map[var->varattno - 1]; + Oid vartype; + Const *new_const = makeNode(Const); + HeapTuple htup; + + Assert(var->varno == PART_EXPR_VARNO); + if (varattno == 0) + elog(ERROR, ERR_PART_DESC_CONVERT); + + /* we suppose that type can be different from parent */ + vartype = ctx->tuple_desc->attrs[varattno - 1]->atttypid; + + htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(vartype)); + if (HeapTupleIsValid(htup)) + { + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(htup); + new_const->consttypmod = typtup->typtypmod; + new_const->constcollid = typtup->typcollation; + ReleaseSysCache(htup); + } + else elog(ERROR, "cache lookup failed for type %u", vartype); + + typcache = 
lookup_type_cache(vartype, 0); + new_const->constbyval = typcache->typbyval; + new_const->constlen = typcache->typlen; + new_const->consttype = vartype; + new_const->location = -1; + + /* extract value from NEW tuple */ + new_const->constvalue = heap_getattr(ctx->new_tuple, + varattno, + ctx->tuple_desc, + &new_const->constisnull); + return (Node *) new_const; + } + + return expression_tree_mutator(node, replace_vars_with_consts, (void *) ctx); +} + +/* + * Get attributes map between parent and child relation. + * This is simplified version of functions that return TupleConversionMap. + * And it should be faster if expression uses not all fields from relation. + */ +static AttrNumber * +pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, + Relation child_rel) +{ + AttrNumber i = -1; + Oid parent_relid = PrelParentRelid(prel); + TupleDesc child_descr = RelationGetDescr(child_rel); + int natts = child_descr->natts; + AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); + + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + int j; + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(parent_relid, attnum); + + for (j = 0; j < natts; j++) + { + Form_pg_attribute att = child_descr->attrs[j]; + + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ + + if (strcmp(NameStr(att->attname), attname) == 0) + { + result[attnum - 1] = (AttrNumber) (j + 1); + break; + } + } + + if (result[attnum - 1] == 0) + elog(ERROR, "Couldn't find '%s' column in child relation", attname); + } + + return result; +} + +static ExprState * +pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, + Relation source_rel, + HeapTuple new_tuple, + Oid *expr_type) /* ret value #1 */ +{ + struct replace_vars_cxt ctx; + Node *expr; + ExprState *expr_state; + + ctx.new_tuple = new_tuple; + ctx.attributes_map = pathman_update_trigger_build_attr_map(prel, source_rel); + ctx.tuple_desc = 
RelationGetDescr(source_rel); + + expr = replace_vars_with_consts(prel->expr, &ctx); + expr_state = ExecInitExpr((Expr *) expr, NULL); + + AssertArg(expr_type); + *expr_type = exprType(expr); + + return expr_state; +} + + /* Move tuple to new partition (delete 'old_tuple' + insert 'new_tuple') */ static void pathman_update_trigger_func_move_tuple(Relation source_rel, diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 7b056f3b..d59e4ca7 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -121,13 +121,12 @@ get_hash_part_idx(PG_FUNCTION_ARGS) Datum build_hash_condition(PG_FUNCTION_ARGS) { - Oid atttype = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - uint32 part_count = PG_GETARG_UINT32(2), - part_idx = PG_GETARG_UINT32(3); + Oid expr_type = PG_GETARG_OID(0); + char *expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + uint32 part_count = PG_GETARG_UINT32(2), + part_idx = PG_GETARG_UINT32(3); TypeCacheEntry *tce; - char *attname_cstring = text_to_cstring(attname); char *result; @@ -136,20 +135,20 @@ build_hash_condition(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition_index' must be lower than 'partitions_count'"))); - tce = lookup_type_cache(atttype, TYPECACHE_HASH_PROC); + tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); /* Check that HASH function exists */ if (!OidIsValid(tce->hash_proc)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("no hash function for type %s", - format_type_be(atttype)))); + format_type_be(expr_type)))); /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", get_namespace_name(get_pathman_schema()), get_func_name(tce->hash_proc), - attname_cstring, + expr_cstr, part_count, part_idx); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index e08c2c93..fff7c76d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -59,7 +59,7 @@ static void check_range_adjacence(Oid cmp_proc, Oid collid, List 
*ranges); static void merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts); -static void modify_range_constraint(Oid child_relid, +static void modify_range_constraint(Oid partition_relid, const char *expression, Oid expression_type, const Bound *lower, @@ -522,9 +522,9 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) Datum build_range_condition(PG_FUNCTION_ARGS) { - Node *expr; Oid partition_relid; char *expression; + Node *expr; Bound min, max; @@ -708,7 +708,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Drop old constraint and create a new one */ modify_range_constraint(parts[0], - prel->attname, + prel->expr_cstr, prel->atttype, &first->min, &last->max); @@ -791,7 +791,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Drop old constraint and create a new one */ modify_range_constraint(next->child_oid, - prel->attname, + prel->expr_cstr, prel->atttype, &cur->min, &next->max); @@ -1015,7 +1015,7 @@ interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) * a new one with specified boundaries */ static void -modify_range_constraint(Oid child_relid, +modify_range_constraint(Oid partition_relid, const char *expression, Oid expression_type, const Bound *lower, @@ -1026,20 +1026,20 @@ modify_range_constraint(Oid child_relid, Relation partition_rel; /* Drop old constraint */ - drop_check_constraint(child_relid); + drop_check_constraint(partition_relid); /* Parse expression */ - expr = parse_partitioning_expression(child_relid, expression, NULL, NULL); + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); /* Build a new one */ - constraint = build_range_check_constraint(child_relid, + constraint = build_range_check_constraint(partition_relid, expr, lower, upper, expression_type); /* Open the relation and add new check constraint */ - partition_rel = heap_open(child_relid, AccessExclusiveLock); + partition_rel = heap_open(partition_relid, AccessExclusiveLock); 
AddRelationNewConstraints(partition_rel, NIL, list_make1(constraint), false, true, true); diff --git a/src/relation_info.c b/src/relation_info.c index b865c5dd..f7acd90a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -25,7 +25,10 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#include "optimizer/var.h" +#include "parser/parser.h" #include "storage/lmgr.h" +#include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" @@ -80,6 +83,7 @@ static bool delayed_shutdown = false; /* pathman was dropped */ list = NIL; \ } while (0) + static bool try_perform_parent_refresh(Oid parent); static Oid try_syscache_parent_search(Oid partition, PartParentSearch *status); static Oid get_parent_of_partition_internal(Oid partition, @@ -97,9 +101,7 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, const Expr *constraint_expr); static int cmp_range_entries(const void *p1, const void *p2, void *arg); -static void update_parsed_expression(Oid relid, HeapTuple tuple, - Datum *values, bool *nulls); -static void fill_part_expression_vars(PartRelationInfo *prel); + void init_relation_info_static_data(void) @@ -134,8 +136,9 @@ refresh_pathman_relation_info(Oid relid, Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; char *expr; - HeapTuple tp; - MemoryContext oldcontext; + Relids expr_varnos; + HeapTuple htup; + MemoryContext old_mcxt; AssertTemporaryContext(); prel = invalidate_pathman_relation_info(relid, NULL); @@ -170,25 +173,34 @@ refresh_pathman_relation_info(Oid relid, expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); /* Expression and attname should be saved in cache context */ - oldcontext = MemoryContextSwitchTo(PathmanRelationCacheContext); + old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); - prel->attname = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + /* Build 
partitioning expression tree */ + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); prel->expr = (Node *) stringToNode(expr); fix_opfuncids(prel->expr); - fill_part_expression_vars((PartRelationInfo *) prel); - MemoryContextSwitchTo(oldcontext); + expr_varnos = pull_varnos(prel->expr); + if (bms_singleton_member(expr_varnos) != PART_EXPR_VARNO) + elog(ERROR, "partitioning expression may reference only one table"); + + /* Extract Vars and varattnos of partitioning expression */ + prel->expr_vars = NIL; + prel->expr_atts = NULL; + prel->expr_vars = pull_var_clause(prel->expr, 0); + pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + + MemoryContextSwitchTo(old_mcxt); - tp = SearchSysCache1(TYPEOID, values[Anum_pathman_config_atttype - 1]); - if (HeapTupleIsValid(tp)) + htup = SearchSysCache1(TYPEOID, prel->atttype); + if (HeapTupleIsValid(htup)) { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(htup); prel->atttypmod = typtup->typtypmod; prel->attcollid = typtup->typcollation; - ReleaseSysCache(tp); + ReleaseSysCache(htup); } - else - elog(ERROR, "Something went wrong while getting type information"); + else elog(ERROR, "cache lookup failed for type %u", prel->atttype); /* Fetch HASH & CMP fuctions and other stuff from type cache */ typcache = lookup_type_cache(prel->atttype, @@ -253,7 +265,7 @@ refresh_pathman_relation_info(Oid relid, /* Free remaining resources */ FreeChildrenArray(prel); FreeRangesArray(prel); - FreeIfNotNull(prel->attname); + FreeIfNotNull(prel->expr_cstr); FreeIfNotNull(prel->expr); /* Rethrow ERROR further */ @@ -291,36 +303,6 @@ refresh_pathman_relation_info(Oid relid, return prel; } -/* Check that one of arguments of OpExpr is expression */ -static bool -extract_vars(Node *node, PartRelationInfo *prel) -{ - if (node == NULL) - return false; - - if (IsA(node, Var)) - { - prel->expr_vars = lappend(prel->expr_vars, node); - 
prel->expr_atts = bms_add_member(prel->expr_atts, ((Var *) node)->varattno); - return false; - } - - return expression_tree_walker(node, extract_vars, (void *) prel); -} - - -/* - * This function fills 'expr_vars' and 'expr_atts' attributes in PartRelationInfo. - */ -static void -fill_part_expression_vars(PartRelationInfo *prel) -{ - prel->expr_vars = NIL; - prel->expr_atts = NULL; - - extract_vars(prel->expr, prel); -} - /* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ PartRelationInfo * invalidate_pathman_relation_info(Oid relid, bool *found) @@ -338,7 +320,7 @@ invalidate_pathman_relation_info(Oid relid, bool *found) { FreeChildrenArray(prel); FreeRangesArray(prel); - FreeIfNotNull(prel->attname); + FreeIfNotNull(prel->expr_cstr); prel->valid = false; /* now cache entry is invalid */ } @@ -361,79 +343,6 @@ invalidate_pathman_relation_info(Oid relid, bool *found) return prel; } -/* Update expression in pathman_config */ -static void -update_parsed_expression(Oid relid, HeapTuple tuple, Datum *values, bool *nulls) -{ - char *expression; - bool replaces[Natts_pathman_config]; - - Relation rel; - HeapTuple newtuple; - PartExpressionInfo *expr_info; - - /* get and parse expression */ - expression = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); - Assert(nulls[Anum_pathman_config_expression_p - 1]); - expr_info = get_part_expression_info(relid, expression, false, true); - Assert(expr_info->expr_datum != (Datum) 0); - pfree(expression); - - /* prepare tuple values */ - values[Anum_pathman_config_expression_p - 1] = expr_info->expr_datum; - nulls[Anum_pathman_config_expression_p - 1] = false; - - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_info->expr_type); - nulls[Anum_pathman_config_atttype - 1] = false; - - MemSet(replaces, false, sizeof(replaces)); - replaces[Anum_pathman_config_expression_p - 1] = true; - replaces[Anum_pathman_config_atttype - 1] = true; - - /* update row */ - rel = 
heap_open(get_pathman_config_relid(false), RowExclusiveLock); - newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, - replaces); - simple_heap_update(rel, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(rel, newtuple); - heap_close(rel, RowExclusiveLock); -} - -/* Mark expression in pathman_config as it needs update */ -void -mark_pathman_expression_for_update(Oid relid) -{ - HeapTuple tuple; - Datum values[Natts_pathman_config]; - bool nulls[Natts_pathman_config]; - - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, nulls, NULL, &tuple)) - { - Relation rel; - HeapTuple newtuple; - bool replaces[Natts_pathman_config]; - - values[Anum_pathman_config_expression_p - 1] = (Datum) 0; - nulls[Anum_pathman_config_expression_p - 1] = true; - - values[Anum_pathman_config_atttype - 1] = (Datum) 0; - nulls[Anum_pathman_config_atttype - 1] = true; - - MemSet(replaces, false, sizeof(replaces)); - replaces[Anum_pathman_config_expression_p - 1] = true; - replaces[Anum_pathman_config_atttype - 1] = true; - - /* update row */ - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); - newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, - replaces); - simple_heap_update(rel, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(rel, newtuple); - heap_close(rel, RowExclusiveLock); - } -} - /* Get PartRelationInfo from local cache. 
*/ const PartRelationInfo * get_pathman_relation_info(Oid relid) @@ -444,19 +353,18 @@ get_pathman_relation_info(Oid relid) /* Refresh PartRelationInfo if needed */ if (prel && !PrelIsValid(prel)) { - HeapTuple tuple; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &tuple)) + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) { bool upd_expr = isnull[Anum_pathman_config_expression_p - 1]; if (upd_expr) - update_parsed_expression(relid, tuple, values, isnull); + pathman_config_refresh_parsed_expression(relid, values, isnull, &iptr); /* Refresh partitioned table cache entry (might turn NULL) */ - /* TODO: possible refactoring, pass found 'prel' instead of searching */ prel = refresh_pathman_relation_info(relid, values, false); } @@ -649,6 +557,159 @@ fill_prel_with_partitions(PartRelationInfo *prel, } +/* + * Partitioning expression routines. 
+ */ + +/* Wraps expression in SELECT query and returns parse tree */ +Node * +parse_partitioning_expression(const Oid relid, + const char *exp_cstr, + char **query_string_out, /* ret value #1 */ + Node **parsetree_out) /* ret value #2 */ +{ + SelectStmt *select_stmt; + List *parsetree_list; + + const char *sql = "SELECT (%s) FROM ONLY %s.%s"; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, exp_cstr, + quote_identifier(nspname), + quote_identifier(relname)); + + parsetree_list = raw_parser(query_string); + if (list_length(parsetree_list) != 1) + elog(ERROR, "expression \"%s\" produced more than one query", exp_cstr); + + select_stmt = (SelectStmt *) linitial(parsetree_list); + + if (query_string_out) + *query_string_out = query_string; + + if (parsetree_out) + *parsetree_out = (Node *) select_stmt; + + return ((ResTarget *) linitial(select_stmt->targetList))->val; +} + +/* + * Parses expression related to 'relid', and returns its type, + * raw expression tree, and if specified returns its plan + */ +Datum +plan_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type_out) +{ + + Node *parsetree; + List *querytree_list; + TargetEntry *target_entry; + + Node *raw_expr; + Query *expr_query; + PlannedStmt *expr_plan; + Node *expr; + Datum expr_datum; + + char *query_string, + *expr_serialized; + + MemoryContext parse_mcxt, + old_mcxt; + + AssertTemporaryContext(); + + parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, + "pathman parse context", + ALLOCSET_DEFAULT_SIZES); + + /* Keep raw expression */ + raw_expr = parse_partitioning_expression(relid, expr_cstr, + &query_string, &parsetree); + + /* Check if raw_expr is NULLable */ + if (IsA(raw_expr, ColumnRef)) + { + ColumnRef *column = (ColumnRef *) raw_expr; + + if (list_length(column->fields) == 1) + { + HeapTuple htup; + bool attnotnull; + char *attname = strVal(linitial(column->fields)); + + /* 
check if attribute is nullable */ + htup = SearchSysCacheAttName(relid, attname); + if (HeapTupleIsValid(htup)) + { + Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(htup); + attnotnull = att_tup->attnotnull; + ReleaseSysCache(htup); + } + else elog(ERROR, "cannot find type name for attribute \"%s\" " + "of relation \"%s\"", + attname, get_rel_name_or_relid(relid)); + + if (!attnotnull) + elog(ERROR, "partitioning key \"%s\" must be marked NOT NULL", attname); + } + } + + /* We don't need pathman activity initialization for this relation yet */ + pathman_hooks_enabled = false; + + /* + * We use separate memory context here, just to make sure we + * don't leave anything behind after analyze and planning. + * Parsed raw expression will stay in caller's context. + */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); + + /* This will fail with elog in case of wrong expression */ + querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + if (list_length(querytree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); + + expr_query = (Query *) linitial(querytree_list); + + /* Plan this query. We reuse 'expr_node' here */ + expr_plan = pg_plan_query(expr_query, 0, NULL); + + target_entry = IsA(expr_plan->planTree, IndexOnlyScan) ? 
+ linitial(((IndexOnlyScan *) expr_plan->planTree)->indextlist) : + linitial(expr_plan->planTree->targetlist); + + expr = eval_const_expressions(NULL, (Node *) target_entry->expr); + if (contain_mutable_functions(expr)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression must be marked IMMUTABLE"))); + + Assert(expr); + + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + + expr_serialized = nodeToString(expr); + + /* Switch to previous mcxt */ + MemoryContextSwitchTo(old_mcxt); + + expr_datum = CStringGetTextDatum(expr_serialized); + + /* Free memory */ + MemoryContextDelete(parse_mcxt); + + /* Enable pathman hooks */ + pathman_hooks_enabled = true; + + return expr_datum; +} + + /* * Functions for delayed invalidation. */ @@ -953,15 +1014,16 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) static bool try_perform_parent_refresh(Oid parent) { - HeapTuple tuple; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - if (pathman_config_contains_relation(parent, values, isnull, NULL, &tuple)) + if (pathman_config_contains_relation(parent, values, isnull, NULL, &iptr)) { - bool upd_expr = isnull[Anum_pathman_config_expression_p - 1]; - if (upd_expr) - update_parsed_expression(parent, tuple, values, isnull); + bool should_update_expr = isnull[Anum_pathman_config_expression_p - 1]; + + if (should_update_expr) + pathman_config_refresh_parsed_expression(parent, values, isnull, &iptr); /* If anything went wrong, return false (actually, it might emit ERROR) */ refresh_pathman_relation_info(parent, diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 3a1c8ddf..64d563db 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -213,9 +213,9 @@ is_pathman_related_alter_column_type(Node *parsetree, 
/* Examine command list */ foreach (lc, alter_table_stmt->cmds) { - AttrNumber attnum; - - AlterTableCmd *alter_table_cmd = (AlterTableCmd *) lfirst(lc); + AlterTableCmd *alter_table_cmd = (AlterTableCmd *) lfirst(lc); + AttrNumber attnum; + int adjusted_attnum; if (!IsA(alter_table_cmd, AlterTableCmd)) continue; @@ -226,7 +226,8 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Is it a column that used in expression? */ attnum = get_attnum(parent_relid, alter_table_cmd->name); - if (!bms_is_member(attnum, prel->expr_atts)) + adjusted_attnum = attnum - FirstLowInvalidHeapAttributeNumber; + if (!bms_is_member(adjusted_attnum, prel->expr_atts)) continue; /* Return 'prel->attnum' */ @@ -374,12 +375,12 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); foreach(cur, attnums) { - int attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; if (is_from) - rte->insertedCols = bms_add_member(rte->insertedCols, attno); + rte->insertedCols = bms_add_member(rte->insertedCols, attnum); else - rte->selectedCols = bms_add_member(rte->selectedCols, attno); + rte->selectedCols = bms_add_member(rte->selectedCols, attnum); } ExecCheckRTPerms(range_table, true); diff --git a/src/utils.c b/src/utils.c index 251f5757..bf7f5799 100644 --- a/src/utils.c +++ b/src/utils.c @@ -574,45 +574,3 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) return rangevars; } - -/* - * Get attributes map between parent and child relation. - * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. 
- */ -AttrNumber * -get_pathman_attributes_map(const PartRelationInfo *prel, Relation child) -{ - AttrNumber i = -1; - Oid parent_relid = prel->key; - TupleDesc childDesc = RelationGetDescr(child); - int natts = childDesc->natts; - AttrNumber *attrMap = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - { - int j; - char *attname = get_attname(parent_relid, i); - - for (j = 0; j < natts; j++) - { - Form_pg_attribute att = childDesc->attrs[j]; - char *child_attname; - - if (att->attisdropped) - continue; /* attrMap[i] is already 0 */ - - child_attname = NameStr(att->attname); - if (strcmp(child_attname, attname) == 0) - { - attrMap[i - 1] = (AttrNumber) (j + 1); - break; - } - } - - if (attrMap[i - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); - } - - return attrMap; -} From 184ccff1a88350f82d11e6dff9d782f14378bcbd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 12:44:54 +0300 Subject: [PATCH 0435/1124] Fix pull_var_clause call for 9.5 --- src/relation_info.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index f7acd90a..c04d5fe1 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -187,7 +187,9 @@ refresh_pathman_relation_info(Oid relid, /* Extract Vars and varattnos of partitioning expression */ prel->expr_vars = NIL; prel->expr_atts = NULL; - prel->expr_vars = pull_var_clause(prel->expr, 0); + prel->expr_vars = pull_var_clause_compat(prel->expr, + PVC_REJECT_AGGREGATES, + PVC_REJECT_PLACEHOLDERS); pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); MemoryContextSwitchTo(old_mcxt); From 3ec09c73131ee8e33ab70bf37e0778d9d9fb4d59 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 12:47:41 +0300 Subject: [PATCH 0436/1124] Fix pull_var_clause call for 9.5 and 9.6 --- src/relation_info.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git 
a/src/relation_info.c b/src/relation_info.c index c04d5fe1..954fde7b 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -187,9 +187,7 @@ refresh_pathman_relation_info(Oid relid, /* Extract Vars and varattnos of partitioning expression */ prel->expr_vars = NIL; prel->expr_atts = NULL; - prel->expr_vars = pull_var_clause_compat(prel->expr, - PVC_REJECT_AGGREGATES, - PVC_REJECT_PLACEHOLDERS); + prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); MemoryContextSwitchTo(old_mcxt); From a78cec519edf2149b7a3cad150a9912347407b31 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 14:04:28 +0300 Subject: [PATCH 0437/1124] remove useless includes --- src/compat/pg_compat.c | 4 +++- src/include/utils.h | 4 ---- src/init.c | 1 - src/nodes_common.c | 3 +-- src/partition_creation.c | 8 +------- src/partition_filter.c | 3 --- src/pathman_workers.c | 1 - src/pg_pathman.c | 1 - src/pl_funcs.c | 4 +--- src/pl_hash_funcs.c | 3 --- src/planner_tree_modification.c | 2 -- src/relation_info.c | 3 +-- src/runtime_merge_append.c | 3 --- src/runtimeappend.c | 2 -- src/utils.c | 7 ++++--- 15 files changed, 11 insertions(+), 38 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 6e441980..dd45fa42 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -13,14 +13,16 @@ #include "compat/pg_compat.h" +#include "utils.h" + #include "access/htup_details.h" +#include "catalog/pg_class.h" #include "catalog/pg_proc.h" #include "foreign/fdwapi.h" #include "optimizer/clauses.h" #include "optimizer/pathnode.h" #include "optimizer/prep.h" #include "port.h" -#include "utils.h" #include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/include/utils.h b/src/include/utils.h index bf061f45..b54e4e0f 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -16,10 +16,6 @@ #include "postgres.h" #include "parser/parse_oper.h" -#include 
"utils/rel.h" -#include "nodes/relation.h" -#include "nodes/memnodes.h" -#include "nodes/nodeFuncs.h" /* diff --git a/src/init.c b/src/init.c index 4a6d8de2..6f939ceb 100644 --- a/src/init.c +++ b/src/init.c @@ -29,7 +29,6 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" -#include "utils/datum.h" #include "utils/inval.h" #include "utils/builtins.h" #include "utils/fmgroids.h" diff --git a/src/nodes_common.c b/src/nodes_common.c index 3c217f50..2132f840 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -13,9 +13,8 @@ #include "runtimeappend.h" #include "utils.h" -#include "access/sysattr.h" +#include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" -#include "optimizer/restrictinfo.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "rewrite/rewriteManip.h" diff --git a/src/partition_creation.c b/src/partition_creation.c index 14d4c6d3..68d431b7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -14,7 +14,6 @@ #include "pathman.h" #include "pathman_workers.h" #include "xact_handling.h" -#include "compat/pg_compat.h" #include "access/htup_details.h" #include "access/reloptions.h" @@ -32,13 +31,9 @@ #include "commands/tablespace.h" #include "commands/trigger.h" #include "miscadmin.h" -#include "nodes/plannodes.h" -#include "optimizer/clauses.h" -#include "parser/parser.h" +#include "nodes/nodeFuncs.h" #include "parser/parse_func.h" -#include "parser/parse_relation.h" #include "parser/parse_utilcmd.h" -#include "parser/analyze.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/datum.h" @@ -46,7 +41,6 @@ #include "utils/jsonb.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" -#include "utils/memutils.h" #include "utils/syscache.h" #include "utils/typcache.h" diff --git a/src/partition_filter.c b/src/partition_filter.c index 944762e5..d60adb2d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -13,10 +13,8 @@ #include "pathman.h" #include 
"partition_creation.h" #include "partition_filter.h" -#include "planner_tree_modification.h" #include "utils.h" -#include "access/htup_details.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" @@ -24,7 +22,6 @@ #include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" -#include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 4fe9eb08..27d7a05f 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -34,7 +34,6 @@ #include "storage/proc.h" #include "utils/builtins.h" #include "utils/datum.h" -#include "utils/memutils.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9bf4837a..5e9a15fd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,7 +16,6 @@ #include "hooks.h" #include "pathman.h" #include "partition_filter.h" -#include "planner_tree_modification.h" #include "runtimeappend.h" #include "runtime_merge_append.h" diff --git a/src/pl_funcs.c b/src/pl_funcs.c index d8aa8e28..fbc875a4 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -19,10 +19,8 @@ #include "utils.h" #include "access/tupconvert.h" -#include "access/nbtree.h" #include "access/htup_details.h" #include "catalog/indexing.h" -#include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" @@ -30,8 +28,8 @@ #include "executor/spi.h" #include "funcapi.h" #include "miscadmin.h" +#include "nodes/nodeFuncs.h" #include "utils/builtins.h" -#include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index d59e4ca7..4f4238f5 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -13,12 +13,9 @@ #include "relation_info.h" #include "utils.h" -#include "catalog/pg_type.h" #include "utils/builtins.h" #include 
"utils/typcache.h" #include "utils/lsyscache.h" -#include "utils/builtins.h" -#include "utils/array.h" /* Function declarations */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 606f1687..fa36ce78 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -12,7 +12,6 @@ #include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" -#include "nodes_common.h" #include "partition_filter.h" #include "planner_tree_modification.h" #include "rewrite/rewriteManip.h" @@ -21,7 +20,6 @@ #include "miscadmin.h" #include "optimizer/clauses.h" #include "storage/lmgr.h" -#include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/relation_info.c b/src/relation_info.c index 954fde7b..e5f25e38 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -11,7 +11,6 @@ #include "compat/pg_compat.h" #include "relation_info.h" -#include "partition_creation.h" #include "init.h" #include "utils.h" #include "xact_handling.h" @@ -24,6 +23,7 @@ #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/var.h" #include "parser/parser.h" @@ -33,7 +33,6 @@ #include "utils/fmgroids.h" #include "utils/hsearch.h" #include "utils/memutils.h" -#include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/typcache.h" diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 16622f02..453ebab1 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -13,14 +13,12 @@ #include "compat/pg_compat.h" #include "runtime_merge_append.h" -#include "pathman.h" #include "postgres.h" #include "catalog/pg_collation.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "nodes/plannodes.h" -#include "optimizer/clauses.h" #include "optimizer/cost.h" #include "optimizer/planmain.h" #include "optimizer/tlist.h" @@ -29,7 +27,6 @@ #include 
"utils/guc.h" #include "utils/lsyscache.h" #include "utils/typcache.h" -#include "utils/memutils.h" #include "utils/ruleutils.h" #include "lib/binaryheap.h" diff --git a/src/runtimeappend.c b/src/runtimeappend.c index 86554b0e..9e93aedf 100644 --- a/src/runtimeappend.c +++ b/src/runtimeappend.c @@ -10,8 +10,6 @@ #include "runtimeappend.h" -#include "postgres.h" -#include "utils/memutils.h" #include "utils/guc.h" diff --git a/src/utils.c b/src/utils.c index bf7f5799..dc40a8de 100644 --- a/src/utils.c +++ b/src/utils.c @@ -16,14 +16,15 @@ #include "access/nbtree.h" #include "access/sysattr.h" #include "access/xact.h" -#include "catalog/heap.h" +#include "catalog/indexing.h" #include "catalog/namespace.h" -#include "catalog/pg_type.h" +#include "catalog/pg_class.h" #include "catalog/pg_extension.h" #include "catalog/pg_operator.h" +#include "catalog/pg_type.h" #include "commands/extension.h" #include "miscadmin.h" -#include "optimizer/var.h" +#include "nodes/nodeFuncs.h" #include "parser/parse_coerce.h" #include "parser/parse_oper.h" #include "utils/builtins.h" From 7e449c0dfaaa43f841eff251b40891ef582137be Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 14:30:54 +0300 Subject: [PATCH 0438/1124] Simplify add_to_pathman_config, check hash function for expression for hash partitioning type. 
--- expected/pathman_permissions.out | 1 + hash.sql | 3 ++- init.sql | 3 +-- range.sql | 15 ++++++++++----- src/pl_funcs.c | 20 ++++++++++++++------ 5 files changed, 28 insertions(+), 14 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 6814c442..d4b509d3 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -19,6 +19,7 @@ GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +WARNING: skipping "user1_table" --- only table or database owner can analyze it ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" /* Should be ok */ SET ROLE user1; diff --git a/hash.sql b/hash.sql index fbdcd97a..c942e8c6 100644 --- a/hash.sql +++ b/hash.sql @@ -35,7 +35,8 @@ BEGIN PERFORM @extschema@.common_relation_checks(parent_relid, expression); /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); + EXECUTE format('ANALYZE %s', parent_relid); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, diff --git a/init.sql b/init.sql index f58c0a75..e27b533e 100644 --- a/init.sql +++ b/init.sql @@ -862,9 +862,8 @@ LANGUAGE C STRICT; */ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, - attname TEXT, + expression TEXT, range_interval TEXT DEFAULT NULL, - refresh_part_info BOOL DEFAULT TRUE, parttype INT4 DEFAULT 0 ) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' diff --git a/range.sql b/range.sql index 5ae99a79..40894c7e 100644 --- a/range.sql +++ b/range.sql @@ -154,8 +154,9 @@ BEGIN END IF; /* Insert new entry to pathman config */ + EXECUTE format('ANALYZE %s', parent_relid); PERFORM 
@extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -251,8 +252,9 @@ BEGIN END IF; /* Insert new entry to pathman config */ + EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -310,7 +312,8 @@ BEGIN bounds[array_length(bounds, 1) - 1]); /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false, 2); + EXECUTE format('ANALYZE %s', parent_relid); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, 2); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -360,8 +363,9 @@ BEGIN end_value); /* Insert new entry to pathman config */ + EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -417,8 +421,9 @@ BEGIN end_value); /* Insert new entry to pathman config */ + EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c80f20b2..a99897bd 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -22,6 +22,7 @@ #include "access/nbtree.h" #include "access/htup_details.h" #include "catalog/indexing.h" +#include "catalog/pg_inherits_fn.h" #include "catalog/pg_trigger.h" 
#include "catalog/pg_type.h" #include "commands/tablespace.h" @@ -709,7 +710,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Relation pathman_config; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; - bool refresh_part_info; HeapTuple htup; Oid expr_type; @@ -745,13 +745,23 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } /* Select partitioning type */ - parttype = PG_GETARG_INT32(4); + parttype = PG_GETARG_INT32(3); if ((parttype != PT_HASH) && (parttype != PT_RANGE)) parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; /* Parse and check expression */ expr_datum = plan_partitioning_expression(relid, expression, &expr_type); + /* Expression for range partitions should be hashable */ + if (parttype == PT_HASH) + { + TypeCacheEntry *tce; + + tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); + if (tce->hash_proc == InvalidOid) + elog(ERROR, "partitioning expression should be hashable"); + } + /* * Initialize columns (partrel, attname, parttype, range_interval). */ @@ -790,10 +800,8 @@ add_to_pathman_config(PG_FUNCTION_ARGS) heap_close(pathman_config, RowExclusiveLock); - /* FIXME: check pg_inherits instead of this argument */ - refresh_part_info = PG_GETARG_BOOL(3); - - if (refresh_part_info) + /* update caches only if this relation has children */ + if (has_subclass(relid)) { /* Now try to create a PartRelationInfo */ PG_TRY(); From 682d1b14dcccd6a193897f9c1b05f3312d40f494 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 14:47:07 +0300 Subject: [PATCH 0439/1124] remove russian README --- README.rus.md | 493 -------------------------------------------------- 1 file changed, 493 deletions(-) delete mode 100644 README.rus.md diff --git a/README.rus.md b/README.rus.md deleted file mode 100644 index d7fe87a4..00000000 --- a/README.rus.md +++ /dev/null @@ -1,493 +0,0 @@ -[![Build 
Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) -[![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) - -# pg_pathman - -Модуль `pg_pathman` предоставляет оптимизированный механизм секционирования, а также функции для создания и управления секциями. - -Расширение совместимо с PostgreSQL 9.5 (поддержка 9.6 будет добавлена в одном из ближайших обновлений). - -## Концепция pg_pathman - -**Секционирование** -- это способ разбиения одной большой таблицы на множество меньших по размеру. Для каждой записи можно однозначно определить секцию, в которой она должна храниться посредством вычисления ключа. -Секционирование в postgres основано на механизме наследования. Каждому наследнику задается условие CHECK CONSTRAINT. Например: - -``` -CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT); -CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test); -CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test); -``` - -Несмотря на гибкость, этот механизм обладает недостатками. Так при фильтрации данных оптимизатор вынужден перебирать все дочерние секции и сравнивать условие запроса с CHECK CONSTRAINT-ами секции, чтобы определить из каких секций ему следует загружать данные. При большом количестве секций это создает дополнительные накладные расходы, которые могут свести на нет выигрыш в производительности от применения секционирования. - -Модуль `pg_pathman` предоставляет функции для создания и управления секциями, а также механизм секционирования, оптимизированный с учетом знания о структуре дочерних таблиц. Конфигурация сохраняется таблице `pathman_config`, каждая строка которой содержит запись для одной секционированной таблицы (название таблицы, атрибут и тип разбиения). 
В процессе инициализации `pg_pathman` кеширует конфигурацию дочерних таблиц в формате, удобном для быстрого поиска. Получив запрос типа `SELECT` к секционированной таблице, `pg_pathman` анализирует дерево условий запроса и выделяет из него условия вида: - -``` -ПЕРЕМЕННАЯ ОПЕРАТОР КОНСТАНТА -``` -где `ПЕРЕМЕННАЯ` -- это атрибут, по которому было выполнено разбиение, `ОПЕРАТОР` -- оператор сравнения (поддерживаются =, <, <=, >, >=), `КОНСТАНТА` -- скалярное значение. Например: - -``` -WHERE id = 150 -``` -Затем основываясь на стратегии секционирования и условиях запроса `pg_pathman` находит в кеше соответствующие секции и строит план. - -В текущей версии `pg_pathman` поддерживает следующие типы секционирования: - -* **RANGE** - разбивает таблицу на секции по диапазонам ключевого аттрибута; для оптимизации построения плана используется метод бинарного поиска. -* **HASH** - данные равномерно распределяются по секциям в соответствии со значениями hash-функции, вычисленными по заданному целочисленному атрибуту. - -More interesting features are yet to come. Stay tuned! - -## Roadmap - - * Предоставить возможность установки пользовательских колбеков на создание\уничтожение партиции (issue [#22](https://github.com/postgrespro/pg_pathman/issues/22)) - * LIST-секционирование; - * Оптимизация hash join для случая, когда обе таблицы секционированы по ключу join’а. - -## Установка - -Для установки pg_pathman выполните в директории модуля команду: -``` -make install USE_PGXS=1 -``` -Модифицируйте параметр shared_preload_libraries в конфигурационном файле postgres.conf: -``` -shared_preload_libraries = 'pg_pathman' -``` -Для вступления изменений в силу потребуется перезагрузка сервера PostgreSQL. Затем выполните в psql: -``` -CREATE EXTENSION pg_pathman; -``` - -> **Важно:** Если вы хотите собрать `pg_pathman` для работы с кастомной сборкой PostgreSQL, не забудьте установить переменную окружения `PG_CONFIG` равной пути к исполняемому файлу pg_config. 
Узнать больше о сборке расширений для PostgreSQL можно по ссылке: [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). - -## Функции `pg_pathman` - -### Создание секций -```plpgsql -create_hash_partitions(relation REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_name TEXT DEFAULT NULL) -``` -Выполняет HASH-секционирование таблицы `relation` по целочисленному полю `attribute`. Параметр `partitions_count` определяет, сколько секций будет создано. Если `partition_data` установлен в значение `true`, то данные из родительской таблицы будут автоматически распределены по секциям. Стоит иметь в виду, что миграция данных может занять некоторое время, а данные заблокированы. Для конкурентной миграции данных см. функцию `partition_table_concurrently()`. - -```plpgsql -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval ANYELEMENT, - count INTEGER DEFAULT NULL - partition_data BOOLEAN DEFAULT true) - -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval INTERVAL, - count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT true) -``` -Выполняет RANGE-секционирование таблицы `relation` по полю `attribute`. Аргумент `start_value` задает начальное значение, `interval` -- диапазон значений внутри одной секции, `count` -- количество создаваемых секций (если не задано, то pathman попытается определить количество секций на основе значений аттрибута). 
- -```plpgsql -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval ANYELEMENT, - partition_data BOOLEAN DEFAULT true) - -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval INTERVAL, - partition_data BOOLEAN DEFAULT true) -``` -Выполняет RANGE-секционирование для заданного диапазона таблицы `relation` по полю `attribute`. - -### Миграция данных - -```plpgsql -partition_table_concurrently(relation REGCLASS) -``` -Запускает новый процесс (background worker) для конкурентного перемещения данных из родительской таблицы в дочерние секции. Рабочий процесс использует короткие транзакции для перемещения небольших объемов данных (порядка 10 тысяч записей) и, таким образом, не оказывает существенного влияния на работу пользователей. - -```plpgsql -stop_concurrent_part_task(relation REGCLASS) -``` -Останавливает процесс конкурентного партиционирования. Обратите внимание, что процесс завершается не мгновенно, а только по завершении текущей транзакции. - -### Утилиты -```plpgsql -create_hash_update_trigger(parent REGCLASS) -``` -Создает триггер на UPDATE для HASH секций. По-умолчанию триггер на обновление данных не создается, т.к. это создает дополнительные накладные расходы. Триггер полезен только в том случае, когда меняется значение ключевого аттрибута. -```plpgsql -create_range_update_trigger(parent REGCLASS) -``` -Аналогично предыдущей, но для RANGE секций. - -### Управление секциями -```plpgsql -split_range_partition(partition REGCLASS, - value ANYELEMENT, - partition_name TEXT DEFAULT NULL,) -``` -Разбивает RANGE секцию `partition` на две секции по значению `value`. - -```plpgsql -merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) -``` -Объединяет две смежные RANGE секции. Данные из `partition2` копируются в `partition1`, после чего секция `partition2` удаляется. 
- -```plpgsql -append_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию с диапазоном `pathman_config.range_interval` в конец списка секций. - -```plpgsql -prepend_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию с диапазоном `pathman_config.range_interval` в начало списка секций. - -```plpgsql -add_range_partition(relation REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию с заданным диапазоном к секционированной таблице `relation`. - -```plpgsql -drop_range_partition(partition TEXT) -``` -Удаляет RANGE секцию вместе с содержащимися в ней данными. - -```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) -``` -Присоединяет существующую таблицу `partition` в качестве секции к ранее секционированной таблице `relation`. Структура присоединяемой таблицы должна в точности повторять структуру родительской. - -```plpgsql -detach_range_partition(partition REGCLASS) -``` -Отсоединяет секцию `partition`, после чего она становится независимой таблицей. - -```plpgsql -disable_pathman_for(relation REGCLASS) -``` -Отключает механизм секционирования `pg_pathman` для заданной таблицы. При этом созданные ранее секции остаются без изменений. - -```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) -``` -Удаляет все секции таблицы `parent`. Если параметр `delete_data` задан как `false` (по-умолчанию `false`), то данные из секций копируются в родительскую таблицу. - -### Дополнительные параметры - -```plpgsql -enable_parent(relation REGCLASS) -disable_parent(relation REGCLASS) -``` -Включает/исключает родительскую таблицу в план запроса. В оригинальном планировщике PostgreSQL родительская таблица всегда включается в план запроса, даже если она пуста. 
Это создает дополнительные накладные расходы. Выполните `disable_parent()`, если вы не собираетесь хранить какие-либо данные в родительской таблице. Значение по-умолчанию зависит от того, был ли установлен параметр `partition_data` при первоначальном разбиении таблицы (см. функции `create_range_partitions()` и `create_partitions_from_range()`). Если он был установлен в значение `true`, то все данные были перемещены в секции, а родительская таблица отключена. В противном случае родительская таблица по-умолчанию влючена. - -```plpgsql -enable_auto(relation REGCLASS) -disable_auto(relation REGCLASS) -``` -Включает/выключает автоматическое создание секций (только для RANGE секционирования). По-умолчанию включено. - -## Custom plan nodes -`pg_pathman` вводит три новых узла плана (см. [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI)), предназначенных для оптимизации времени выполнения: - -- `RuntimeAppend` (замещает узел типа `Append`) -- `RuntimeMergeAppend` (замещает узел типа `MergeAppend`) -- `PartitionFilter` (выполняет работу INSERT-триггера) - -`PartitionFilter` работает как прокси-узел для INSERT-запросов, распределяя новые записи по соответствующим секциям: - -``` -EXPLAIN (COSTS OFF) -INSERT INTO partitioned_table -SELECT generate_series(1, 10), random(); - QUERY PLAN ------------------------------------------ - Insert on partitioned_table - -> Custom Scan (PartitionFilter) - -> Subquery Scan on "*SELECT*" - -> Result -(4 rows) -``` - -Узлы `RuntimeAppend` и `RuntimeMergeAppend` имеют между собой много общего: они нужны в случает, когда условие WHERE принимает форму: -``` -ПЕРЕМЕННАЯ ОПЕРАТОР ПАРАМЕТР -``` -Подобные выражения не могут быть оптимизированы во время планирования, т.к. значение параметра неизвестно до стадии выполнения. 
Проблема может быть решена путем встраивания дополнительной процедуры анализа в код `Append` узла, таким образом позволяя ему выбирать лишь необходимые субпланы из всего списка дочерних планов. - ----------- - -Есть по меньшей мере несколько ситуаций, которые демонстрируют полезность таких узлов: - -``` -/* создаем таблицу, которую хотим секционировать */ -CREATE TABLE partitioned_table(id INT NOT NULL, payload REAL); - -/* заполняем данными */ -INSERT INTO partitioned_table -SELECT generate_series(1, 1000), random(); - -/* выполняем секционирование */ -SELECT create_hash_partitions('partitioned_table', 'id', 100); - -/* создаем обычную таблицу */ -CREATE TABLE some_table AS SELECT generate_series(1, 100) AS VAL; -``` - - - - **`id = (select ... limit 1)`** -``` -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = (SELECT * FROM some_table LIMIT 1); - QUERY PLAN ----------------------------------------------------------------------------------------------------- - Custom Scan (RuntimeAppend) (actual time=0.030..0.033 rows=1 loops=1) - InitPlan 1 (returns $0) - -> Limit (actual time=0.011..0.011 rows=1 loops=1) - -> Seq Scan on some_table (actual time=0.010..0.010 rows=1 loops=1) - -> Seq Scan on partitioned_table_70 partitioned_table (actual time=0.004..0.006 rows=1 loops=1) - Filter: (id = $0) - Rows Removed by Filter: 9 - Planning time: 1.131 ms - Execution time: 0.075 ms -(9 rows) - -/* выключаем узел RuntimeAppend */ -SET pg_pathman.enable_runtimeappend = f; - -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = (SELECT * FROM some_table LIMIT 1); - QUERY PLAN ----------------------------------------------------------------------------------- - Append (actual time=0.196..0.274 rows=1 loops=1) - InitPlan 1 (returns $0) - -> Limit (actual time=0.005..0.005 rows=1 loops=1) - -> Seq Scan on some_table (actual time=0.003..0.003 rows=1 loops=1) - -> Seq Scan on partitioned_table_0 (actual time=0.014..0.014 rows=0 loops=1) - 
Filter: (id = $0) - Rows Removed by Filter: 6 - -> Seq Scan on partitioned_table_1 (actual time=0.003..0.003 rows=0 loops=1) - Filter: (id = $0) - Rows Removed by Filter: 5 - ... /* more plans follow */ - Planning time: 1.140 ms - Execution time: 0.855 ms -(306 rows) -``` - - - **`id = ANY (select ...)`** -``` -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = any (SELECT * FROM some_table limit 4); - QUERY PLAN ------------------------------------------------------------------------------------------------------------ - Nested Loop (actual time=0.025..0.060 rows=4 loops=1) - -> Limit (actual time=0.009..0.011 rows=4 loops=1) - -> Seq Scan on some_table (actual time=0.008..0.010 rows=4 loops=1) - -> Custom Scan (RuntimeAppend) (actual time=0.002..0.004 rows=1 loops=4) - -> Seq Scan on partitioned_table_70 partitioned_table (actual time=0.001..0.001 rows=10 loops=1) - -> Seq Scan on partitioned_table_26 partitioned_table (actual time=0.002..0.003 rows=9 loops=1) - -> Seq Scan on partitioned_table_27 partitioned_table (actual time=0.001..0.002 rows=20 loops=1) - -> Seq Scan on partitioned_table_63 partitioned_table (actual time=0.001..0.002 rows=9 loops=1) - Planning time: 0.771 ms - Execution time: 0.101 ms -(10 rows) - -/* выключаем узел RuntimeAppend */ -SET pg_pathman.enable_runtimeappend = f; - -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = any (SELECT * FROM some_table limit 4); - QUERY PLAN ------------------------------------------------------------------------------------------ - Nested Loop Semi Join (actual time=0.531..1.526 rows=4 loops=1) - Join Filter: (partitioned_table.id = some_table.val) - Rows Removed by Join Filter: 3990 - -> Append (actual time=0.190..0.470 rows=1000 loops=1) - -> Seq Scan on partitioned_table (actual time=0.187..0.187 rows=0 loops=1) - -> Seq Scan on partitioned_table_0 (actual time=0.002..0.004 rows=6 loops=1) - -> Seq Scan on partitioned_table_1 (actual time=0.001..0.001 rows=5 
loops=1) - -> Seq Scan on partitioned_table_2 (actual time=0.002..0.004 rows=14 loops=1) -... /* 96 scans follow */ - -> Materialize (actual time=0.000..0.000 rows=4 loops=1000) - -> Limit (actual time=0.005..0.006 rows=4 loops=1) - -> Seq Scan on some_table (actual time=0.003..0.004 rows=4 loops=1) - Planning time: 2.169 ms - Execution time: 2.059 ms -(110 rows) -``` - - - **`NestLoop` involving a partitioned table**, which is omitted since it's occasionally shown above. - ----------- - -Узнать больше о работе RuntimeAppend можно в [блоге](http://akorotkov.github.io/blog/2016/06/15/pg_pathman-runtime-append/) Александра Короткова. - -## Примеры - -### Common tips -- You can easily add **_partition_** column containing the names of the underlying partitions using the system attribute called **_tableoid_**: -``` -SELECT tableoid::regclass AS partition, * FROM partitioned_table; -``` -- Несмотря на то, что индексы на родительской таблице не очень полезны (т.к. таблица пуста), они тем не менее выполняют роль прототипов для создания индексов в дочерних таблицах: `pg_pathman` автоматически создает аналогичные индексы для каждой новой секции. - -- Получить все текущие процессы конкурентного секционирования можно из представления `pathman_concurrent_part_tasks`: -```plpgsql -SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status ---------+------+-------+-------+-----------+--------- - dmitry | 7367 | 16384 | test | 472000 | working -(1 row) -``` - -### HASH секционирование -Рассмотрим пример секционирования таблицы, используя HASH-стратегию на примере таблицы товаров. -``` -CREATE TABLE items ( - id SERIAL PRIMARY KEY, - name TEXT, - code BIGINT); - -INSERT INTO items (id, name, code) -SELECT g, md5(g::text), random() * 100000 -FROM generate_series(1, 100000) as g; -``` -Если дочерние секции подразумевают наличие индексов, то стоит их создать в родительской таблице до разбиения. 
Тогда при разбиении pg_pathman автоматически создаст соответствующие индексы в дочерних таблицах. Разобьем таблицу `items` на 100 секций по полю `id`: -``` -SELECT create_hash_partitions('items', 'id', 100); -``` -Пример построения плана для запроса с фильтрацией по ключевому полю: -``` -SELECT * FROM items WHERE id = 1234; - id | name | code -------+----------------------------------+------ - 1234 | 81dc9bdb52d04dc20036dbd8313ed055 | 1855 -(1 row) - -EXPLAIN SELECT * FROM items WHERE id = 1234; - QUERY PLAN ------------------------------------------------------------------------------------- - Append (cost=0.28..8.29 rows=0 width=0) - -> Index Scan using items_34_pkey on items_34 (cost=0.28..8.29 rows=0 width=0) - Index Cond: (id = 1234) -``` -Стоит отметить, что pg_pathman исключает из плана запроса родительскую таблицу, и чтобы получить данные из нее, следует использовать модификатор ONLY: -``` -EXPLAIN SELECT * FROM ONLY items; - QUERY PLAN ------------------------------------------------------- - Seq Scan on items (cost=0.00..0.00 rows=1 width=45) -``` - -### RANGE секционирование -Рассмотрим пример разбиения таблицы по диапазону дат. Пусть у нас имеется таблица логов: -``` -CREATE TABLE journal ( - id SERIAL, - dt TIMESTAMP NOT NULL, - level INTEGER, - msg TEXT -); -CREATE INDEX ON journal(dt); - -INSERT INTO journal (dt, level, msg) -SELECT g, random()*6, md5(g::text) -FROM generate_series('2015-01-01'::date, '2015-12-31'::date, '1 minute') as g; -``` -Разобьем таблицу на 365 секций так, чтобы каждая секция содержала данные за один день: -``` -SELECT create_range_partitions('journal', 'dt', '2015-01-01'::date, '1 day'::interval); -``` -Новые секции добавляются автоматически при вставке новых записей в непокрытую область. Однако есть возможность добавлять секции вручную. 
Для этого можно воспользоваться следующими функциями: -``` -SELECT add_range_partition('journal', '2016-01-01'::date, '2016-01-07'::date); -SELECT append_range_partition('journal'); -``` -Первая создает новую секцию с заданным диапазоном. Вторая создает новую секцию с интервалом, заданным при первоначальном разбиении, и добавляет ее в конец списка секций. Также можно присоединить существующую таблицу в качестве секции. Например, это может быть таблица с архивными данными, расположенная на другом сервере и подключенная с помощью fdw: - -``` -CREATE FOREIGN TABLE journal_archive ( - id INTEGER NOT NULL, - dt TIMESTAMP NOT NULL, - level INTEGER, - msg TEXT -) SERVER archive_server; -``` -> Важно: структура подключаемой таблицы должна полностью совпадать с родительской. - -Подключим ее к имеющемуся разбиению: -``` -SELECT attach_range_partition('journal', 'journal_archive', '2014-01-01'::date, '2015-01-01'::date); -``` -Устаревшие секции можно сливать с архивной: -``` -SELECT merge_range_partitions('journal_archive', 'journal_1'); -``` -Разделить ранее созданную секцию на две можно с помощью следующей функции, указав точку деления: -``` -SELECT split_range_partition('journal_366', '2016-01-03'::date); -``` -Чтобы отсоединить ранее созданную секцию, воспользуйтесь функцией: -``` -SELECT detach_range_partition('journal_archive'); -``` - -Пример построения плана для запроса с фильтрацией по ключевому полю: -``` -SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; - id | dt | level | msg ---------+---------------------+-------+---------------------------------- - 217441 | 2015-06-01 00:00:00 | 2 | 15053892d993ce19f580a128f87e3dbf - 217442 | 2015-06-01 00:01:00 | 1 | 3a7c46f18a952d62ce5418ac2056010c - 217443 | 2015-06-01 00:02:00 | 0 | 92c8de8f82faf0b139a3d99f2792311d - ... 
-(2880 rows) - -EXPLAIN SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; - QUERY PLAN ------------------------------------------------------------------- - Append (cost=0.00..58.80 rows=0 width=0) - -> Seq Scan on journal_152 (cost=0.00..29.40 rows=0 width=0) - -> Seq Scan on journal_153 (cost=0.00..29.40 rows=0 width=0) -(3 rows) -``` - -### Деактивация pg_pathman -Для включения и отключения модуля `pg_pathman` и отдельных его компонентов существует ряд [GUC](https://www.postgresql.org/docs/9.5/static/config-setting.html) переменных: - - - `pg_pathman.enable` --- полное отключение (или включение) модуля `pg_pathman` - - `pg_pathman.enable_runtimeappend` --- включение/отключение функционала `RuntimeAppend` - - `pg_pathman.enable_runtimemergeappend` --- включение/отключение функционала `RuntimeMergeAppend` - - `pg_pathman.enable_partitionfilter` --- включение/отключение функционала `PartitionFilter` - -Чтобы **безвозвратно** отключить механизм `pg_pathman` для отдельной таблицы, используйте функцию `disable_pathman_for()`. В результате этой операции структура таблиц останется прежней, но для планирования и выполнения запросов будет использоваться стандартный механизм PostgreSQL. -``` -SELECT disable_pathman_for('range_rel'); -``` - -## Обратная связь -Если у вас есть вопросы или предложения, а также если вы обнаружили ошибки, напишите нам в разделе [issues](https://github.com/postgrespro/pg_pathman/issues). 
- -## Авторы -Ильдар Мусин Postgres Professional, Россия -Александр Коротков Postgres Professional, Россия -Дмитрий Иванов Postgres Professional, Россия From 6698baee7824e84a3b3f45e569c0919f3993c38d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 14:59:23 +0300 Subject: [PATCH 0440/1124] Add myself to contributors --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 655b4ac4..e9880b31 100644 --- a/README.md +++ b/README.md @@ -675,4 +675,4 @@ Ildar Musin Postgres Professional Ltd., Russia Alexander Korotkov Postgres Professional Ltd., Russia Dmitry Ivanov Postgres Professional Ltd., Russia Maksim Milyutin Postgres Professional Ltd., Russia - +Ildus Kurbangaliev Postgres Professional Ltd., Russia From e7cb5040819ad6d596e48e364651337def4e2dfc Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 15:40:36 +0300 Subject: [PATCH 0441/1124] bump version to 1.4 --- Makefile | 2 +- pg_pathman.control | 2 +- src/include/init.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d2f06c81..2ebb95be 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ PG_CPPFLAGS = -I$(CURDIR)/src/include EXTENSION = pg_pathman -EXTVERSION = 1.3 +EXTVERSION = 1.4 DATA_built = pg_pathman--$(EXTVERSION).sql diff --git a/pg_pathman.control b/pg_pathman.control index bace115b..0d6af5d3 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.3' +default_version = '1.4' module_pathname = '$libdir/pg_pathman' diff --git a/src/include/init.h b/src/include/init.h index cfe503b2..778da9bb 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -147,10 +147,10 @@ simpify_mcxt_name(MemoryContext mcxt) /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010300 +#define LOWEST_COMPATIBLE_FRONT 0x010400 /* 
Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010300 +#define CURRENT_LIB_VERSION 0x010400 void *pathman_cache_search_relid(HTAB *cache_table, From 68aea8d8e726c49dac633aaa9b0d2de9e7142138 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 15:44:49 +0300 Subject: [PATCH 0442/1124] fix calamity regression test --- expected/pathman_calamity.out | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4f1ea10a..8b5d0ae8 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10300 + 10400 (1 row) set client_min_messages = NOTICE; From b696a7eb2343992817bfc9ca9238338cb0f3a376 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 2 May 2017 15:59:46 +0300 Subject: [PATCH 0443/1124] META.json file added in order to support pgxn repo --- META.json | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 META.json diff --git a/META.json b/META.json new file mode 100644 index 00000000..4c72cdcb --- /dev/null +++ b/META.json @@ -0,0 +1,40 @@ +{ + "name": "pg_pathman", + "abstract": "Partitioning tool", + "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", + "version": "1.3.1", + "maintainer": [ + "Ildar Musin ", + "Dmitry Ivanov ", + "Ildus Kurbangaliev " + ], + "license": "postgresql", + "resources": { + "bugtracker": { + "web": "https://github.com/postgrespro/pg_pathman/issues" + }, + "repository": { + "url": "git://github.com:postgrespro/pg_pathman.git", + "web": "https://github.com/postgrespro/pg_pathman", + "type": "git" + } + }, + "generated_by": "Ildar Musin", + "provides": { + "pg_pathman": { + "file": "pg_pathman--1.3.sql", + "docfile": 
"README.md", + "version": "1.3.1", + "abstract": "Partitioning tool" + } + }, + "meta-spec": { + "version": "1.0.0", + "url": "http://pgxn.org/meta/spec.txt" + }, + "tags": [ + "partitioning", + "partition", + "optimization" + ] +} From 462d8d5fe3aa4079b08cbe9535a10830843f2434 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 17:41:38 +0300 Subject: [PATCH 0444/1124] Fix updates on same child relation --- src/partition_filter.c | 45 +++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index bd3de8f5..c13ca974 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -167,7 +167,6 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; - /* Currenly ResultPartsStorage is used only for INSERTs */ parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; @@ -244,7 +243,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!found) { Relation child_rel, - parent_rel = parts_storage->saved_rel_info->ri_RelationDesc; + base_rel = parts_storage->saved_rel_info->ri_RelationDesc; RangeTblEntry *child_rte, *parent_rte; Index child_rte_idx; @@ -267,7 +266,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CheckValidResultRel(child_rel, parts_storage->command_type); /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); @@ -348,7 +347,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_result_rel_info->ri_junkFilter = NULL; /* Generate tuple transformation map and some 
other stuff */ - rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); + rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) @@ -367,7 +366,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Build tuple conversion map (e.g. parent has a dropped column) */ TupleConversionMap * -build_part_tuple_map(Relation parent_rel, Relation child_rel) +build_part_tuple_map(Relation base_rel, Relation child_rel) { TupleConversionMap *tuple_map; TupleDesc child_tupdesc, @@ -377,7 +376,7 @@ build_part_tuple_map(Relation parent_rel, Relation child_rel) child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); child_tupdesc->tdtypeid = InvalidOid; - parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(base_rel)); parent_tupdesc->tdtypeid = InvalidOid; /* Generate tuple transformation map and some other stuff */ @@ -565,15 +564,26 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; Node *expr; MemoryContext old_cxt; PartitionFilterState *state = (PartitionFilterState *) node; const PartRelationInfo *prel; ListCell *lc; + PlanState *child_state; + Index expr_relid = 1; + + child_state = ExecInitNode(state->subplan, estate, eflags); /* It's convenient to store PlanState in 'custom_ps' */ - node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); + node->custom_ps = list_make1(child_state); + if (state->command_type == CMD_UPDATE) + { + Assert(IsA(child_state, SeqScanState)); + expr_relid = ((Scan *) ((ScanState *) child_state)->ps.plan)->scanrelid; + Assert(expr_relid >= 1); + } + else + expr_relid = state->partitioned_table; if (state->expr_state == NULL) { @@ -582,18 +592,21 @@ partition_filter_begin(CustomScanState *node, EState 
*estate, int eflags) Assert(prel != NULL); /* Change varno in Vars according to range table */ - expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) + if (expr_relid > 1) { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == state->partitioned_table) + expr = copyObject(prel->expr); + foreach(lc, estate->es_range_table) { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == expr_relid) + { + ChangeVarNodes(expr, 1, expr_relid, 0); + break; + } } - varno += 1; } + else + expr = prel->expr; /* Prepare state for expression execution */ old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); From 29e22771484352e609ebcbfb43ab46ad65f7df39 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 18:12:12 +0300 Subject: [PATCH 0445/1124] Try to fix clang warning --- src/partition_update.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/partition_update.c b/src/partition_update.c index 17b78b38..828cc6fc 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -142,7 +142,7 @@ partition_update_exec(CustomScanState *node) bool isNull; char relkind; ResultRelInfo *resultRelInfo; - ItemPointer tupleid = NULL; + ItemPointer tupleid; ItemPointerData tuple_ctid; EPQState epqstate; HeapTupleData oldtupdata; @@ -188,6 +188,8 @@ partition_update_exec(CustomScanState *node) oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } + + tupleid = NULL; } else elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); @@ -325,6 +327,7 @@ ldelete:; hufd.xmax); if (!TupIsNull(epqslot)) { + Assert(tupleid != NULL); *tupleid = hufd.ctid; goto ldelete; } From 7d18aa49048606a0ab76406ad789fd43eab5c4b2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 2 May 2017 18:25:04 +0300 Subject: [PATCH 0446/1124] Require tupleid in basic delete function --- src/partition_update.c | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/src/partition_update.c b/src/partition_update.c index 828cc6fc..bafa8a68 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -285,7 +285,7 @@ ExecDeleteInternal(ItemPointer tupleid, /* we don't need slot anymore */ ExecDropSingleTupleTableSlot(slot); } - else + else if (tupleid != NULL) { /* delete the tuple */ ldelete:; @@ -340,6 +340,8 @@ ldelete:; return NULL; } } + else + elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple); From bfa56c94b95adf2fafc628f48fdf4a74b69e665c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 18:33:39 +0300 Subject: [PATCH 0447/1124] introduce inline function PrelExpressionForRelid() --- src/hooks.c | 17 ++++-------- src/include/nodes_common.h | 6 ++--- src/include/relation_info.h | 18 ++++++++++++- src/include/utils.h | 2 +- src/init.c | 2 +- src/nodes_common.c | 48 +++++++++------------------------ src/partition_filter.c | 40 +++++++++++++-------------- src/pg_pathman.c | 6 ++--- src/planner_tree_modification.c | 6 ++--- src/utils.c | 4 +-- 10 files changed, 66 insertions(+), 83 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 4877ce0b..33a53f7a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -130,12 +130,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* Make copy of partitioning expression and fix Var's varno attributes */ - expr = inner_prel->expr; - if (innerrel->relid != 1) - { - expr = copyObject(expr); - ChangeVarNodes(expr, 1, innerrel->relid, 0); - } + expr = PrelExpressionForRelid(inner_prel, innerrel->relid); paramsel = 1.0; foreach (lc, joinclauses) @@ -196,9 +191,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, innerrel->ppilist = saved_ppi_list; /* Skip ppi->ppi_clauses don't reference partition attribute */ - if (!(ppi && get_partitioned_attr_clauses(ppi->ppi_clauses, - inner_prel, - innerrel->relid))) + if (!(ppi && 
get_partitioning_clauses(ppi->ppi_clauses, + inner_prel, + innerrel->relid))) continue; inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); @@ -319,9 +314,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, bool modify_append_nodes; /* Make copy of partitioning expression and fix Var's varno attributes */ - expr = copyObject(prel->expr); - if (rti != 1) - ChangeVarNodes(expr, 1, rti, 0); + expr = PrelExpressionForRelid(prel, rti); if (prel->parttype == PT_RANGE) { diff --git a/src/include/nodes_common.h b/src/include/nodes_common.h index 2648663b..b996ea61 100644 --- a/src/include/nodes_common.h +++ b/src/include/nodes_common.h @@ -65,9 +65,9 @@ clear_plan_states(CustomScanState *scan_state) } } -List * get_partitioned_attr_clauses(List *restrictinfo_list, - const PartRelationInfo *prel, - Index partitioned_rel); +List * get_partitioning_clauses(List *restrictinfo_list, + const PartRelationInfo *prel, + Index partitioned_rel); Oid * get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d5e81b28..73e59232 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -21,6 +21,7 @@ #include "nodes/primnodes.h" #include "nodes/value.h" #include "port/atomics.h" +#include "rewrite/rewriteManip.h" #include "storage/lock.h" #include "utils/datum.h" #include "utils/lsyscache.h" @@ -207,7 +208,7 @@ typedef enum /* - * PartRelationInfo field access macros. + * PartRelationInfo field access macros & functions. 
*/ #define PrelParentRelid(prel) ( (prel)->key ) @@ -249,6 +250,21 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) return columns; } +static inline Node * +PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) +{ + Node *expr; + + if (rel_index != PART_EXPR_VARNO) + { + expr = copyObject(prel->expr); + ChangeVarNodes(expr, PART_EXPR_VARNO, rel_index, 0); + } + else expr = prel->expr; + + return expr; +} + const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, diff --git a/src/include/utils.h b/src/include/utils.h index b54e4e0f..5c3b4ba5 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -24,7 +24,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); -bool expr_matches_operand(Node *operand, Node *expr); +bool match_expr_to_operand(Node *expr, Node *operand); /* * Misc. diff --git a/src/init.c b/src/init.c index 6f939ceb..62446e07 100644 --- a/src/init.c +++ b/src/init.c @@ -1109,7 +1109,7 @@ validate_hash_constraint(const Expr *expr, hash_arg = (Node *) linitial(type_hash_proc_expr->args); /* Check arg of TYPE_HASH_PROC() */ - if (!expr_matches_operand(prel->expr, hash_arg)) + if (!match_expr_to_operand(prel->expr, hash_arg)) return false; /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 2132f840..9949327b 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -315,65 +315,41 @@ unpack_runtimeappend_private(RuntimeAppendState *scan_state, CustomScan *cscan) scan_state->enable_parent = (bool) linitial_int(lthird(runtimeappend_private)); } -struct check_clause_context -{ - Node *prel_expr; - int count; -}; /* Check that one of arguments of OpExpr is expression */ static bool -check_clause_for_expression(Node *node, struct check_clause_context *ctx) +clause_contains_prel_expr(Node *node, Node *prel_expr) { if (node == NULL) 
return false; - if (IsA(node, OpExpr)) - { - OpExpr *expr = (OpExpr *) node; - Node *left = linitial(expr->args), - *right = lsecond(expr->args); - - if (expr_matches_operand(left, ctx->prel_expr)) - ctx->count += 1; - - if (expr_matches_operand(right, ctx->prel_expr)) - ctx->count += 1; - - return false; - } + if (match_expr_to_operand(node, prel_expr)) + return true; - return expression_tree_walker(node, check_clause_for_expression, (void *) ctx); + return expression_tree_walker(node, clause_contains_prel_expr, prel_expr); } /* * Filter all available clauses and extract relevant ones. */ List * -get_partitioned_attr_clauses(List *restrictinfo_list, - const PartRelationInfo *prel, - Index partitioned_rel) +get_partitioning_clauses(List *restrictinfo_list, + const PartRelationInfo *prel, + Index partitioned_rel) { List *result = NIL; ListCell *l; foreach(l, restrictinfo_list) { - RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - struct check_clause_context ctx; + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + Node *prel_expr; Assert(IsA(rinfo, RestrictInfo)); - ctx.count = 0; - ctx.prel_expr = prel->expr; - if (partitioned_rel != 1) - { - ctx.prel_expr = copyObject(prel->expr); - ChangeVarNodes(ctx.prel_expr, 1, partitioned_rel, 0); - } - check_clause_for_expression((Node *) rinfo->clause, &ctx); + prel_expr = PrelExpressionForRelid(prel, partitioned_rel); - if (ctx.count == 1) + if (clause_contains_prel_expr((Node *) rinfo->clause, prel_expr)) result = lappend(result, rinfo->clause); } return result; @@ -591,7 +567,7 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Since we're not scanning any real table directly */ cscan->scan.scanrelid = 0; - cscan->custom_exprs = get_partitioned_attr_clauses(clauses, prel, rel->relid); + cscan->custom_exprs = get_partitioning_clauses(clauses, prel, rel->relid); cscan->custom_plans = custom_plans; cscan->methods = scan_methods; diff --git a/src/partition_filter.c b/src/partition_filter.c index 
d60adb2d..ac0e5528 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -417,7 +417,7 @@ select_partition_for_insert(Datum value, Oid value_type, ResultPartsStorage *parts_storage, EState *estate) { - MemoryContext old_cxt; + MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; Oid selected_partid = InvalidOid; Oid *parts; @@ -439,9 +439,9 @@ select_partition_for_insert(Datum value, Oid value_type, else selected_partid = parts[0]; /* Replace parent table with a suitable partition */ - old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); - MemoryContextSwitchTo(old_cxt); + MemoryContextSwitchTo(old_mcxt); /* Could not find suitable partition */ if (rri_holder == NULL) @@ -531,12 +531,12 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; - Node *expr; - MemoryContext old_cxt; - PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; - ListCell *lc; + Index varno = 1; + Node *expr; + MemoryContext old_mcxt; + PartitionFilterState *state = (PartitionFilterState *) node; + const PartRelationInfo *prel; + ListCell *lc; /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); @@ -562,9 +562,9 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) } /* Prepare state for expression execution */ - old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_cxt); + MemoryContextSwitchTo(old_mcxt); } /* Init ResultRelInfo cache */ @@ -595,13 +595,13 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { - MemoryContext old_cxt; - const 
PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder; - bool isnull; - Datum value; - ExprDoneCond itemIsDone; - TupleTableSlot *tmp_slot; + MemoryContext old_mcxt; + const PartRelationInfo *prel; + ResultRelInfoHolder *rri_holder; + bool isnull; + Datum value; + ExprDoneCond itemIsDone; + TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -616,7 +616,7 @@ partition_filter_exec(CustomScanState *node) } /* Switch to per-tuple context */ - old_cxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; @@ -635,7 +635,7 @@ partition_filter_exec(CustomScanState *node) &state->result_parts, estate); /* Switch back and clean up per-tuple context */ - MemoryContextSwitchTo(old_cxt); + MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); /* Magic: replace parent's ResultRelInfo with ours */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 5e9a15fd..9455b856 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -867,7 +867,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) Assert(exprnode != NULL); - if (!expr_matches_operand(context->prel_expr, exprnode)) + if (!match_expr_to_operand(context->prel_expr, exprnode)) goto handle_arrexpr_return; if (arraynode && IsA(arraynode, Const) && @@ -1145,14 +1145,14 @@ pull_var_param(const WalkerContext *ctx, Node *left = linitial(expr->args), *right = lsecond(expr->args); - if (expr_matches_operand(left, ctx->prel_expr)) + if (match_expr_to_operand(left, ctx->prel_expr)) { *var_ptr = left; *param_ptr = right; return true; } - if (expr_matches_operand(right, ctx->prel_expr)) + if (match_expr_to_operand(right, ctx->prel_expr)) { *var_ptr = right; *param_ptr = left; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 
fa36ce78..4a804101 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -239,13 +239,13 @@ static void handle_modification_query(Query *parse) { const PartRelationInfo *prel; + Node *prel_expr; List *ranges; RangeTblEntry *rte; WrapperNode *wrap; Expr *expr; WalkerContext context; Index result_rel; - Node *prel_expr; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -277,9 +277,7 @@ handle_modification_query(Query *parse) if (!expr) return; /* Prepare partitioning expression */ - prel_expr = copyObject(prel->expr); - if (result_rel != 1) - ChangeVarNodes(prel_expr, 1, result_rel, 0); + prel_expr = PrelExpressionForRelid(prel, result_rel); /* Parse syntax tree and extract partition ranges */ InitWalkerContext(&context, prel_expr, prel, NULL, false); diff --git a/src/utils.c b/src/utils.c index dc40a8de..a04d4a67 100644 --- a/src/utils.c +++ b/src/utils.c @@ -108,9 +108,9 @@ check_security_policy_internal(Oid relid, Oid role) /* Compare clause operand with expression */ bool -expr_matches_operand(Node *operand, Node *expr) +match_expr_to_operand(Node *expr, Node *operand) { - /* strip relabeling for both operand and expr */ + /* Strip relabeling for both operand and expr */ if (operand && IsA(operand, RelabelType)) operand = (Node *) ((RelabelType *) operand)->arg; From 988fe9ae86cbc057d4898f109ee27d9909584c8c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 18:38:53 +0300 Subject: [PATCH 0448/1124] remove function clause_contains_params() --- src/utils.c | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/utils.c b/src/utils.c index a04d4a67..bd9365ed 100644 --- a/src/utils.c +++ b/src/utils.c @@ -34,32 +34,6 @@ #include "utils/typcache.h" -static bool clause_contains_params_walker(Node *node, void *context); - - -/* - * Check whether clause contains PARAMs or not - */ -bool -clause_contains_params(Node *clause) -{ - return expression_tree_walker(clause, - 
clause_contains_params_walker, - NULL); -} - -static bool -clause_contains_params_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - if (IsA(node, Param)) - return true; - return expression_tree_walker(node, - clause_contains_params_walker, - context); -} - /* * Check if this is a "date"-related type. */ From 083e6a11f3ad3feb69e46e5a60039d9dfc241706 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 2 May 2017 18:50:29 +0300 Subject: [PATCH 0449/1124] remove function get_attribute_type() --- src/include/utils.h | 1 - src/utils.c | 28 ---------------------------- 2 files changed, 29 deletions(-) diff --git a/src/include/utils.h b/src/include/utils.h index 5c3b4ba5..16100df7 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -37,7 +37,6 @@ List * list_reverse(List *l); */ Oid get_rel_owner(Oid relid); char * get_rel_name_or_relid(Oid relid); -Oid get_attribute_type(Oid relid, const char *attname, bool missing_ok); RangeVar *makeRangeVarFromRelid(Oid relid); /* diff --git a/src/utils.c b/src/utils.c index bd9365ed..5f070e30 100644 --- a/src/utils.c +++ b/src/utils.c @@ -193,34 +193,6 @@ get_rel_name_or_relid(Oid relid) return relname; } -/* - * Get type of column by its name. 
- */ -Oid -get_attribute_type(Oid relid, const char *attname, bool missing_ok) -{ - Oid result; - HeapTuple tp; - - /* NOTE: for now it's the most efficient way */ - tp = SearchSysCacheAttName(relid, attname); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = att_tup->atttypid; - ReleaseSysCache(tp); - - return result; - } - - if (!missing_ok) - elog(ERROR, "cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - attname, get_rel_name_or_relid(relid)); - - return InvalidOid; -} - RangeVar * makeRangeVarFromRelid(Oid relid) { From 566b8f584c09a055577ca3418ddbc9025728f51a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 3 May 2017 15:59:37 +0300 Subject: [PATCH 0450/1124] slightly reworked function add_to_pathman_config() --- expected/pathman_calamity.out | 4 +-- hash.sql | 2 +- init.sql | 16 +++++++--- range.sql | 10 +++--- sql/pathman_calamity.sql | 4 +-- src/init.c | 2 +- src/pl_funcs.c | 59 ++++++++++++++++++++++------------- 7 files changed, 60 insertions(+), 37 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 8b5d0ae8..d122908d 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -553,9 +553,9 @@ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ ERROR: relation "0" does not exist -SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL -SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ ERROR: cannot find type name for attribute "v_a_l" of relation "part_test" SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ add_to_pathman_config diff --git 
a/hash.sql b/hash.sql index c942e8c6..4c21f9df 100644 --- a/hash.sql +++ b/hash.sql @@ -36,7 +36,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression); /* Create partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, diff --git a/init.sql b/init.sql index e27b533e..06840410 100644 --- a/init.sql +++ b/init.sql @@ -857,15 +857,21 @@ LANGUAGE C STRICT; /* - * Add record to pathman_config. If parttype if not specified then determine - * partitioning type. + * Add record to pathman_config (RANGE) and validate partitions. */ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, expression TEXT, - range_interval TEXT DEFAULT NULL, - parttype INT4 DEFAULT 0 -) + range_interval TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + +/* + * Add record to pathman_config (HASH) and validate partitions. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; diff --git a/range.sql b/range.sql index 40894c7e..371a9f83 100644 --- a/range.sql +++ b/range.sql @@ -156,7 +156,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -254,7 +254,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -313,7 +313,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, 2); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -365,7 +365,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); @@ -423,7 +423,7 @@ BEGIN /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); + p_interval::TEXT); /* Create sequence for child partitions names */ PERFORM @extschema@.create_or_replace_sequence(parent_relid); diff --git 
a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 8ccb1723..c78e0bf1 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -236,8 +236,8 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); /* check function add_to_pathman_config() -- PHASE #1 */ SELECT add_to_pathman_config(NULL, 'val'); /* no table */ SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -SELECT add_to_pathman_config('calamity.part_test', NULL); /* no column */ -SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong column */ +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ SELECT disable_pathman_for('calamity.part_test'); SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ diff --git a/src/init.c b/src/init.c index 62446e07..dbb028da 100644 --- a/src/init.c +++ b/src/init.c @@ -619,7 +619,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Open PATHMAN_CONFIG with latest snapshot available */ rel = heap_open(get_pathman_config_relid(false), AccessShareLock); - /* Check that 'partrel' column is if regclass type */ + /* Check that 'partrel' column is of regclass type */ Assert(RelationGetDescr(rel)-> attrs[Anum_pathman_config_partrel - 1]-> atttypid == REGCLASSOID); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 41297f02..6182cc1a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -735,29 +735,57 @@ add_to_pathman_config(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); + /* Check current user's privileges */ if (!check_security_policy_internal(relid, GetUserId())) { - elog(ERROR, "only the owner or superuser can change " - "partitioning configuration of table \"%s\"", - get_rel_name_or_relid(relid)); + ereport(ERROR, + 
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only the owner or superuser can change " + "partitioning configuration of table \"%s\"", + get_rel_name_or_relid(relid)))); } /* Select partitioning type */ - parttype = PG_GETARG_INT32(3); - if ((parttype != PT_HASH) && (parttype != PT_RANGE)) - parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; + switch (PG_NARGS()) + { + /* HASH */ + case 2: + { + parttype = PT_HASH; + + values[Anum_pathman_config_range_interval - 1] = (Datum) 0; + isnull[Anum_pathman_config_range_interval - 1] = true; + } + break; + + /* RANGE */ + case 3: + { + parttype = PT_RANGE; + + values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); + isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); + } + break; + + default: + elog(ERROR, "error in function " CppAsString(add_to_pathman_config)); + PG_RETURN_BOOL(false); /* keep compiler happy */ + } /* Parse and check expression */ expr_datum = plan_partitioning_expression(relid, expression, &expr_type); - /* Expression for range partitions should be hashable */ + /* Check hash function for HASH partitioning */ if (parttype == PT_HASH) { TypeCacheEntry *tce; tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); - if (tce->hash_proc == InvalidOid) - elog(ERROR, "partitioning expression should be hashable"); + if (!OidIsValid(tce->hash_proc)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no hash function for partitioning expression"))); } /* @@ -778,17 +806,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); isnull[Anum_pathman_config_atttype - 1] = false; - if (parttype == PT_RANGE) - { - values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); - isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); - } - else - { - values[Anum_pathman_config_range_interval - 1] = (Datum) 0; - isnull[Anum_pathman_config_range_interval - 1] = true; - } - /* Insert new row into 
PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -798,7 +815,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) heap_close(pathman_config, RowExclusiveLock); - /* update caches only if this relation has children */ + /* Update caches only if this relation has children */ if (has_subclass(relid)) { /* Now try to create a PartRelationInfo */ From 73691513d8bff3996210314f749cde093f4e08a2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 16:56:02 +0300 Subject: [PATCH 0451/1124] Fix tests and tuple conversion for inverted or dropped columns --- Makefile | 6 +- expected/pathman_update_node.out | 313 ++++++++++++++++++++----------- sql/pathman_update_node.sql | 105 ++++++----- src/hooks.c | 24 ++- src/include/hooks.h | 1 + src/include/partition_update.h | 3 +- src/include/relation_info.h | 6 +- src/include/utils.h | 1 - src/partition_filter.c | 59 ++++-- src/partition_update.c | 11 +- src/pg_pathman.c | 1 + src/pl_funcs.c | 47 +---- src/relation_info.c | 45 +++++ src/utils.c | 1 + 14 files changed, 381 insertions(+), 242 deletions(-) diff --git a/Makefile b/Makefile index 793e1579..bb4ff894 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) + src/compat/rowmarks_fix.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include @@ -41,10 +41,10 @@ REGRESS = pathman_basic \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ - pathman_update_node \ pathman_updates \ pathman_utility_stmt \ - pathman_expressions + pathman_expressions \ + pathman_update_node EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git 
a/expected/pathman_update_node.out b/expected/pathman_update_node.out index e15d04f9..0867c58c 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -1,279 +1,370 @@ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; +CREATE SCHEMA test_update_node; SET pg_pathman.enable_partitionupdate=on; /* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); NOTICE: sequence "test_range_seq" does not exist, skipping create_range_partitions ------------------------- 10 (1 row) +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------- + Insert on test_range_2 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(7 rows) + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------- + Insert on test_range_2 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: 
(val = '15'::numeric) +(7 rows) + +/* Scan all partitions */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; + QUERY PLAN +---------------------------------------------------- + Insert on test_range + Insert on test_range + Insert on test_range_1 + Insert on test_range_2 + Insert on test_range_3 + Insert on test_range_4 + Insert on test_range_5 + Insert on test_range_6 + Insert on test_range_7 + Insert on test_range_8 + Insert on test_range_9 + Insert on test_range_10 + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_1 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_2 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_3 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_4 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_5 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_6 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_7 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_8 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_9 + Filter: (comment = '15'::text) + -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionFilter) + -> Seq Scan on test_range_10 + Filter: (comment = '15'::text) +(56 rows) + /* Update values in 1st 
partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val < 10 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_1 | 5 | 1 - test_update_trigger.test_range_1 | 5 | 10 - test_update_trigger.test_range_1 | 5 | 2 - test_update_trigger.test_range_1 | 5 | 3 - test_update_trigger.test_range_1 | 5 | 4 - test_update_trigger.test_range_1 | 5 | 5 - test_update_trigger.test_range_1 | 5 | 6 - test_update_trigger.test_range_1 | 5 | 7 - test_update_trigger.test_range_1 | 5 | 8 - test_update_trigger.test_range_1 | 5 | 9 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_1 | 5 | 1 + test_update_node.test_range_1 | 5 | 10 + test_update_node.test_range_1 | 5 | 2 + test_update_node.test_range_1 | 5 | 3 + test_update_node.test_range_1 | 5 | 4 + test_update_node.test_range_1 | 5 | 5 + test_update_node.test_range_1 | 5 | 6 + test_update_node.test_range_1 | 5 | 7 + test_update_node.test_range_1 | 5 | 8 + test_update_node.test_range_1 | 5 | 9 (10 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val > 20 AND val <= 30 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_3 | 21 | 11 - 
test_update_trigger.test_range_3 | 22 | 12 - test_update_trigger.test_range_3 | 23 | 13 - test_update_trigger.test_range_3 | 24 | 14 - test_update_trigger.test_range_3 | 25 | 15 - test_update_trigger.test_range_3 | 26 | 16 - test_update_trigger.test_range_3 | 27 | 17 - test_update_trigger.test_range_3 | 28 | 18 - test_update_trigger.test_range_3 | 29 | 19 - test_update_trigger.test_range_3 | 30 | 20 - test_update_trigger.test_range_3 | 21 | 21 - test_update_trigger.test_range_3 | 22 | 22 - test_update_trigger.test_range_3 | 23 | 23 - test_update_trigger.test_range_3 | 24 | 24 - test_update_trigger.test_range_3 | 25 | 25 - test_update_trigger.test_range_3 | 26 | 26 - test_update_trigger.test_range_3 | 27 | 27 - test_update_trigger.test_range_3 | 28 | 28 - test_update_trigger.test_range_3 | 29 | 29 - test_update_trigger.test_range_3 | 30 | 30 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_3 | 21 | 11 + test_update_node.test_range_3 | 22 | 12 + test_update_node.test_range_3 | 23 | 13 + test_update_node.test_range_3 | 24 | 14 + test_update_node.test_range_3 | 25 | 15 + test_update_node.test_range_3 | 26 | 16 + test_update_node.test_range_3 | 27 | 17 + test_update_node.test_range_3 | 28 | 18 + test_update_node.test_range_3 | 29 | 19 + test_update_node.test_range_3 | 30 | 20 + test_update_node.test_range_3 | 21 | 21 + test_update_node.test_range_3 | 22 | 22 + test_update_node.test_range_3 | 23 | 23 + test_update_node.test_range_3 | 24 | 24 + test_update_node.test_range_3 | 25 | 25 + test_update_node.test_range_3 | 26 | 26 + test_update_node.test_range_3 | 27 | 27 + test_update_node.test_range_3 | 28 | 28 + test_update_node.test_range_3 | 29 | 29 + test_update_node.test_range_3 | 30 | 30 (20 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; 
+UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; /* Check values #3 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_9 | 90 | 80 - test_update_trigger.test_range_9 | 90 | 90 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_9 | 90 | 80 + test_update_node.test_range_9 | 90 | 90 (2 rows) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; /* Check values #4 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_11 | -1 | 50 + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_11 | -1 | 50 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; /* Check values #5 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_10 | 100 | test! + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_10 | 100 | test! 
(1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 100 (1 row) /* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; ERROR: cannot spawn a partition /* Check values #6 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 70 | 70 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 70 | 70 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; /* Check values #7 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 65 | 65 + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 65 | 65 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); 
+SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); attach_range_partition ------------------------------------ - test_update_trigger.test_range_inv + test_update_node.test_range_inv (1 row) -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; /* Check values #8 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_inv | 105 | 60 + test_update_node.test_range_inv | 105 | 60 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 row) /* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); append_range_partition ----------------------------------- - test_update_trigger.test_range_12 + test_update_node.test_range_12 (1 row) -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; /* Check values #9 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 115; tableoid | val -----------------------------------+----- - test_update_trigger.test_range_12 | 115 + test_update_node.test_range_12 | 115 (1 row) -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; count ------- 90 (1 
row) /* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); create_hash_partitions ------------------------ 3 (1 row) /* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; +UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; tableoid | val | comment ---------------------------------+-----+--------- - test_update_trigger.test_hash_2 | 1 | 1 - test_update_trigger.test_hash_2 | 1 | 10 - test_update_trigger.test_hash_2 | 1 | 2 - test_update_trigger.test_hash_2 | 1 | 3 - test_update_trigger.test_hash_2 | 1 | 4 - test_update_trigger.test_hash_2 | 1 | 5 - test_update_trigger.test_hash_2 | 1 | 6 - test_update_trigger.test_hash_2 | 1 | 7 - test_update_trigger.test_hash_2 | 1 | 8 - test_update_trigger.test_hash_2 | 1 | 9 + test_update_node.test_hash_2 | 1 | 1 + test_update_node.test_hash_2 | 1 | 10 + test_update_node.test_hash_2 | 1 | 2 + test_update_node.test_hash_2 | 1 | 3 + test_update_node.test_hash_2 | 1 | 4 + test_update_node.test_hash_2 | 1 | 5 + test_update_node.test_hash_2 | 1 | 6 + test_update_node.test_hash_2 | 1 | 7 + test_update_node.test_hash_2 | 1 | 8 + test_update_node.test_hash_2 | 1 | 9 (10 rows) -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; count ------- 10 (1 row) /* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +UPDATE 
test_update_node.test_hash SET val = 3 WHERE val = 2; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 3 ORDER BY comment; tableoid | val | comment ----------+-----+--------- (0 rows) -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; count ------- 10 (1 row) -DROP SCHEMA test_update_trigger CASCADE; +DROP SCHEMA test_update_node CASCADE; NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index c3cc8d4d..75fc6c64 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -2,161 +2,172 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; +CREATE SCHEMA test_update_node; SET pg_pathman.enable_partitionupdate=on; /* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + +/* Scan all partitions */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; /* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; +UPDATE 
test_update_node.test_range SET val = 5 WHERE val <= 10; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val < 10 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; /* Check values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val > 20 AND val <= 30 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; /* Check values #3 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; /* Check values #4 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; +UPDATE test_update_node.test_range SET comment = 'test!' 
WHERE val = 100; /* Check values #5 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; /* Check values #6 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; /* Check values #7 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; /* Check values #8 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM 
test_update_node.test_range WHERE val = 105 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; /* Check values #9 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range +FROM test_update_node.test_range WHERE val = 115; -SELECT count(*) FROM test_update_trigger.test_range; +SELECT count(*) FROM test_update_node.test_range; /* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); /* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; +UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; /* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; /* Check 
values #2 */ SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash +FROM test_update_node.test_hash WHERE val = 3 ORDER BY comment; -SELECT count(*) FROM test_update_trigger.test_hash; +SELECT count(*) FROM test_update_node.test_hash; -DROP SCHEMA test_update_trigger CASCADE; +DROP SCHEMA test_update_node CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index e6a4887d..7e6118b7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -65,6 +65,7 @@ planner_hook_type planner_hook_next = NULL; post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; shmem_startup_hook_type shmem_startup_hook_next = NULL; ProcessUtility_hook_type process_utility_hook_next = NULL; +ExecutorRun_hook_type executor_run_hook_next = NULL; /* Take care of joins */ @@ -856,18 +857,23 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, { CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; - if (IsA(subplanstate, CustomScanState)) + if (!IsA(subplanstate, CustomScanState)) + continue; + + if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - if (strcmp(subplanstate->methods->CustomName, "PrepareInsert") == 0) - { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - cstate->parent_state = mt_state; - cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; - mt_state->resultRelInfo->ri_junkFilter = NULL; - } + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + cstate->parent_state = mt_state; + cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; + mt_state->resultRelInfo->ri_junkFilter = NULL; } } } - standard_ExecutorRun(queryDesc, direction, count); + /* Call hooks set by other extensions if needed */ + if (executor_run_hook_next) + executor_run_hook_next(queryDesc, direction, count); + /* Else call internal implementation */ + else + standard_ExecutorRun(queryDesc, direction, count); } diff --git a/src/include/hooks.h 
b/src/include/hooks.h index 15fa9906..b93b4ba8 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -27,6 +27,7 @@ extern planner_hook_type planner_hook_next; extern post_parse_analyze_hook_type post_parse_analyze_hook_next; extern shmem_startup_hook_type shmem_startup_hook_next; extern ProcessUtility_hook_type process_utility_hook_next; +extern ExecutorRun_hook_type executor_run_hook_next; void pathman_join_pathlist_hook(PlannerInfo *root, diff --git a/src/include/partition_update.h b/src/include/partition_update.h index fc0c0033..ea73bfed 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -33,7 +33,8 @@ typedef struct PartitionUpdateState Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; -extern bool pg_pathman_enable_partition_update; +extern bool pg_pathman_enable_partition_update; +extern const char *UPDATE_NODE_DESCRIPTION; extern CustomScanMethods partition_update_plan_methods; extern CustomExecMethods partition_update_exec_methods; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d5e81b28..0dae9458 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -24,6 +24,7 @@ #include "storage/lock.h" #include "utils/datum.h" #include "utils/lsyscache.h" +#include "utils/relcache.h" /* Range bound */ @@ -370,6 +371,9 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); +AttrNumber * build_attributes_map(const PartRelationInfo *prel, + Relation child_rel, + int *map_length); -#endif /* RELATION_INFO_H */ +#endif /* RELATION_INFO_H */ diff --git a/src/include/utils.h b/src/include/utils.h index b54e4e0f..3e5c65a8 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -61,5 +61,4 @@ Datum extract_binary_interval_from_text(Datum interval_text, char ** deconstruct_text_array(Datum array, int *array_size); RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); - #endif /* 
PATHMAN_UTILS_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index c13ca974..9daf8251 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -570,20 +570,31 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) const PartRelationInfo *prel; ListCell *lc; PlanState *child_state; - Index expr_relid = 1; + Index expr_varno = 1; child_state = ExecInitNode(state->subplan, estate, eflags); /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(child_state); + if (state->command_type == CMD_UPDATE) + expr_varno = ((Scan *) child_state->plan)->scanrelid; + else { - Assert(IsA(child_state, SeqScanState)); - expr_relid = ((Scan *) ((ScanState *) child_state)->ps.plan)->scanrelid; - Assert(expr_relid >= 1); + Index varno = 1; + + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == state->partitioned_table) + break; + varno++; + } + + expr_varno = varno; + Assert(expr_varno <= list_length(estate->es_range_table)); } - else - expr_relid = state->partitioned_table; + if (state->expr_state == NULL) { @@ -591,23 +602,35 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in Vars according to range table */ - if (expr_relid > 1) + /* Change varno in expression Vars according to range table */ + Assert(expr_varno >= 1); + if (expr_varno > 1) { expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == expr_relid) - { - ChangeVarNodes(expr, 1, expr_relid, 0); - break; - } - } + ChangeVarNodes(expr, 1, expr_varno, 0); } else expr = prel->expr; + /* + * Also in updates we would operate with child relation, but + * expression expects varattnos like in base relation, so we map + * parent varattnos to child varattnos + */ + if (state->command_type == CMD_UPDATE) + 
{ + int natts; + bool found_whole_row; + AttrNumber *attr_map; + Oid child_relid = getrelid(expr_varno, estate->es_range_table); + Relation child_rel = heap_open(child_relid, NoLock); + + attr_map = build_attributes_map(prel, child_rel, &natts); + expr = map_variable_attnos(expr, expr_varno, 0, attr_map, natts, + &found_whole_row); + heap_close(child_rel, NoLock); + } + /* Prepare state for expression execution */ old_cxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); @@ -704,7 +727,7 @@ partition_filter_exec(CustomScanState *node) /* * extract `ctid` junk attribute and save it in state, * we need this step because if there will be conversion - * junk attributes will be removed from slot + * then junk attributes will be removed from slot */ junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); diff --git a/src/partition_update.c b/src/partition_update.c index bafa8a68..66fdde4b 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,6 +22,7 @@ #include "utils/guc.h" #include "utils/rel.h" +const char *UPDATE_NODE_DESCRIPTION = "PrepareInsert"; bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; @@ -36,10 +37,10 @@ static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, void init_partition_update_static_data(void) { - partition_update_plan_methods.CustomName = "PrepareInsert"; + partition_update_plan_methods.CustomName = UPDATE_NODE_DESCRIPTION; partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; - partition_update_exec_methods.CustomName = "PrepareInsert"; + partition_update_exec_methods.CustomName = UPDATE_NODE_DESCRIPTION; partition_update_exec_methods.BeginCustomScan = partition_update_begin; partition_update_exec_methods.ExecCustomScan = partition_update_exec; partition_update_exec_methods.EndCustomScan = partition_update_end; @@ -278,9 +279,9 @@ ExecDeleteInternal(ItemPointer tupleid, 
*/ ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, - resultRelInfo, - slot, - planSlot); + resultRelInfo, + slot, + planSlot); /* we don't need slot anymore */ ExecDropSingleTupleTableSlot(slot); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a6aa2c73..2d44f55b 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -165,6 +165,7 @@ _PG_init(void) planner_hook = pathman_planner_hook; process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + executor_run_hook_next = ExecutorRun_hook; ExecutorRun_hook = pathman_executor_hook; /* Initialize PgPro-specific subsystems */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 41297f02..19d16432 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -100,9 +100,6 @@ typedef struct } show_cache_stats_cxt; -static AttrNumber *pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel); - static ExprState *pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, Relation source_rel, HeapTuple new_tuple, @@ -1258,48 +1255,6 @@ replace_vars_with_consts(Node *node, struct replace_vars_cxt *ctx) return expression_tree_mutator(node, replace_vars_with_consts, (void *) ctx); } -/* - * Get attributes map between parent and child relation. - * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. 
- */ -static AttrNumber * -pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel) -{ - AttrNumber i = -1; - Oid parent_relid = PrelParentRelid(prel); - TupleDesc child_descr = RelationGetDescr(child_rel); - int natts = child_descr->natts; - AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - { - int j; - AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); - - for (j = 0; j < natts; j++) - { - Form_pg_attribute att = child_descr->attrs[j]; - - if (att->attisdropped) - continue; /* attrMap[attnum - 1] is already 0 */ - - if (strcmp(NameStr(att->attname), attname) == 0) - { - result[attnum - 1] = (AttrNumber) (j + 1); - break; - } - } - - if (result[attnum - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); - } - - return result; -} - static ExprState * pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, Relation source_rel, @@ -1311,7 +1266,7 @@ pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, ExprState *expr_state; ctx.new_tuple = new_tuple; - ctx.attributes_map = pathman_update_trigger_build_attr_map(prel, source_rel); + ctx.attributes_map = build_attributes_map(prel, source_rel, NULL); ctx.tuple_desc = RelationGetDescr(source_rel); expr = replace_vars_with_consts(prel->expr, &ctx); diff --git a/src/relation_info.c b/src/relation_info.c index e5f25e38..d69f41ae 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1348,3 +1348,48 @@ shout_if_prel_is_invalid(const Oid parent_oid, expected_str); } } + +/* + * Get attributes map between parent and child relation. + * This is simplified version of functions that return TupleConversionMap. + * And it should be faster if expression uses not all fields from relation. 
+ */ +AttrNumber * +build_attributes_map(const PartRelationInfo *prel, Relation child_rel, + int *map_length) +{ + AttrNumber i = -1; + Oid parent_relid = PrelParentRelid(prel); + TupleDesc child_descr = RelationGetDescr(child_rel); + int natts = child_descr->natts; + AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); + + if (map_length != NULL) + *map_length = natts; + + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + int j; + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(parent_relid, attnum); + + for (j = 0; j < natts; j++) + { + Form_pg_attribute att = child_descr->attrs[j]; + + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ + + if (strcmp(NameStr(att->attname), attname) == 0) + { + result[attnum - 1] = (AttrNumber) (j + 1); + break; + } + } + + if (result[attnum - 1] == 0) + elog(ERROR, "Couldn't find '%s' column in child relation", attname); + } + + return result; +} diff --git a/src/utils.c b/src/utils.c index a6b05189..f1576b9a 100644 --- a/src/utils.c +++ b/src/utils.c @@ -575,3 +575,4 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) return rangevars; } + From 336b2a7b7670dc16ffbc903da96c410aa2b68996 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 3 May 2017 17:19:34 +0300 Subject: [PATCH 0452/1124] get rid of extract_column_names_cxt --- src/partition_creation.c | 44 +++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 68d431b7..7f170808 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -101,9 +101,9 @@ create_single_range_partition_internal(Oid parent_relid, { Oid partition_relid; Constraint *check_constr; - Node *part_expr; init_callback_params callback_params; - List *trigger_columns; + List *trigger_columns = NIL; + Node *expr; /* Generate a name if asked to */ if (!partition_rv) @@ -118,7 
+118,7 @@ create_single_range_partition_internal(Oid parent_relid, } /* Check pathman config anld fill variables */ - part_expr = build_partitioning_expression(parent_relid, NULL, &trigger_columns); + expr = build_partitioning_expression(parent_relid, NULL, &trigger_columns); /* Create a partition & get 'partitioning expression' */ partition_relid = create_single_partition_internal(parent_relid, @@ -127,7 +127,7 @@ create_single_range_partition_internal(Oid parent_relid, /* Build check constraint for RANGE partition */ check_constr = build_range_check_constraint(partition_relid, - part_expr, + expr, start_value, end_value, value_type); @@ -160,9 +160,9 @@ create_single_hash_partition_internal(Oid parent_relid, Oid partition_relid, expr_type; Constraint *check_constr; - Node *expr; init_callback_params callback_params; - List *trigger_columns; + List *trigger_columns = NIL; + Node *expr; /* Generate a name if asked to */ if (!partition_rv) @@ -1213,20 +1213,20 @@ build_range_check_constraint(Oid child_relid, const Bound *end_value, Oid value_type) { - Constraint *hash_constr; + Constraint *range_constr; char *range_constr_name; /* Build a correct name for this constraint */ range_constr_name = build_check_constraint_name_relid_internal(child_relid); /* Initialize basic properties of a CHECK constraint */ - hash_constr = make_constraint_common(range_constr_name, - build_raw_range_check_tree(raw_expression, - start_value, - end_value, - value_type)); + range_constr = make_constraint_common(range_constr_name, + build_raw_range_check_tree(raw_expression, + start_value, + end_value, + value_type)); /* Everything seems to be fine */ - return hash_constr; + return range_constr; } /* Check if range overlaps with any partitions */ @@ -1300,7 +1300,6 @@ build_raw_hash_check_tree(Node *raw_expression, A_Expr *eq_oper = makeNode(A_Expr); FuncCall *part_idx_call = makeNode(FuncCall), *hash_call = makeNode(FuncCall); - //ColumnRef *hashed_column = makeNode(ColumnRef); A_Const 
*part_idx_c = makeNode(A_Const), *part_count_c = makeNode(A_Const); @@ -1680,14 +1679,9 @@ text_to_regprocedure(text *proc_signature) return DatumGetObjectId(result); } -typedef struct -{ - List *columns; -} extract_column_names_cxt; - /* Extract column names from raw expression */ static bool -extract_column_names(Node *node, extract_column_names_cxt *cxt) +extract_column_names(Node *node, List **columns) { if (node == NULL) return false; @@ -1698,10 +1692,10 @@ extract_column_names(Node *node, extract_column_names_cxt *cxt) foreach(lc, ((ColumnRef *) node)->fields) if (IsA(lfirst(lc), String)) - cxt->columns = lappend(cxt->columns, lfirst(lc)); + *columns = lappend(*columns, lfirst(lc)); } - return raw_expression_tree_walker(node, extract_column_names, cxt); + return raw_expression_tree_walker(node, extract_column_names, columns); } /* Returns raw partitioning expression + expr_type + columns */ @@ -1732,9 +1726,9 @@ build_partitioning_expression(Oid parent_relid, if (columns) { - extract_column_names_cxt context = { NIL }; - extract_column_names(expr, &context); - *columns = context.columns; + /* Column list should be empty */ + Assert(*columns == NIL); + extract_column_names(expr, columns); } return expr; From c4c530c6e13b8e62b98a050110734537fdcb0a36 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 17:29:39 +0300 Subject: [PATCH 0453/1124] Fix tests --- Makefile | 2 +- expected/pathman_expressions.out | 3 +++ expected/pathman_update_node.out | 34 ++++++++++++++++---------------- sql/pathman_expressions.sql | 3 +++ src/include/partition_update.h | 3 ++- src/partition_update.c | 3 +-- 6 files changed, 27 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index bb4ff894..eb4bda02 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o 
src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/rowmarks_fix.o src/partition_update.o $(WIN32RES) PG_CPPFLAGS = -I$(CURDIR)/src/include diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index b462bf20..ee56306c 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -152,3 +152,6 @@ SELECT COUNT(*) FROM test.range_rel_2; 24 (1 row) +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 17 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 0867c58c..2cd7688a 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -109,7 +109,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val < 10 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_1 | 5 | 1 test_update_node.test_range_1 | 5 | 10 @@ -136,7 +136,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val > 20 AND val <= 30 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_3 | 21 | 11 test_update_node.test_range_3 | 22 | 12 @@ -173,7 +173,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 90 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_9 | 90 | 80 test_update_node.test_range_9 | 90 | 90 @@ -192,7 +192,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = -1 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment --------------------------------+-----+--------- test_update_node.test_range_11 | -1 | 50 (1 row) @@ -210,7 +210,7 @@ 
SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 100 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment --------------------------------+-----+--------- test_update_node.test_range_10 | 100 | test! (1 row) @@ -230,7 +230,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 70 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_7 | 70 | 70 (1 row) @@ -248,7 +248,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 65 ORDER BY comment; - tableoid | val | comment + tableoid | val | comment -------------------------------+-----+--------- test_update_node.test_range_7 | 65 | 65 (1 row) @@ -264,8 +264,8 @@ CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL) SELECT attach_range_partition('test_update_node.test_range', 'test_update_node.test_range_inv', 101::NUMERIC, 111::NUMERIC); - attach_range_partition ------------------------------------- + attach_range_partition +--------------------------------- test_update_node.test_range_inv (1 row) @@ -276,8 +276,8 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; - tableoid | val | comment -------------------------------------+-----+--------- + tableoid | val | comment +---------------------------------+-----+--------- test_update_node.test_range_inv | 105 | 60 (1 row) @@ -290,8 +290,8 @@ SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; SELECT append_range_partition('test_update_node.test_range'); - append_range_partition ------------------------------------ + append_range_partition +-------------------------------- test_update_node.test_range_12 (1 row) @@ -301,8 +301,8 @@ UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; SELECT 
tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 115; - tableoid | val ------------------------------------+----- + tableoid | val +--------------------------------+----- test_update_node.test_range_12 | 115 (1 row) @@ -328,8 +328,8 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_hash WHERE val = 1 ORDER BY comment; - tableoid | val | comment ----------------------------------+-----+--------- + tableoid | val | comment +------------------------------+-----+--------- test_update_node.test_hash_2 | 1 | 1 test_update_node.test_hash_2 | 1 | 10 test_update_node.test_hash_2 | 1 | 2 diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index bc24e30f..c543548b 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -51,3 +51,6 @@ UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= ' SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM test.range_rel_1; SELECT COUNT(*) FROM test.range_rel_2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/include/partition_update.h b/src/include/partition_update.h index ea73bfed..7aed09d1 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -22,6 +22,8 @@ #include "nodes/extensible.h" #endif +#define UPDATE_NODE_DESCRIPTION ("PrepareInsert") + typedef struct PartitionUpdateState { CustomScanState css; @@ -34,7 +36,6 @@ typedef struct PartitionUpdateState } PartitionUpdateState; extern bool pg_pathman_enable_partition_update; -extern const char *UPDATE_NODE_DESCRIPTION; extern CustomScanMethods partition_update_plan_methods; extern CustomExecMethods partition_update_exec_methods; diff --git a/src/partition_update.c b/src/partition_update.c index 66fdde4b..9d122d04 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -22,8 +22,7 @@ #include "utils/guc.h" #include "utils/rel.h" -const char *UPDATE_NODE_DESCRIPTION = "PrepareInsert"; -bool 
pg_pathman_enable_partition_update = true; +bool pg_pathman_enable_partition_update = true; CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; From 23625e64367aefae53eb59304ed0a9d03d5c685a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 3 May 2017 18:17:56 +0300 Subject: [PATCH 0454/1124] remove duplicate include --- src/relation_info.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index e5f25e38..cfff65e0 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -43,7 +43,6 @@ #if PG_VERSION_NUM >= 90600 #include "catalog/pg_constraint_fn.h" -#include "nodes/nodeFuncs.h" #endif From bf4fe65da2706111aefd1f041e5ff5379b1e182d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 3 May 2017 18:59:14 +0300 Subject: [PATCH 0455/1124] Fix updates, add returning tests --- expected/pathman_update_node.out | 41 ++++++++++++++++++++++++++++++++ sql/pathman_update_node.sql | 11 +++++++++ src/hooks.c | 18 ++++++++++---- src/include/partition_update.h | 2 +- src/partition_update.c | 4 ++-- 5 files changed, 69 insertions(+), 7 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 2cd7688a..7fd01a0e 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -281,6 +281,46 @@ ORDER BY comment; test_update_node.test_range_inv | 105 | 60 (1 row) +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; + val | comment +-----+--------- + 71 | 41 +(1 row) + +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; + val | comment +-----+--------- + 71 | 71 + 71 | 41 +(2 rows) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; + val | comment 
+-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 61 | 61 +(1 row) + +/* Just in case, check we don't duplicate anything */ SELECT count(*) FROM test_update_node.test_range; count ------- @@ -306,6 +346,7 @@ WHERE val = 115; test_update_node.test_range_12 | 115 (1 row) +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM test_update_node.test_range; count ------- diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 75fc6c64..5ba660fe 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -120,8 +120,18 @@ FROM test_update_node.test_range WHERE val = 105 ORDER BY comment; +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; SELECT count(*) FROM test_update_node.test_range; +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; /* Test tuple conversion (dropped column) */ ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; @@ -134,6 +144,7 @@ SELECT tableoid::REGCLASS, * FROM test_update_node.test_range WHERE val = 115; +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM test_update_node.test_range; diff --git a/src/hooks.c b/src/hooks.c index 7e6118b7..3fdb3abb 
100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -862,10 +862,20 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - cstate->parent_state = mt_state; - cstate->saved_junkFilter = mt_state->resultRelInfo->ri_junkFilter; - mt_state->resultRelInfo->ri_junkFilter = NULL; + PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; + + /* Save parent resultRelInfo in PartitionUpdate node */ + cstate->resultRelInfo = mt_state->resultRelInfo + i; + + /* + * We unset junkfilter to disable junk cleaning in + * ExecModifyTable. We don't need junk cleaning because + * there is possible modification of tuple in `partition_filter_exec` + * Same time we need this junkfilter in PartitionFilter + * nodes, so we save it in node. + */ + cstate->saved_junkFilter = cstate->resultRelInfo->ri_junkFilter; + cstate->resultRelInfo->ri_junkFilter = NULL; } } } diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 7aed09d1..84668587 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -30,7 +30,7 @@ typedef struct PartitionUpdateState Oid partitioned_table; List *returning_list; - ModifyTableState *parent_state; + ResultRelInfo *resultRelInfo; JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_update.c b/src/partition_update.c index 9d122d04..aaaa4555 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -128,10 +128,10 @@ partition_update_exec(CustomScanState *node) * Restore junkfilter in base resultRelInfo, * we do it because child's RelResultInfo expects its existence * for proper initialization. 
- * Alsowe change junk attribute number in JunkFilter, because + * Also we set jf_junkAttNo there, because * it wasn't set in ModifyTable node initialization */ - state->parent_state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; + state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); From 2184b916f8bfc7a23d2ed6f4f32794474758ccfe Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 4 May 2017 13:52:44 +0300 Subject: [PATCH 0456/1124] Fix compability with 9.5 --- expected/pathman_update_node.out | 62 ---------------------- sql/pathman_update_node.sql | 3 -- src/hooks.c | 2 +- src/include/hooks.h | 8 ++- tests/python/partitioning_test.py | 87 +++++++++++++++++++++++++++++-- 5 files changed, 90 insertions(+), 72 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 7fd01a0e..f4312b2c 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -40,68 +40,6 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = Index Cond: (val = '15'::numeric) (7 rows) -/* Scan all partitions */ -EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; - QUERY PLAN ----------------------------------------------------- - Insert on test_range - Insert on test_range - Insert on test_range_1 - Insert on test_range_2 - Insert on test_range_3 - Insert on test_range_4 - Insert on test_range_5 - Insert on test_range_6 - Insert on test_range_7 - Insert on test_range_8 - Insert on test_range_9 - Insert on test_range_10 - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_1 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_2 - 
Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_3 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_4 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_5 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_6 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_7 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_8 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_9 - Filter: (comment = '15'::text) - -> Custom Scan (PrepareInsert) - -> Custom Scan (PartitionFilter) - -> Seq Scan on test_range_10 - Filter: (comment = '15'::text) -(56 rows) - /* Update values in 1st partition (rows remain there) */ UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; /* Check values #1 */ diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 5ba660fe..754dffc2 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -18,9 +18,6 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 /* Keep same partition */ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; -/* Scan all partitions */ -EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE comment='15'; - /* Update values in 1st partition (rows remain there) */ UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; diff --git a/src/hooks.c b/src/hooks.c index 3fdb3abb..8fb7c954 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -844,7 +844,7 @@ 
pathman_process_utility_hook(Node *parsetree, void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - uint64 count) + ExecutorRun_CountArgType count) { PlanState *state = (PlanState *) queryDesc->planstate; diff --git a/src/include/hooks.h b/src/include/hooks.h index b93b4ba8..fec0a8c0 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -62,7 +62,13 @@ void pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag); +#if PG_VERSION_NUM >= 90600 +typedef uint64 ExecutorRun_CountArgType; +#else +typedef long ExecutorRun_CountArgType; +#endif + void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - uint64 count); + ExecutorRun_CountArgType count); #endif /* PATHMAN_HOOKS_H */ diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cda00c62..41b8d5c4 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -15,16 +15,18 @@ import re import subprocess import threading +import json from testgres import get_new_node, stop_all # Helper function for json equality -def ordered(obj): +def ordered(obj, skip_keys=None): if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) + return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items() + if skip_keys is None or (skip_keys and k not in skip_keys)) if isinstance(obj, list): - return sorted(ordered(x) for x in obj) + return sorted(ordered(x, skip_keys=skip_keys) for x in obj) else: return obj @@ -470,8 +472,6 @@ def test_foreign_table(self): def test_parallel_nodes(self): """Test parallel queries under partitions""" - import json - # Init and start postgres instance with preload pg_pathman module node = get_new_node('test') node.init() @@ -990,6 +990,83 @@ def test_concurrent_detach(self): node.cleanup() FNULL.close() + def test_update_node_plan1(self): + ''' Test scan on all partititions when using update node. 
+ We can't use regression tests here because 9.5 and 9.5 give + different plans + ''' + + node = get_new_node('test_update_node') + node.init() + node.append_conf( + 'postgresql.conf', + """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + """) + node.start() + + # Prepare test database + node.psql('postgres', 'CREATE EXTENSION pg_pathman;') + node.psql('postgres', 'CREATE SCHEMA test_update_node;') + node.psql('postgres', 'CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT)') + node.psql('postgres', 'INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i;') + node.psql('postgres', "SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10);") + + node.psql('postgres', """ + create or replace function query_plan(query text) returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + with node.connect() as con: + con.execute("SET pg_pathman.enable_partitionupdate=on") + + test_query = "UPDATE test_update_node.test_range SET val = 14 WHERE comment=''15''" + plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0] + plan = plan[0]["Plan"] + + self.assertEqual(plan["Node Type"], "ModifyTable") + self.assertEqual(plan["Operation"], "Insert") + self.assertEqual(plan["Relation Name"], "test_range") + self.assertEqual(len(plan["Target Tables"]), 11) + + expected_format = ''' + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionFilter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PrepareInsert" + } + ''' + for i, f in enumerate([''] + list(map(str, range(1, 10)))): + num = '_' + 
f if f else '' + expected = json.loads(expected_format % num) + p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) + self.assertEqual(p, ordered(expected)) + + node.stop() + node.cleanup() + if __name__ == "__main__": unittest.main() From a1f8149a73e5221fb88a05739fd4dc78981a8478 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 4 May 2017 13:56:41 +0300 Subject: [PATCH 0457/1124] Add proper cleaning in update node test --- tests/python/partitioning_test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 41b8d5c4..26e77037 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1067,6 +1067,9 @@ def test_update_node_plan1(self): node.stop() node.cleanup() + node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') + node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + if __name__ == "__main__": unittest.main() From 0a3c84a3cc18b930c918c60d817b2eb1debaab28 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 13:57:41 +0300 Subject: [PATCH 0458/1124] multilevel partitioning --- src/include/partition_filter.h | 16 ++++-- src/partition_filter.c | 90 ++++++++++++++++++++++++++++------ src/pg_pathman.c | 18 ++++++- src/pl_funcs.c | 6 +-- src/utility_stmt_hooking.c | 24 ++++----- 5 files changed, 117 insertions(+), 37 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index cccacf2f..9b0d8391 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,6 +40,8 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ + bool has_subpartitions; + ExprState *expr_state; /* if has_subpartitions true */ } ResultRelInfoHolder; @@ -133,11 +135,15 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const 
PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); - +// ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, +// const PartRelationInfo *prel, +// ResultPartsStorage *parts_storage, +// EState *estate); +ResultRelInfoHolder * +select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_filter.c b/src/partition_filter.c index ac0e5528..93fad0ae 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -319,6 +319,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); + /* Are there subpartitions? */ + rri_holder->has_subpartitions = + (get_pathman_relation_info(partid) != NULL); + rri_holder->expr_state = NULL; + /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) parts_storage->on_new_rri_holder_callback(parts_storage->estate, @@ -412,7 +417,7 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
*/ ResultRelInfoHolder * -select_partition_for_insert(Datum value, Oid value_type, +select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, const PartRelationInfo *prel, ResultPartsStorage *parts_storage, EState *estate) @@ -422,9 +427,23 @@ select_partition_for_insert(Datum value, Oid value_type, Oid selected_partid = InvalidOid; Oid *parts; int nparts; + TupleTableSlot *tmp_slot; + // const PartRelationInfo *subprel; + bool isnull; + ExprDoneCond itemIsDone; + Datum value; + + /* Execute expression */ + value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + if (itemIsDone != ExprSingleResult) + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); + parts = find_partitions_for_value(value, prel->atttype, prel, &nparts); if (nparts > 1) elog(ERROR, ERR_PART_ATTR_MULTIPLE); @@ -438,9 +457,57 @@ select_partition_for_insert(Datum value, Oid value_type, } else selected_partid = parts[0]; + // subprel = get_pathman_relation_info(state->partitioned_table)) /* Replace parent table with a suitable partition */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); + + /* If partition has subpartitions */ + if (rri_holder->has_subpartitions) + { + const PartRelationInfo *subprel; + + /* Fetch PartRelationInfo for this partitioned relation */ + subprel = get_pathman_relation_info(selected_partid); + Assert(subprel != NULL); + + /* Build an expression state if not yet */ + if (!rri_holder->expr_state) + { + MemoryContext tmp_mcxt; + Node *expr; + Index varno = 1; + ListCell *lc; + + /* Change varno in Vars according to range table */ + expr = copyObject(subprel->expr); + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == selected_partid) + { + if (varno > 1) + 
ChangeVarNodes(expr, 1, varno, 0); + break; + } + varno += 1; + } + + /* Prepare state for expression execution */ + tmp_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + rri_holder->expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(tmp_mcxt); + } + + Assert(rri_holder->expr_state != NULL); + + /* Dive in */ + rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, + subprel, + parts_storage, + estate); + } + MemoryContextSwitchTo(old_mcxt); /* Could not find suitable partition */ @@ -598,9 +665,9 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - bool isnull; - Datum value; - ExprDoneCond itemIsDone; + // bool isnull; + // Datum value; + // ExprDoneCond itemIsDone; TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ @@ -618,22 +685,15 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExpr(state->expr_state, econtext, &isnull, &itemIsDone); - econtext->ecxt_scantuple = tmp_slot; - - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); - - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(value, prel->atttype, prel, + rri_holder = select_partition_for_insert(econtext, state->expr_state, prel, &state->result_parts, estate); + econtext->ecxt_scantuple = tmp_slot; + /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9455b856..7e80aa92 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -249,7 +249,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, 
child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->inh = false; /* relation has no children */ + // child_rte->inh = false; /* relation has no children */ + child_rte->inh = (child_oid != parent_rte->relid) ? + child_relation->rd_rel->relhassubclass : false; child_rte->requiredPerms = 0; /* perform all checks on parent */ /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ @@ -391,6 +393,17 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, parent_rowmark->isParent = true; } + /* + * TODO: new!!! + */ + if (child_rte->inh) + { + pathman_rel_pathlist_hook(root, + child_rel, + childRTindex, + child_rte); + } + return childRTindex; } @@ -1659,7 +1672,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_foreign_pathlist(root, childrel, childRTE); } - else + /* TODO: temporary!!! */ + else if(!childRTE->inh || childrel->pathlist == NIL) { /* childrel->rows should be >= 1 */ set_plain_rel_size(root, childrel, childRTE); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 41297f02..ef06c581 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -612,9 +612,9 @@ is_tuple_convertible(PG_FUNCTION_ARGS) void *map; /* we don't actually need it */ /* Try to build a conversion map */ - map = convert_tuples_by_name_map(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + map = convert_tuples_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); /* Now free map */ pfree(map); } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 64d563db..97a33574 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -597,11 +597,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; - ExprDoneCond itemIsDone; - bool skip_tuple, - isnull; + // ExprDoneCond itemIsDone; + bool skip_tuple; + // isnull; Oid tuple_oid = InvalidOid; - 
Datum value; + // Datum value; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; @@ -641,19 +641,19 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - econtext->ecxt_scantuple = tmp_slot; + // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + // if (isnull) + // elog(ERROR, ERR_PART_ATTR_NULL); - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); + // if (itemIsDone != ExprSingleResult) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(value, - prel->atttype, prel, + // rri_holder = select_partition_for_insert(value, + rri_holder = select_partition_for_insert(econtext, expr_state, prel, &parts_storage, estate); + econtext->ecxt_scantuple = tmp_slot; child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; From 29619a500e87a3f8a3f135bdb2f895f1c53ccb67 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 14:49:57 +0300 Subject: [PATCH 0459/1124] clean up --- src/include/compat/debug_compat_features.h | 2 - src/include/partition_filter.h | 13 ++---- src/partition_creation.c | 1 - src/partition_filter.c | 6 --- src/pg_pathman.c | 53 +++++++++++++--------- src/utility_stmt_hooking.c | 11 ----- 6 files changed, 36 insertions(+), 50 deletions(-) diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index c668d4ce..8968b572 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,8 +12,6 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -//#define ENABLE_EXPAND_RTE_HOOK -//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 /* 
Hacks for vanilla */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 9b0d8391..e053d2a5 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -135,15 +135,10 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -// ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, -// const PartRelationInfo *prel, -// ResultPartsStorage *parts_storage, -// EState *estate); -ResultRelInfoHolder * -select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); +ResultRelInfoHolder *select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, + const PartRelationInfo *prel, + ResultPartsStorage *parts_storage, + EState *estate); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_creation.c b/src/partition_creation.c index 68d431b7..4bb54b2e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1300,7 +1300,6 @@ build_raw_hash_check_tree(Node *raw_expression, A_Expr *eq_oper = makeNode(A_Expr); FuncCall *part_idx_call = makeNode(FuncCall), *hash_call = makeNode(FuncCall); - //ColumnRef *hashed_column = makeNode(ColumnRef); A_Const *part_idx_c = makeNode(A_Const), *part_count_c = makeNode(A_Const); diff --git a/src/partition_filter.c b/src/partition_filter.c index 93fad0ae..47ad1e88 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -427,8 +427,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Oid selected_partid = InvalidOid; Oid *parts; int nparts; - TupleTableSlot *tmp_slot; - // const PartRelationInfo *subprel; bool isnull; ExprDoneCond itemIsDone; Datum value; @@ -457,7 +455,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, } else selected_partid = parts[0]; - // subprel = 
get_pathman_relation_info(state->partitioned_table)) /* Replace parent table with a suitable partition */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); @@ -665,9 +662,6 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - // bool isnull; - // Datum value; - // ExprDoneCond itemIsDone; TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7e80aa92..e10d1172 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -249,10 +249,14 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - // child_rte->inh = false; /* relation has no children */ + child_rte->requiredPerms = 0; /* perform all checks on parent */ + /* + * If it is the parent relation, then set inh flag to false to prevent + * further recursive unrolling. Else if relation is a child and has subclass + * then we will need to check if there are subpartitions + */ child_rte->inh = (child_oid != parent_rte->relid) ? child_relation->rd_rel->relhassubclass : false; - child_rte->requiredPerms = 0; /* perform all checks on parent */ /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); @@ -394,7 +398,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, } /* - * TODO: new!!! 
+ * Recursively expand child partition if it has subpartitions */ if (child_rte->inh) { @@ -1660,29 +1664,36 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_rel_consider_parallel_compat(root, childrel, childRTE); #endif - /* Compute child's access paths & sizes */ - if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + /* + * If inh is True and pathlist is not null then it is a partitioned + * table and we've already filled it, skip it. Otherwise build a + * pathlist for it + */ + if(!childRTE->inh || childrel->pathlist == NIL) { - /* childrel->rows should be >= 1 */ - set_foreign_size(root, childrel, childRTE); + /* Compute child's access paths & sizes */ + if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + { + /* childrel->rows should be >= 1 */ + set_foreign_size(root, childrel, childRTE); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; - set_foreign_pathlist(root, childrel, childRTE); - } - /* TODO: temporary!!! 
*/ - else if(!childRTE->inh || childrel->pathlist == NIL) - { - /* childrel->rows should be >= 1 */ - set_plain_rel_size(root, childrel, childRTE); + set_foreign_pathlist(root, childrel, childRTE); + } + else + { + /* childrel->rows should be >= 1 */ + set_plain_rel_size(root, childrel, childRTE); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(childrel)) + continue; - set_plain_rel_pathlist(root, childrel, childRTE); + set_plain_rel_pathlist(root, childrel, childRTE); + } } /* Set cheapest path for child */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 97a33574..b2e46c43 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -597,11 +597,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; - // ExprDoneCond itemIsDone; bool skip_tuple; - // isnull; Oid tuple_oid = InvalidOid; - // Datum value; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; @@ -641,16 +638,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - - // if (isnull) - // elog(ERROR, ERR_PART_ATTR_NULL); - - // if (itemIsDone != ExprSingleResult) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - // rri_holder = select_partition_for_insert(value, rri_holder = select_partition_for_insert(econtext, expr_state, prel, &parts_storage, estate); econtext->ecxt_scantuple = tmp_slot; From 055065983966343657a2381481baa350c3e4b330 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 4 May 2017 15:43:42 +0300 Subject: [PATCH 0460/1124] Fix tests --- tests/python/partitioning_test.py | 46 +++++++++++++++---------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/python/partitioning_test.py 
b/tests/python/partitioning_test.py index 26e77037..9dc404af 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1037,26 +1037,26 @@ def test_update_node_plan1(self): self.assertEqual(len(plan["Target Tables"]), 11) expected_format = ''' - { - "Plans": [ - { - "Plans": [ - { - "Filter": "(comment = '15'::text)", - "Node Type": "Seq Scan", - "Relation Name": "test_range%s", - "Parent Relationship": "child" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "child", - "Custom Plan Provider": "PartitionFilter" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "Member", - "Custom Plan Provider": "PrepareInsert" - } + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionFilter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PrepareInsert" + } ''' for i, f in enumerate([''] + list(map(str, range(1, 10)))): num = '_' + f if f else '' @@ -1064,12 +1064,12 @@ def test_update_node_plan1(self): p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) self.assertEqual(p, ordered(expected)) - node.stop() - node.cleanup() - node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + node.stop() + node.cleanup() + if __name__ == "__main__": unittest.main() From 34272f3d0e1c98474b6ca3346cc718291e46e45d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 15:51:25 +0300 Subject: [PATCH 0461/1124] refactoring, introduce function canonicalize_custom_exprs(), use get_partitioning_clauses() in pathman_rel_pathlist_hook() --- src/hooks.c | 10 +++- src/include/runtimeappend.h | 3 ++ src/nodes_common.c | 105 ++++++++++++++++-------------------- 
src/partition_filter.c | 19 ++++--- src/pg_pathman.c | 37 +++++++------ 5 files changed, 85 insertions(+), 89 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 33a53f7a..87bb5d86 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -308,10 +308,10 @@ pathman_rel_pathlist_hook(PlannerInfo *root, *pathkeyDesc = NULL; double paramsel = 1.0; /* default part selectivity */ WalkerContext context; - ListCell *lc; - int i; Node *expr; bool modify_append_nodes; + ListCell *lc; + int i; /* Make copy of partitioning expression and fix Var's varno attributes */ expr = PrelExpressionForRelid(prel, rti); @@ -462,6 +462,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Get existing parameterization */ ppi = get_appendrel_parampathinfo(rel, inner_required); + /* Skip if there are no partitioning clauses */ + if (!get_partitioning_clauses(list_union(rel->baserestrictinfo, + rel->joininfo), + prel, rti)) + return; + if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) inner_path = create_runtimeappend_path(root, cur_path, ppi, paramsel); diff --git a/src/include/runtimeappend.h b/src/include/runtimeappend.h index 912ce18e..a1f934c4 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtimeappend.h @@ -38,6 +38,9 @@ typedef struct /* Restrictions to be checked during ReScan and Exec */ List *custom_exprs; + /* Refined clauses for partition pruning */ + List *canon_custom_exprs; + /* All available plans \ plan states */ HTAB *children_table; HASHCTL children_table_config; diff --git a/src/nodes_common.c b/src/nodes_common.c index 9949327b..0565943d 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -329,6 +329,41 @@ clause_contains_prel_expr(Node *node, Node *prel_expr) return expression_tree_walker(node, clause_contains_prel_expr, prel_expr); } + +/* Prepare CustomScan's custom expression for walk_expr_tree() */ +static Node * +canonicalize_custom_exprs_mutator(Node *node, void *cxt) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Var)) + { 
+ Var *var = palloc(sizeof(Var)); + *var = *(Var *) node; + + /* Replace original 'varnoold' */ + var->varnoold = INDEX_VAR; + + /* Restore original 'varattno' */ + var->varattno = var->varoattno; + + /* Forget 'location' */ + var->location = -1; + + return (Node *) var; + } + + return expression_tree_mutator(node, canonicalize_custom_exprs_mutator, NULL); +} + +static List * +canonicalize_custom_exprs(List *custom_exps) +{ + return (List *) canonicalize_custom_exprs_mutator((Node *) custom_exps, NULL); +} + + /* * Filter all available clauses and extract relevant ones. */ @@ -603,14 +638,20 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + node->ss.ps.ps_TupFromTlist = false; + + /* Prepare custom expression according to set_set_customscan_references() */ + scan_state->canon_custom_exprs = + canonicalize_custom_exprs(scan_state->custom_exprs); } TupleTableSlot * exec_append_common(CustomScanState *node, void (*fetch_next_tuple) (CustomScanState *node)) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; /* ReScan if no plans are selected */ if (scan_state->ncur_plans == 0) @@ -660,51 +701,6 @@ end_append_common(CustomScanState *node) hash_destroy(scan_state->children_table); } -/* Find first Var with varno == INDEX_VAR, and returns its varnoold */ -static bool -find_varnoold(Node *node, int *varnoold) -{ - if (node == NULL) - return false; - - if (IsA(node, Var)) - { - Var *var = (Var *) node; - if (var->varno == INDEX_VAR) - { - /* we found it */ - *varnoold = var->varnoold; - return true; - } - return false; - } - - return expression_tree_walker(node, find_varnoold, (void *) varnoold); -} - -/* - * To check equality we need to modify partitioning expression's Vars like - * they appear in custom_exprs, it means that varno should be equal to - 
* INDEX_VAR and varnoold should be changed according to query - */ -static bool -prepare_vars(Node *node, const int *varnoold) -{ - if (node == NULL) - return false; - - if (IsA(node, Var)) - { - Var *var = (Var *) node; - Assert(var->varno == 1); - var->varno = INDEX_VAR; - var->varnoold = *varnoold; - return false; - } - - return expression_tree_walker(node, prepare_vars, (void *) varnoold); -} - void rescan_append_common(CustomScanState *node) { @@ -718,28 +714,17 @@ rescan_append_common(CustomScanState *node) int nparts; Node *prel_expr; - int varnoold = -100; /* not possible number */ - prel = get_pathman_relation_info(scan_state->relid); Assert(prel); - /* Prepare expression. Copy and modify 'varno' and 'varnoold' attributes */ - prel_expr = copyObject(prel->expr); - foreach(lc, scan_state->custom_exprs) - { - find_varnoold((Node *) lfirst(lc), &varnoold); - if (varnoold != -100) - break; - } - - if (varnoold != -100) - prepare_vars(prel_expr, &varnoold); + /* Prepare expression according to set_set_customscan_references() */ + prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); /* First we select all available partitions... 
*/ ranges = list_make1_irange_full(prel, IR_COMPLETE); InitWalkerContext(&wcxt, prel_expr, prel, econtext, false); - foreach (lc, scan_state->custom_exprs) + foreach (lc, scan_state->canon_custom_exprs) { WrapperNode *wn; diff --git a/src/partition_filter.c b/src/partition_filter.c index ac0e5528..0e7225fa 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -502,7 +502,7 @@ make_partition_filter(Plan *subplan, Oid parent_relid, Node * partition_filter_create_scan_state(CustomScan *node) { - PartitionFilterState *state; + PartitionFilterState *state; state = (PartitionFilterState *) palloc0(sizeof(PartitionFilterState)); NodeSetTag(state, T_CustomScanState); @@ -531,11 +531,12 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; - Node *expr; - MemoryContext old_mcxt; PartitionFilterState *state = (PartitionFilterState *) node; + + MemoryContext old_mcxt; const PartRelationInfo *prel; + Node *expr; + Index parent_varno = 1; ListCell *lc; /* It's convenient to store PlanState in 'custom_ps' */ @@ -548,18 +549,16 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) Assert(prel != NULL); /* Change varno in Vars according to range table */ - expr = copyObject(prel->expr); foreach(lc, estate->es_range_table) { RangeTblEntry *entry = lfirst(lc); + if (entry->relid == state->partitioned_table) - { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); break; - } - varno += 1; + + parent_varno += 1; } + expr = PrelExpressionForRelid(prel, parent_varno); /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9455b856..2592a8a2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -712,9 +712,9 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) static WrapperNode * handle_const(const Const *c, WalkerContext 
*context) { + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + int strategy = BTEqualStrategyNumber; const PartRelationInfo *prel = context->prel; - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - int strategy = BTEqualStrategyNumber; result->orig = (const Node *) c; @@ -725,7 +725,7 @@ handle_const(const Const *c, WalkerContext *context) if (!context->for_insert || c->constisnull) { result->rangeset = NIL; - result->paramsel = 1.0; + result->paramsel = 0.0; return result; } @@ -796,18 +796,18 @@ handle_const(const Const *c, WalkerContext *context) static WrapperNode * handle_boolexpr(const BoolExpr *expr, WalkerContext *context) { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - ListCell *lc; + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + ListCell *lc; const PartRelationInfo *prel = context->prel; - result->orig = (const Node *)expr; + result->orig = (const Node *) expr; result->args = NIL; result->paramsel = 1.0; - if (expr->boolop == AND_EXPR) - result->rangeset = list_make1_irange_full(prel, IR_COMPLETE); - else - result->rangeset = NIL; + /* First, set default rangeset */ + result->rangeset = (expr->boolop == AND_EXPR) ? 
+ list_make1_irange_full(prel, IR_COMPLETE) : + NIL; foreach (lc, expr->args) { @@ -856,9 +856,9 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) static WrapperNode * handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) { - WrapperNode *result = (WrapperNode *) palloc(sizeof(WrapperNode)); - Node *exprnode = (Node *) linitial(expr->args); - Node *arraynode = (Node *) lsecond(expr->args); + WrapperNode *result = (WrapperNode *) palloc(sizeof(WrapperNode)); + Node *exprnode = (Node *) linitial(expr->args); + Node *arraynode = (Node *) lsecond(expr->args); const PartRelationInfo *prel = context->prel; result->orig = (const Node *) expr; @@ -993,8 +993,8 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) static WrapperNode * handle_opexpr(const OpExpr *expr, WalkerContext *context) { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - Node *var, *param; + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + Node *var, *param; const PartRelationInfo *prel = context->prel; result->orig = (const Node *) expr; @@ -1006,7 +1006,8 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) { if (IsConstValue(context, param)) { - handle_binary_opexpr(context, result, var, ExtractConst(context, param)); + handle_binary_opexpr(context, result, var, + ExtractConst(context, param)); return result; } else if (IsA(param, Param) || IsA(param, Var)) @@ -1026,6 +1027,7 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } /* Binary operator handler */ +/* FIXME: varnode */ static void handle_binary_opexpr(WalkerContext *context, WrapperNode *result, const Node *varnode, const Const *c) @@ -1113,6 +1115,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, } /* Estimate selectivity of parametrized quals */ +/* FIXME: varnode */ static void handle_binary_opexpr_param(const PartRelationInfo *prel, WrapperNode *result, const Node *varnode) @@ -1460,7 +1463,7 @@ 
translate_col_privs(const Bitmapset *parent_privs, attno = InvalidAttrNumber; foreach(lc, translated_vars) { - Var *var = (Var *) lfirst(lc); + Var *var = (Var *) lfirst(lc); attno++; if (var == NULL) /* ignore dropped columns */ From e50016d4158e243f4a2d317d9785c4a93932d301 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 16:23:34 +0300 Subject: [PATCH 0462/1124] restore function clause_contains_params() and get rid of 'found_params' in WalkerContext --- src/hooks.c | 40 +++++++++++++++++----------------------- src/include/pathman.h | 3 --- src/nodes_common.c | 2 +- src/pg_pathman.c | 11 ++++------- src/utils.c | 25 +++++++++++++++++++++++++ 5 files changed, 47 insertions(+), 34 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 87bb5d86..31530ba8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -81,10 +81,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, const PartRelationInfo *inner_prel; List *joinclauses, *otherclauses; - ListCell *lc; WalkerContext context; double paramsel; - Node *expr; + Node *part_expr; + ListCell *lc; /* Call hooks set by other extensions */ if (set_join_pathlist_next) @@ -130,14 +130,14 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* Make copy of partitioning expression and fix Var's varno attributes */ - expr = PrelExpressionForRelid(inner_prel, innerrel->relid); + part_expr = PrelExpressionForRelid(inner_prel, innerrel->relid); paramsel = 1.0; foreach (lc, joinclauses) { WrapperNode *wrap; - InitWalkerContext(&context, expr, inner_prel, NULL, false); + InitWalkerContext(&context, part_expr, inner_prel, NULL, false); wrap = walk_expr_tree((Expr *) lfirst(lc), &context); paramsel *= wrap->paramsel; } @@ -308,13 +308,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, *pathkeyDesc = NULL; double paramsel = 1.0; /* default part selectivity */ WalkerContext context; - Node *expr; - bool modify_append_nodes; + Node *part_expr; + List *part_clauses; ListCell *lc; int i; /* Make copy of partitioning expression and 
fix Var's varno attributes */ - expr = PrelExpressionForRelid(prel, rti); + part_expr = PrelExpressionForRelid(prel, rti); if (prel->parttype == PT_RANGE) { @@ -328,11 +328,11 @@ pathman_rel_pathlist_hook(PlannerInfo *root, tce = lookup_type_cache(prel->atttype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) expr, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, tce->lt_opr, NULL, false); if (pathkeys) pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) expr, NULL, + pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, tce->gt_opr, NULL, false); if (pathkeys) pathkeyDesc = (PathKey *) linitial(pathkeys); @@ -345,7 +345,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = list_make1_irange_full(prel, IR_COMPLETE); /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, expr, prel, NULL, false); + InitWalkerContext(&context, part_expr, prel, NULL, false); wrappers = NIL; foreach(lc, rel->baserestrictinfo) { @@ -359,12 +359,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = irange_list_intersection(ranges, wrap->rangeset); } - /* - * Walker should been have filled these parameter while checking. - * Runtime[Merge]Append is pointless if there are no params in clauses. 
- */ - modify_append_nodes = context.found_params; - /* Get number of selected partitions */ irange_len = irange_list_length(ranges); if (prel->enable_parent) @@ -441,7 +435,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pg_pathman_enable_runtime_merge_append)) return; - if (!modify_append_nodes) + /* Get partitioning-related clauses */ + part_clauses = get_partitioning_clauses(list_union(rel->baserestrictinfo, + rel->joininfo), + prel, rti); + + /* Skip if there's no PARAMs in partitioning-related clauses */ + if (!clause_contains_params((Node *) part_clauses)) return; /* Generate Runtime[Merge]Append paths if needed */ @@ -462,12 +462,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Get existing parameterization */ ppi = get_appendrel_parampathinfo(rel, inner_required); - /* Skip if there are no partitioning clauses */ - if (!get_partitioning_clauses(list_union(rel->baserestrictinfo, - rel->joininfo), - prel, rti)) - return; - if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) inner_path = create_runtimeappend_path(root, cur_path, ppi, paramsel); diff --git a/src/include/pathman.h b/src/include/pathman.h index d35de076..6dd2447e 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -148,8 +148,6 @@ typedef struct const PartRelationInfo *prel; /* main partitioning structure */ ExprContext *econtext; /* for ExecEvalExpr() */ bool for_insert; /* are we in PartitionFilter now? 
*/ - bool found_params; /* mark if left or right argument - of clause is Param */ } WalkerContext; /* Usual initialization procedure for WalkerContext */ @@ -159,7 +157,6 @@ typedef struct (context)->prel = (prel_info); \ (context)->econtext = (ecxt); \ (context)->for_insert = (for_ins); \ - (context)->found_params = (false); \ } while (0) /* Check that WalkerContext contains ExprContext (plan execution stage) */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 0565943d..e67bc0cb 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -323,7 +323,7 @@ clause_contains_prel_expr(Node *node, Node *prel_expr) if (node == NULL) return false; - if (match_expr_to_operand(node, prel_expr)) + if (match_expr_to_operand(prel_expr, node)) return true; return expression_tree_walker(node, clause_contains_prel_expr, prel_expr); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2592a8a2..ae338ac8 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -65,7 +65,7 @@ static void handle_binary_opexpr_param(const PartRelationInfo *prel, WrapperNode *result, const Node *varnode); -static bool pull_var_param(const WalkerContext *ctx, +static bool pull_var_param(const WalkerContext *context, const OpExpr *expr, Node **var_ptr, Node **param_ptr); @@ -1012,9 +1012,6 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } else if (IsA(param, Param) || IsA(param, Var)) { - if (IsA(param, Param)) - context->found_params = true; - handle_binary_opexpr_param(prel, result, var); return result; } @@ -1140,7 +1137,7 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, * NOTE: returns false if partition key is not in expression. 
*/ static bool -pull_var_param(const WalkerContext *ctx, +pull_var_param(const WalkerContext *context, const OpExpr *expr, Node **var_ptr, Node **param_ptr) @@ -1148,14 +1145,14 @@ pull_var_param(const WalkerContext *ctx, Node *left = linitial(expr->args), *right = lsecond(expr->args); - if (match_expr_to_operand(left, ctx->prel_expr)) + if (match_expr_to_operand(context->prel_expr, left)) { *var_ptr = left; *param_ptr = right; return true; } - if (match_expr_to_operand(right, ctx->prel_expr)) + if (match_expr_to_operand(context->prel_expr, right)) { *var_ptr = right; *param_ptr = left; diff --git a/src/utils.c b/src/utils.c index 5f070e30..12afb632 100644 --- a/src/utils.c +++ b/src/utils.c @@ -34,6 +34,31 @@ #include "utils/typcache.h" +static bool +clause_contains_params_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Param)) + return true; + + return expression_tree_walker(node, + clause_contains_params_walker, + context); +} + +/* + * Check whether clause contains PARAMs or not. + */ +bool +clause_contains_params(Node *clause) +{ + return expression_tree_walker(clause, + clause_contains_params_walker, + NULL); +} + /* * Check if this is a "date"-related type. 
*/ From d07dc927d71916d0120cf44bd8f5ce8590169569 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 16:29:13 +0300 Subject: [PATCH 0463/1124] do not consider rel->joininfo in pathman_rel_pathlist_hook() --- src/hooks.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 31530ba8..5ce731c0 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -436,9 +436,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, return; /* Get partitioning-related clauses */ - part_clauses = get_partitioning_clauses(list_union(rel->baserestrictinfo, - rel->joininfo), - prel, rti); + part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); /* Skip if there's no PARAMs in partitioning-related clauses */ if (!clause_contains_params((Node *) part_clauses)) From c4b543f3fc9c94706c20f219d66c8e3ffdcc1809 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 16:40:53 +0300 Subject: [PATCH 0464/1124] clean up pathman_expressions tests --- Makefile | 4 +- expected/pathman_expressions.out | 64 +++++++++++++++--------------- expected/pathman_join_clause.out | 30 +++++++------- sql/pathman_expressions.sql | 68 +++++++++++++++++--------------- sql/pathman_join_clause.sql | 31 ++++++++------- 5 files changed, 102 insertions(+), 95 deletions(-) diff --git a/Makefile b/Makefile index 2ebb95be..53249f9c 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ REGRESS = pathman_basic \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_expressions \ pathman_foreign_keys \ pathman_inserts \ pathman_interval \ @@ -42,8 +43,7 @@ REGRESS = pathman_basic \ pathman_runtime_nodes \ pathman_update_trigger \ pathman_updates \ - pathman_utility_stmt \ - pathman_expressions + pathman_utility_stmt EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index b462bf20..a55ef371 100644 --- a/expected/pathman_expressions.out +++ 
b/expected/pathman_expressions.out @@ -1,55 +1,54 @@ \set VERBOSITY terse SET search_path = 'public'; -CREATE SCHEMA pathman; -CREATE EXTENSION pg_pathman SCHEMA pathman; -CREATE SCHEMA test; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; /* hash */ -CREATE TABLE test.hash_rel ( +CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER, value2 INTEGER ); -INSERT INTO test.hash_rel (value, value2) +INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; -SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; count ------- 5 (1 row) -SELECT pathman.create_hash_partitions('test.hash_rel', 'value * value2', 4); +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); create_hash_partitions ------------------------ 4 (1 row) -SELECT COUNT(*) FROM ONLY test.hash_rel; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; count ------- 0 (1 row) -SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; count ------- 5 (1 row) -INSERT INTO test.hash_rel (value, value2) +INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(6, 10) val; -SELECT COUNT(*) FROM ONLY test.hash_rel; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; count ------- 0 (1 row) -SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; count ------- 10 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; QUERY PLAN ------------------------------ Append @@ -63,7 +62,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; Filter: (value = 5) (9 rows) -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; QUERY PLAN ---------------------------------------- Append @@ -72,12 +71,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE (value * value2) = 5; (3 rows) /* range */ -CREATE TABLE test.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); -INSERT INTO test.range_rel (dt, txt) +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; -SELECT pathman.create_range_partitions('test.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); ERROR: start value is less than min value of "random()" -SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions @@ -85,22 +84,22 @@ NOTICE: sequence "range_rel_seq" does not exist, skipping 10 (1 row) -INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" -SELECT COUNT(*) FROM test.range_rel_6; +SELECT COUNT(*) FROM test_exprs.range_rel_6; count ------- 4 (1 row) -INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); -SELECT COUNT(*) FROM test.range_rel_6; +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; count ------- 5 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; QUERY PLAN 
------------------------------------------------------------------------------------------------------------- Append @@ -108,47 +107,50 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (AGE(dt, '2000-01-01'::DA Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -SELECT pathman.create_update_triggers('test.range_rel'); +SELECT create_update_triggers('test_exprs.range_rel'); create_update_triggers ------------------------ (1 row) -SELECT COUNT(*) FROM test.range_rel; +SELECT COUNT(*) FROM test_exprs.range_rel; count ------- 65 (1 row) -SELECT COUNT(*) FROM test.range_rel_1; +SELECT COUNT(*) FROM test_exprs.range_rel_1; count ------- 12 (1 row) -SELECT COUNT(*) FROM test.range_rel_2; +SELECT COUNT(*) FROM test_exprs.range_rel_2; count ------- 12 (1 row) -UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; +UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; /* counts in partitions should be changed */ -SELECT COUNT(*) FROM test.range_rel; +SELECT COUNT(*) FROM test_exprs.range_rel; count ------- 65 (1 row) -SELECT COUNT(*) FROM test.range_rel_1; +SELECT COUNT(*) FROM test_exprs.range_rel_1; count ------- 10 (1 row) -SELECT COUNT(*) FROM test.range_rel_2; +SELECT COUNT(*) FROM test_exprs.range_rel_2; count ------- 24 (1 row) +DROP SCHEMA test_exprs CASCADE; +NOTICE: drop cascades to 17 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 747d6e54..48aeba5e 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -7,17 +7,17 @@ CREATE SCHEMA test; */ /* create test tables */ CREATE TABLE test.fk ( - id1 INT NOT NULL, - id2 INT NOT NULL, - start_key INT, - end_key INT, - PRIMARY KEY (id1, id2)); + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); CREATE TABLE 
test.mytbl ( - id1 INT NOT NULL, - id2 INT NOT NULL, - key INT NOT NULL, - CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), - PRIMARY KEY (id1, key)); + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); create_hash_partitions ------------------------ @@ -26,15 +26,15 @@ SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); /* ...fill out with test data */ INSERT INTO test.fk VALUES (1, 1); -INSERT INTO test.mytbl VALUES (1, 1, 5), (1,1,6); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); /* gather statistics on test tables to have deterministic plans */ ANALYZE test.fk; ANALYZE test.mytbl; /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM test.mytbl m JOIN test.fk USING(id1, id2) - WHERE NOT key <@ int4range(6, end_key); +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); QUERY PLAN ------------------------------------------------------------------------------------ Nested Loop @@ -85,8 +85,8 @@ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM test.mytbl m JOIN test.fk USING(id1, id2) - WHERE NOT key <@ int4range(6, end_key); +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); tableoid | id1 | id2 | key | start_key | end_key --------------+-----+-----+-----+-----------+--------- test.mytbl_6 | 1 | 1 | 5 | | diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index bc24e30f..9eef9e27 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -1,53 +1,57 @@ \set VERBOSITY terse SET search_path = 'public'; -CREATE SCHEMA pathman; -CREATE EXTENSION pg_pathman SCHEMA pathman; 
-CREATE SCHEMA test; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; + /* hash */ -CREATE TABLE test.hash_rel ( +CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER, value2 INTEGER ); -INSERT INTO test.hash_rel (value, value2) +INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; -SELECT COUNT(*) FROM test.hash_rel; -SELECT pathman.create_hash_partitions('test.hash_rel', 'value * value2', 4); -SELECT COUNT(*) FROM ONLY test.hash_rel; -SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; -INSERT INTO test.hash_rel (value, value2) +INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(6, 10) val; -SELECT COUNT(*) FROM ONLY test.hash_rel; -SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 5; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE (value * value2) = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; /* range */ -CREATE TABLE test.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); -INSERT INTO test.range_rel (dt, txt) +INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; -SELECT pathman.create_range_partitions('test.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -SELECT pathman.create_range_partitions('test.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', +SELECT create_range_partitions('test_exprs.range_rel', 
'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -INSERT INTO test.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); -SELECT COUNT(*) FROM test.range_rel_6; -INSERT INTO test.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); -SELECT COUNT(*) FROM test.range_rel_6; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; - -SELECT pathman.create_update_triggers('test.range_rel'); -SELECT COUNT(*) FROM test.range_rel; -SELECT COUNT(*) FROM test.range_rel_1; -SELECT COUNT(*) FROM test.range_rel_2; -UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + +SELECT create_update_triggers('test_exprs.range_rel'); +SELECT COUNT(*) FROM test_exprs.range_rel; +SELECT COUNT(*) FROM test_exprs.range_rel_1; +SELECT COUNT(*) FROM test_exprs.range_rel_2; +UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; /* counts in partitions should be changed */ -SELECT COUNT(*) FROM test.range_rel; -SELECT COUNT(*) FROM test.range_rel_1; -SELECT COUNT(*) FROM test.range_rel_2; +SELECT COUNT(*) FROM test_exprs.range_rel; +SELECT COUNT(*) FROM test_exprs.range_rel_1; +SELECT COUNT(*) FROM test_exprs.range_rel_2; + + +DROP SCHEMA test_exprs CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 594e9305..b97650ba 100644 --- a/sql/pathman_join_clause.sql 
+++ b/sql/pathman_join_clause.sql @@ -11,22 +11,22 @@ CREATE SCHEMA test; /* create test tables */ CREATE TABLE test.fk ( - id1 INT NOT NULL, - id2 INT NOT NULL, - start_key INT, - end_key INT, - PRIMARY KEY (id1, id2)); + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); CREATE TABLE test.mytbl ( - id1 INT NOT NULL, - id2 INT NOT NULL, - key INT NOT NULL, - CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), - PRIMARY KEY (id1, key)); + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); /* ...fill out with test data */ INSERT INTO test.fk VALUES (1, 1); -INSERT INTO test.mytbl VALUES (1, 1, 5), (1,1,6); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); /* gather statistics on test tables to have deterministic plans */ ANALYZE test.fk; @@ -35,12 +35,13 @@ ANALYZE test.mytbl; /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM test.mytbl m JOIN test.fk USING(id1, id2) - WHERE NOT key <@ int4range(6, end_key); +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key - FROM test.mytbl m JOIN test.fk USING(id1, id2) - WHERE NOT key <@ int4range(6, end_key); +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); DROP SCHEMA test CASCADE; From 20eb98d583d8de9f2f39667293bb75d7eccddcf1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 16:45:26 +0300 Subject: [PATCH 0465/1124] fix cmocka-based tests --- tests/cmocka/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 678a1ca0..7975ef93 100644 --- a/tests/cmocka/Makefile +++ 
b/tests/cmocka/Makefile @@ -3,6 +3,7 @@ TOP_SRC_DIR = ../../src CC = gcc CFLAGS = -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) +CFLAGS += -I$(CURDIR)/../../src/include CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) LDFLAGS = -lcmocka From 5a12ecc5e6a725be6fedf7de1d921996ab5154ea Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 4 May 2017 17:28:17 +0300 Subject: [PATCH 0466/1124] make UPDATE and DELETE queries work with multilevel partitioning --- src/planner_tree_modification.c | 182 +++++++++++++++++++------------- 1 file changed, 106 insertions(+), 76 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4a804101..19b4a34b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -36,6 +36,8 @@ static void partition_filter_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); +static Oid find_deepest_partition(Oid relid, Index idx, Expr *quals); + /* * HACK: We have to mark each Query with a unique @@ -238,14 +240,10 @@ disable_standard_inheritance(Query *parse) static void handle_modification_query(Query *parse) { - const PartRelationInfo *prel; - Node *prel_expr; - List *ranges; RangeTblEntry *rte; - WrapperNode *wrap; - Expr *expr; - WalkerContext context; + Expr *quals; Index result_rel; + Oid child; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -261,101 +259,133 @@ handle_modification_query(Query *parse) /* Exit if it's DELETE FROM ONLY table */ if (!rte->inh) return; - prel = get_pathman_relation_info(rte->relid); - - /* Exit if it's not partitioned */ - if (!prel) return; - - /* Exit if we must include parent */ - if (prel->enable_parent) return; - - /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange_full(prel, IR_COMPLETE); - expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - - /* 
Exit if there's no expr (no use) */ - if (!expr) return; - - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, result_rel); - - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL, false); - wrap = walk_expr_tree(expr, &context); + quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - ranges = irange_list_intersection(ranges, wrap->rangeset); + /* + * Parse syntax tree and extract deepest partition (if there is only one + * satisfying quals) + */ + child = find_deepest_partition(rte->relid, result_rel, quals); /* * If only one partition is affected, * substitute parent table with partition. */ - if (irange_list_length(ranges) == 1) + if (OidIsValid(child)) { - IndexRange irange = linitial_irange(ranges); + Relation child_rel, + parent_rel; - /* Exactly one partition (bounds are equal) */ - if (irange_lower(irange) == irange_upper(irange)) + void *tuple_map; /* we don't need the map itself */ + + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + + HeapTuple syscache_htup; + char child_relkind; + Oid parent = rte->relid; + + /* Lock 'child' table */ + LockRelationOid(child, lockmode); + + /* Make sure that 'child' exists */ + syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); + if (HeapTupleIsValid(syscache_htup)) { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - parent = rte->relid; + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); - Relation child_rel, - parent_rel; + /* Fetch child's relkind and free cache entry */ + child_relkind = reltup->relkind; + ReleaseSysCache(syscache_htup); + } + else + { + UnlockRelationOid(child, lockmode); + return; /* nothing to do here */ + } - void *tuple_map; /* we don't need the map itself */ + /* Both tables are already locked */ + child_rel = heap_open(child, NoLock); + parent_rel = heap_open(parent, NoLock); - LOCKMODE lockmode = 
RowExclusiveLock; /* UPDATE | DELETE */ + /* Build a conversion map (may be trivial, i.e. NULL) */ + tuple_map = build_part_tuple_map(parent_rel, child_rel); + if (tuple_map) + free_conversion_map((TupleConversionMap *) tuple_map); - HeapTuple syscache_htup; - char child_relkind; + /* Close relations (should remain locked, though) */ + heap_close(child_rel, NoLock); + heap_close(parent_rel, NoLock); - /* Lock 'child' table */ - LockRelationOid(child, lockmode); + /* Exit if tuple map was NOT trivial */ + if (tuple_map) /* just checking the pointer! */ + return; - /* Make sure that 'child' exists */ - syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); - if (HeapTupleIsValid(syscache_htup)) - { - Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; - /* Fetch child's relkind and free cache entry */ - child_relkind = reltup->relkind; - ReleaseSysCache(syscache_htup); - } - else - { - UnlockRelationOid(child, lockmode); - return; /* nothing to do here */ - } + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + } +} + +/* + * Find a single deepest subpartition. If there are more than one partitions + * satisfies quals or no such partition at all then return InvalidOid. + */ +static Oid +find_deepest_partition(Oid relid, Index idx, Expr *quals) +{ + const PartRelationInfo *prel; + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + /* Exit if there's no quals (no use) */ + if (!quals) return InvalidOid; + + prel = get_pathman_relation_info(relid); + + /* Exit if it's not partitioned */ + if (!prel) return InvalidOid; - /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); - parent_rel = heap_open(parent, NoLock); + /* Exit if we must include parent */ + if (prel->enable_parent) return InvalidOid; - /* Build a conversion map (may be trivial, i.e. 
NULL) */ - tuple_map = build_part_tuple_map(parent_rel, child_rel); - if (tuple_map) - free_conversion_map((TupleConversionMap *) tuple_map); + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, idx); - /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); - heap_close(parent_rel, NoLock); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! */ - return; + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL, false); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); - /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; - rte->relkind = child_relkind; + if (irange_list_length(ranges) == 1) + { + IndexRange irange = linitial_irange(ranges); + + if (irange_lower(irange) == irange_upper(irange)) + { + Oid *children = PrelGetChildrenArray(prel), + partition = children[irange_lower(irange)], + subpartition; + + /* + * Try to go deeper and see if there is subpartition + */ + subpartition = find_deepest_partition(partition, idx, quals); + if (OidIsValid(subpartition)) + return subpartition; - /* HACK: unset the 'inh' flag (no children) */ - rte->inh = false; + return partition; } } -} + return InvalidOid; +} /* * ------------------------------- From 26cb18fd506f3bc2a0d55b13490985e2e32c0081 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 4 May 2017 18:17:24 +0300 Subject: [PATCH 0467/1124] run cmocka-based tests in Travis CI --- Makefile | 6 +++++- tests/cmocka/Makefile | 3 ++- tests/cmocka/cmocka-1.1.1.tar.xz | Bin 0 -> 85648 bytes travis/pg-travis-test.sh | 32 +++++++++++++++++++++++++++++-- 4 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 tests/cmocka/cmocka-1.1.1.tar.xz diff --git a/Makefile b/Makefile index 53249f9c..00efb76b 100644 --- a/Makefile +++ b/Makefile @@ -75,5 
+75,9 @@ isolationcheck: | submake-isolation --temp-config=$(top_srcdir)/$(subdir)/conf.add \ --outputdir=./isolation_output \ $(ISOLATIONCHECKS) -partitioning_tests: + +python_tests: $(MAKE) -C tests/python partitioning_tests + +cmocka_tests: + $(MAKE) -C tests/cmocka check diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 7975ef93..65f967e6 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -2,10 +2,11 @@ PG_CONFIG = pg_config TOP_SRC_DIR = ../../src CC = gcc -CFLAGS = -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) +CFLAGS += -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) CFLAGS += -I$(CURDIR)/../../src/include CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) +CFLAGS += $(CFLAGS_SL) LDFLAGS = -lcmocka TEST_BIN = rangeset_tests diff --git a/tests/cmocka/cmocka-1.1.1.tar.xz b/tests/cmocka/cmocka-1.1.1.tar.xz new file mode 100644 index 0000000000000000000000000000000000000000..7b25e7ff504a2fa1b8f7bb33abdb73fdfef9f860 GIT binary patch literal 85648 zcmV(fK>EM^H+ooF000E$*0e?f03iVu0001VFXf}<#2oMcT>vqgNp#i!F4oYh6SI@RU(QpFQN#cI8k7#Ud|7b7`vEDl1dQ~Yt%NG> zAl&LHlMzfr2D8pX0z8W8wy7j&V5vaAk!kaCi==(M!K9ou8;9)CIJ_f) zE+w{h_uBKy0-Q+x8?;1Oy2zGGEWw^HS_T07W-z$K^a-ySrqfTEp@rX{dae5q^jG`8i|)@cT>~iqA_aO zo6aasK$cFN8aKuu-uEYai=GP5>1PQ2KhhQ0I;}^6i+M=cQ$Y^*eh;j&R>Vn$#@9@% z8t$zvcn?@#Pmci+^Bh_#=Vm!uFHeb>#yKY>fQZZI{)L7h&Oud3VHX zOh$BTsHmefBBMj5gHoOrV|>xr`B@W@is60dpD9SYv**OSLX>wWI4?RDTa)%$cEsxn zWev8yJ38%{nS>2V6jaON7|2D%Cj7XQxnz?N@PyXZJddM(3D;B%A@Ko3RhbY=24ahoiM8Na@|L?u48gQ8HCpThNkR7a5 zOmE64P!9VWc`l*z2YGDw2r))$z}v}Z7{0j1+H6T7Tv%9T`mb0#t0dKGS`a(vt5%o- z&=vpW8vO~ek_Cf5qr0<Q9j zUsRE%G6zu!Fd2MFz~7EZg+s8WVf!KpDnrwvO(yvnCjNUk zjZa5vXJMS_0Tz`;XS%MbWQ~N*qW6kI*m+_>FRhB~`5m29PSf%8z%(P0UHa&IE0l)* zXgl&;f|y`5$>0k!-rH}j3XD`CVGY#&-Kejwn@Trc>IG9=%$3)$ob(WQZ-JPL5u(v^ zPk?#h3EeIw%~lTV)adycrIPj62-!gH4sDrQ#{uS5M$Jd-v9O#wmHqBqV_`#-vyr*L zB6-i4!dWjOGLN|}$5WQDv@Wuy?u@hBv44{)fz+4yyJcFMY7gvZn{CxR@s9eiv^HjW 
zBVB$E5JspVX(z~Z{wa41*`axKJlEnhzoE^Uwm9fV=%w*a(y{@tanJf_cq$cc?`Y@) z?ea}o0Ql0v%?qXi1-Hcs2g^1j&{qpzu^CBY{7!FEM?DFT#1e5Ms|W;Xn2YSdo$rgG z{`NLcFFIBhW>hgwuVH>82{otCH4q}%?_{rAy%QZ2u&M=3Gf&<5pPtQjSWLCD-wvJ1 z{l<5IV0{NB~o{K_@01dR^$^VC4+DO0v)t~ z5RpNIWDb3+pl&58Ab|n*t8SB^$j;R;9tDsO;L;2FGT1RNLKaa6X2h(!=XH_gd)0g0 zXpTU8bO3&?F*5LTY(PJL`C74$PA7c;5!2YGkSjqMN*GYOy9_C^fs_mbQZsbUliWEc16hA7 z$^3 zQfDPBAEeo6tYlff39@R;D*R*brM0dBlr(QG3m7B_RrLWVnsA!i&pRp8V1_oAbwN;0NT%m5(i`W(_?EU{(TXAE`O(Jx|LtTHl@IC{`mQ_yDVr{TRZ1IJ z2{|o+?q54<;GL;j*nJBNrwYyfK6&ijux*c3%|Kpn-;&)IwUSc?rY0CS;JD2kK`5CGq( z4!qBaLoRS-jG}eJ9+EoPWzk_<8bXMzMGUY(X`sY$O?ZTa=Mo1#Q|Wd<)H{s4%dqYD z$_YcJREJv5fb310<2iJjVr?~mpOYQ}=(27Xy?lZ-J(hrF_#nniNdKezFwTJERc=Z% zLQo^z7WGn6)3^88%6Zl=4<0EwLi(;8_&)7-TiHqR-n)Hv09Wk5#|$7UlJtTf$QXXc zL(pP>jf|-*a2Y4r5@}1w(D9>`He% zy6E3Ln8lUL4_ce8RV2Y@RYV<4XG`tX)PkoB!bN8MiY` z%r*lVsED}=qID^Ts0r113`oZqDRl2ZbHm?T(xXNR*feSY+XE4YvoxNwVd3ZJU2iWr zk+X&I>qKb$td5`I;(rfxL6U#kbpJB#*^e};vod$q>jnW7{mM#h zZ(7F&jmStwLUo;h3gW@>;dD*N^JQT|z5Qz$iDWWaZmYBUd3^Kokfn)NouRQ$IqAt@ zyDO_n--~@wVyzG%m0p$SB4S~&L=BOE@1%@FD<69w3OyQ>jtVTikf$_vbZylc3tKc7`6Q=bRveS3`CS=7=>H zpI@gvY{kM|P6}Ggo`Z=wH}!gsIyWj*=dlnhhlK%FMSTF)Wp_VI@_UWD`7FTfP_arF zw+OL+IAjELj8wGavD3&TP@jH~UnaZ&KG;{a*^}8~1C^I_wwv;qP(+Ha#M}Du>g}H? zm-b!{1)r`YZ^WB6r$D?HJy!Uut#M&+dSZx1KFwpxR*%fcE=;K7BqZrq4S04Yg?rcD zL3Pf0jMwD2_!rB|{S5zi*g73h+x-rTrI}v){6Ua7ctB*H%Qq z#hbtHEifJG{fZjWF~ydnEP_=Ux$+3E6Mpo3*;%S@HM$5G^4we$maSTip8}4qL2n&4 z_#y;F#i}wteBX-WYX%6EDml*N5m5uE{%%ByKOa3%SPt;lEd;#Bb*ba&$I`?5-2x6z zy(pDSBYDUmQoBG(G_@3yuaK+-A;PpDlfS zfr*53%5bHq^u)zF-!d(F`M1#pDt<6;b-qh%f>uKg=JGuPl3cAo0(A7Le31k!sHuF7 zt(>~9Kq5wDs(AD|-uxT^RC)O|GSFR*-Y7BNe|hcvR$w0aTDG)WaTQFPq$- zRgb5KyUah{`@hwQjvgi+U<2{pp}N_cE=Aa6u3< z^@*sGVC`7M+_e3(fRixQv!OOr7ovs!6%PwW_qt1-bV?1%nD@gY!hdZbbQ&7S4|dUv zk-1GJdylmbffno+u=!MKzLz=afJo3`p$f&<8hW5(rIW8LS#Y&+9n3luAEHy3z;qtpOid`rN;N1XY5UYB>?K7dXq>?e%i5uC_A8(b8o%C@ABBQzb{lgx! 
z5eOIBsK<0d@x`JVO1gUaEw}ny{6q46(Jlwd7HX-YxWJ^smvJM9ja`z0p&|ge5UG&Z z#pTdRU~xc{VXMM4y~@}a_y>-NKHJG29aU)gmhx!5Sh%GPuDv`-G+C#-%j!jqO$`50 zv!lgx2BuF-*q|mv|90Z2rYkhS;hzi=?k3SyTeWc@J+t6GmsQP=|BB8U|G29OeO9|( zLR*D5=2Nz+t{}mu2T6X@vhy)Tq>ED(h?0~g5ySfF<|XR8h0~Ix^6d+WccuseC|pbk zVVuFqG^T%jX9mNj;$51+@cMwKH_9}4pLu@DO8O3%=HBjA7>&>;UG z)wLxqMMuv`amKAE_jcWrOd|F*=F|_~#!H%_9Gs?;yjg%)B0}ANl=4gLviB1jF!Oyk zZJyH*E9yCz#){!5OMQs(-UOY$Q>pAX{NW{!{5g~)W=u#ik>QN}BVfr& zi%)=)sMs1zx>>#ao~K7bg@8X6b`^cNUfEf#Te~s4fyQ)eOA=HdjABU21o5ob8`x?X zXXe4SV{XbZeR0sRWnXFl)yJz;FzYEgzCeS+aGl;1)EY}hikz|B{y%MW|J5zxW_b1F z-<9^aou}WgwgLi_TM3c$pwf~MShUl!UgoB+<;aanb7@r*sMJkNf526T+zjaB9^$J^ zo8)T?0-j5Y$)|JjR*kKU(sr`I&p$bpaVdT!K}8pADw1{AGbP{Al>@VTOeLqo)y-~E z^*cgg9`3GlZE*pTg%iKklRBx}QpE|KAiU>7DOZ>7kY!*tpL_`-K|tQLMDw9;G^*sI z3dRojvFS3t^!=@zI=qCmm>&1<*cZSt#lKR4$|fq4jfvDd6l%RlY`kB6MQ$oKlgh;& zE6$ex7r_Dsc{1zI>Clu=AfW29XMri5v)RrQ+88^dadwdZKSTKiy#FRvB52f zTXPngA#&~eR%kO_lP->^_onp!Ww`Z&lmrNnNWCyUS0eJQFFK;F8=r);9|WAa+DaoR zypDN5=>g&s`f|X$>%L z?Z(d}⁢(OEzJiDyfA!LUJiSTG<#>K>V0i?r}!;e-}WH33|>^1UAq-=wAhN-ZfNz z4E4|`FR^}xum@zj9z>m>h%WCOz{jo4tqC35_=9?)PIz2|dv=VaAisteAP&q@Lrknf z-C{*7*Ea>5^$o%ft-f5TVqQ$#Q$7u@mEdI@j8otWNo@2C=}3`KUjeGq&Z+^gj|{Om z5QvY@|0X3rst9r61n{?42a1J=Hg{__`+aH!fKn9T06DxV1=8|ads^D))-atQv!lts zq^H8A4EsT-#``D&z0vqTeSLBM7M=Pc3}qI{=Qd@{zgK2#4vYTrK)|5-Jj!(2p2Gnd zhtW7TVlR4=UTDNdYP-s#V{iQ#s&mB?72CK3-=vk9gIypJHFX#LhzX%gzZ+n*mToob=r^Vc=#9&liV@{%@@}|&3iHuy!5qF2j{BW zGf4O!r*jstDv?&Wo!Tb*7WoIagc5; z-O9n;Wj0O`^SVI%3}`Qg%kuD0KV?Pnl;y$S786CTRdOT(mIZwp?cGit0PtYyRzi@N zCCt}S*8<2$oKDG`aYB;vn)TDWOwRS8Li5qMWJ9IBF-w`*%>;v{Z2>v*m{>vh1~JsF zqoGe@g1sh;kBD?cFB;EFf0oz6FzA1?F@S-!SdZ^X%DCl2SmyPc? 
za16G<*Y$%(2nMXqhiiM|-_B3C)N9URPgcSdLQEa0aR_9V46rPWz-jvs2{q_K>Mctz zLH{+P$oc}`wCp;JlzRKR;&6w4?e8vUm}0@y)AK`sw05Hg#b*QlG8e zlFtB|JpcU*4oCl;EW0yYK?<#|Ur+oVOBQmHXW3N{-1N7brwedB*9^T9Ou1`}XU5VH zIK-B{GYmCSar+;5#E1lYWV~VWcq**?i?a0`7*W{?&Efk62V)q#?&OZ$oB{aY zJh^6m#2_WbWXyY|7b99yAZO^Zt<6{}#oU$INNCV%akodJz!KTQ*<7q&Gme2hw#z^0 z?67S~Bpgw~hr%l*^h^K00NG{^vpG9hW}sd9-ncCOHs6ehFKn%cY4#TskCs73Fp$8+ zp7_R3PG@C-Yuk#)7Zz=X+=ZMa{=zp3E-{0EZw4JR?%j^D4nI>;l+0ZfoSUjd{%IoQPYO8dbb$^X+B#+VO(suKwJ43nrDZlJj(z1 z7J0TI)?=@Y=tIFJ|2*()Tuyyn_P()#+`PUQIdqu7*nlHE73zwO)ecK>$>T zkO5RPqX7sZ7_L;pQp|A5by9m;4RR!(ws-OH*$S6n5J zm-1~FlueRHh=^=P?<I^lAmB!IF?@b$}ZfWJB7QIT4gU$t5>tdMZex=ATi#XSJTAi|S#8eD~9 z9;xH}vgWu*gsX@#x3oa=T7_+izm6n9H-}_z5<(j~!tJJyXw-f1Fn>QQw2&}7YT+~m zKK-z5)S4;KSW9^zL*4;6X2+<@0nYCFXSH9m&+r z4&D@721$5{N*qJ#!kcszS9$xENe8Qy=)1pkfu0 zT2E_XW&T2Km{}4x*^vl?W^m#$rK19ek6py5h7mdz)vGU86{&pg!k`Gy9B!mzI3#)^ z(?tWl|0zttC4CR!ZAGohTq^CvtO^{828EFGk?LV;){h zYHo>Lkf$<_Vku%r_JoT}(h+p0HM}%{!uqgGX%jufMI9#EP3L`lsXc(8(dHxA8o0XJ zh&8K_yO!j>J5cT4UOlM_WaTgVKTHCFXElm<%k|Fkt22kUy&#J}KF775B*iT*4;RcKqNFL%^HQ-L*e&~E z#s?HLblF>D2s=%AK2qjJ9i~$AH&DX+E>jots+?-w8mNJNOEukKRlPNgEeg9IiI*ux z69G)VH89DX%s@U zJuZP5_&2F9Rm-P8f&jq<*pOSd!EG#e7#c5k?ckQsfw6#f5hfpkGJy1~UIcX}Mqp`%2dx--^UeX6+Y$hxge z`AC+R3$^s75|#8nNS@{SP~U0!ZjgI%iWVqmmO?H_QIjHOiT*+n7pf>Tg1$VSFzlUG z_~yVD@ww+0TaTka)o95BmB80E9!5_%)AbIA_wrZd8EOwPzc<=Uyo7f_54-)ut1fCl z_MFsY87aJ~2~t3A!tLKu?t0IjZzKRb2vMetgGyew|MxYN!$Y030rnw8lx!=9G`^#q zOO<6B;gaP+*IlYfg4uvIf%5u}~)z#-E_1fzH+Fg_FAaD2*U^ zjj0^5YG6&9YaW`rW|7_js4ex<0DMAL#1$a&k~1$84-#JIkBntYG{2PZ9-X&?-(7qt zBX$>tA_d2DEGfh&=FaVIpN2>4Mo$M$e9qxlWU7)^Llw{CIlW%2AlV z7~!V=hOx`s`4<7s0z~Nu7@@~Db-1y#JSz}5LB#-{yKv^4`T* z{V9&61GH=IGsUAmDCWJfS-~2wyM0_uw&txruyARCvHrmFPqeoCyVeo=uAG6OEchq) zHMh8RAlLG%Rt-47bEeDYIizi2NYz9fh`hOO5YHB7_67tv=HMEQIZ8MF#IxYE_6+q% zKbMERN}WTV`iUa8(<(+K+iY@)K*ek}Ga#|V2~10Wrn|^^8RD}KD8{E|4~zOwi*Vnk z#@Z}UMFXVr@xojRAk1?aZH*O9BuD5$;#Cwxpe#3$=UExTwQm0QmOP)gumB3;G>rt4 z{;byeX}yL3NfyGDB#l`wD#giy 
zXCSLK8C@dIU7`q37CSUKJ*bdBy%w(EM|AtY2TCAebCmcX(TgO_GJQb#o~SnQjGy7De-!1#ECx){=h z;`BW?me#}Iu3(a7_06kLDqp%qRN8(XQ+B&gocP+BQ(R^dZN^fB5uy>kcJb6Jsopmy zMF@1LU!NGvkd5ET9H5e)C+nOm_LeiW^*p+x^n+ThQ+Hb^8cPEP3|T#GY6r9VJ)kar zfKc}UMsUX7Q`?obr!yjnP`JShrr1jf>u{i%7uizGvx1t-tFQZLK)@4mD4Sr@h)i&TAP?+_of8;_6QoHaVh?$w%kaYjqTqFpwpAD^sW$v11}@0V|<54nn!r8hc=c;!D|OuPuPy0~&#n{E7zBU;ZLkXTjyx zLO$oGe=6^46-mt9T=`(L8^^oRG4hH4u%RzjuE%JT(v{Ygm*H?vEp<|N`hlY5dIvkx zBw9idxZ0LTnn_?gq|AD5E9uh?GyU&6U6m(FKxq#wq} zRU*9OeqTi)W4Q?jde2#yu)~4$^>bKB@2%d$d}-JI2pZHMvtmoh)`C0#460wkvFr5eSVLOtP;S3-h%4GEr!b-<%=0P`NP2h6DqV zBk;y^-g;@YWafFTA(quZ@$3SVNuJb^ry;6#M%cc$&_wnd7^%b;A`NmzHFQYz*;SWg%lr8%-R3l9 zH7c+xFlbrhgwdyYk3|86o_08-+>`{&NN@av{=eJ4U96~B=3`mhv)*2wQ$dY1_*=)7 zNQSzraIf?8@6rL#OKHuY?gFom;#g=QmuAiMFya2}8BCPr&cZ*TnV7grCd{7{QRiF$ zijzzda{SZYw_gn41fWD}l@j0AIK^sY@BK_#V@5t}m-E+;l$!`yi>u*z|LgXDT0Kd_ zOw%?m2Ef}uu3*mp*UQ?saN)ZboIm$is`k6HTvIDbfk<##$Id63F5BoCV;mg!QGZ1k zy~n%*E=XB)Z2H5=@C?yw-g!v$qOm3&$UesbJOtKcW69H#6L?pQd!t8z(%CA&uRd0Z zf$WZU#lOinwf`X4(wg4=VbcyS;C0+cWhPSc9gP z&3;&kt1*#pk=IQ4u=iR#x0H!ja?2gM(yOdP5I|43LO0p*tKGHFnfKIT55_`1l{i>V z7`j$3dmyQEjjL5H3dM@mp1Lk8Dd+Rl*q5_%9&f7-cM>uHBPC6d@t?jy{h8Erkj7@A>@~nAaVK-gt1!% z^rX6eWxzfX(iU2L(m@fsmca^zfnGU`q`9@|Y=Bk*#W_q9o&+_P0PI1#%Hw+~8=E@i zYe#fRM@&G3)4Cco;}t0r#C6DXer39BFg^OdJf2HT64b+FOB4vHq&p*R7r>I}Xq_uh z>9#guO%M9x#9dGfmR)&k-Mn5Tx~0wei`3^{Ln>r)$1s^u4craE94bvgbqS7O8x!EoM;^F0I`(Vxq)Bd&} zy5v`T!rDFV`dp^q2=Qcg^z%teN2-td+s1ovWVf{uHO^+=d0@fu<j_<+9s2_ES5WVv zN3Cg_t8}ZLFB7`Ltp`Sq%c}`$U4A=mfAeTW%F4;6lg7)x^X(AOCU=5%@w#=KD<;1^ z?pg+9Ab0WvemSSiR#@$I7SnJFmbb}84v9cJX@F{)`7xEBLDW(+2<0y|Y*bx`rO@G` zN~}0~JK)|z`kYJM)U2JG`a6Oid+$!DqzVOUk*0Cgz3SOU3YE2k(fIu4mvwr%%L{rI zXg*C_cdX@$R0~%&K%R=+tb)%}(KuQRp9tzPQFiRH=*q(tLQxMX6iKzmUCc zy^bqLn;HScCSih6pw?1<`lJIVTkRco5W`IqOYY(Yb$N$buW5k2g4x<&wn~T>=qb={ z_T=Ts^GUfcVO#9fnn?fkzNm^n9M+g8`WLI(VZ1mrE_kEQou2o0D7&~1abCE(UOh~o z!COCS{)v<__`St;Y!BSU*Z$E11GSS>I{VXpDmhxk$1d_2?Md`oqJy`{z0ai8Q%ubfEo~*nUZN>x0u3Ni5&)1-5u(qTse2*eNrII7AShV^F3H%NWoVb 
zj1-th@pjFj9R!7M{&n+}c7pZjP_ux_oh~?iJr6zfKd$8 zP4xKAzdDrSdNwE4HhJRIl4P~>?ml42@*=imL)>7e0S1q4^$pyvk#-B3Ktw5mdoV`A z$A{m^C21Of{gDb#7Vm=f<|A~z*8L{sYQIEz@7^QgdD%^`ZSOyNw)E^{X{Sdu0PMLXi#(5|J)Tin*5I9tAb- zv$Dy;QDszwNQxY0g+9mcNWtODGI)@)@mi;Uc~;`iT8sCIvxE&<6hiMwH5lYR5S=3n z!y9ScwIBDrrmwZGKJ(cH=#(`!%BYIKM%Ck%m*w_CFVt+GQ5pWgR<0EYP9lZ!e+p#P zeh1B(MeDN=u1gJh&D&+KKK?C%p=5lR+<4IS|i{mvF_kjX1~+L zG|ybh0B&&)h*hP3#(%KoLVQAaO1%$q`U;L~gu-88*6SnweV*0^WD31O`f$?!Hb1Qm^E zv6Tk;A0icD{FkkQS^URm107SR=qRH#iP%Nnc35p-1 zuir)~-Iz3lhS5~J+5{6#*@@@eJNfMEhAsbvss5*~FC(t&rTdY?5;ohpqkdKvnIwJ_ z!=h{af!OG2&|NCcZejhxu%j3J?*PTKFKZ3ORoI%<~=Y#Hnkped>#O7KFX1- zbW2GTP-!Bn&JeEy5nPBGR9kMA@0tsY&d^1}3*-%cOr-AKNtpk(>8F7a1m1kjZ-5AT z$PvrqTM5at>DphY6Yl&sP`D3zSm4Nl{$AV4*P|2d9%(jH79kDo07I|TLgC)P$fSzY ztVrkcBPlaR!G+%3*A35Znp17?LJ1zoQsLJo+~zJZ1Vows#Ub2KS5C+q({U7vOEt2~ zI_SE}H@fdRD$ULYE))N*Iq*RMx^NoyUsaPrbzMA5>>*N=0JnooW)7b4w^Lqey}Uq} zIvaHukASyF_kp^%v3QFgWKRMX!k{H>;+%@Npqu)rlt|>!(9nckZk5 zdv&H$+}RF+BOexrOr~1iYo>5G^V=eleK&dNjCjV;=zok5u!g3Hl%IVcHLM5R40L~7 zz}JGpDY^VCs5ImYT{?a9MR)-g*Gwm>V&K!5#;cO>DC!h~R z_i1b8gZJpOL!2Fx_K;*KtXGuID2z-%6}YI#s=?5w-ACuC{RYiPWr=oA*ImRiwiJnM z3i}VU)O_F|O*zCzzOB{~z7;W|uYT^0Z{!OotCNTIHP z7#`}z2&b#a(Ei5b9Zeh=k@pfj?2TsyjJskr-3))O=PFR)&-OH zt&>nqYv^8Vm0VZb%1q^g=Xt&!T8+%%<?{ul8i{ z1jRW}cPA6O%b2rg?LW?J9+m9C4@E1>EhVp3l2R5^nXt&4Yo0_^$*AGpF+kC^pLT`$ zPfO48kTZTxr0pXHeU5Lnc%3Ezt@Gc4#tJL=cB?+RLv<%P?8<$@n)rs_v(~AnO!u?i zn5|#AZ#6^D_b17W0{LTFI1$ilwRbN+?XWq;z=(-g_Y|?h2h#@wW_)Q2&hwCo>tdrs z0R+o)N-s0cIac{6*(E;N2&CM#8(s*$^ckcYhtv>fqW?}pQ*8vPlEL5e zWdPSU=&WI z`EY_7pQbQmx<&TXVu}BT-zx-4Qup3BoZs-0c?G0hldx@Q7g34MfBUGkwGUdk)BQrp zf>J2sGyGANe#pK4Y@7_|TR>r>xvr8C@M#;N1<)&{W)(Cwrfv+t4;$!*FARR8DGDst zcDV4Ai*ltyp`ELlIx{2ro=vLG*)IElmgc7_i2d@h^}jj$KOz>ju(Ua+8A7@#QKdGS zrC?ED2qEYUY~c2pNyi&e+y>TmCpreKZlFn#`(8OsBY2z^&v^~L0x{`8E-Jr_3mFzV z7oPNR0OiEn4I%SVmX;!_-o{yX;YbrGx9GM3iQL)-QTi{cK1Cp1FoWU@eP)z_ z>2O<*Zxk_(n~R7+b^Y6zJ9XJ(A+V-`DCvvVh1EZRe9Lv1!?UXM zijXV&9p-rK6n^U1Ql1N;;=;y7#1_~gzTn2grUodV1aU;9jhz6+mHzq%dY?eLm@c<%D 
z0E?FsL+A*T?uBP3@--g~mf?8o)__b2j;7=lxDBi#SI7F>$5OugQ3k92DVH<(_C_Wp z`HVbgG~zH~B#WIy{seVZfbjrDt23D(v36pB&b@c?P*{awAD-yGMLCISBfYz=G4-O& zj&xEAdm#V*D75j=h3#$y7`~90X}wUSAA=>gKDPfz90KN%;`;6_Fu-p0Ro$m~t%=Ra z;S>`^`%~4>mKtQAQY0><@7}rLcBsZI?BCo#oVZvUEI%_e+TXadL6{D{O!MBPU7RA_ z4Y&jMT#_ccbWc_|ZgN;Ki3EqF=_Rb2+tPHo4to(a?Pu%X`<^VfH2I6dFn=rOdKJL0 z=qJ;Kzy={R$=+&rvhj9&MA?yVPY>tle`-emt~XR2SJ zqcU(8XKGMD$I~y3j`3`s)VHXl0dr#y2;+f%`+7H29CcFM21!~pM%bgkkH8c=FG}Z7 zubkfHcWU0TJV=@OKdI!LjW9VQLJR%f3koq42d4V1$#`K~%Bj6|T3@XYr`MrP!OO1~ zpE+OvyWwH6baqvRpD9U^7~Vv3^@GmDW&;EPIAGaozXF$Nh;`JM6C3i_kav6h7uHgqATF6o~S*sQ8|clJj<_nu{H>jBIc_!vu-Lidqo$ zJjuPZgLp|H7)Kq|b}8cl$3NO%Q40)#y>ksLBe~y&Dh?4P)P;%RLnOYNN zjLcP1MWM5jEFppWuQ%UvQACzed8Wl_9`$YM8EKnxe#TT{L+5%3W1h#36A+x_R4TG9 z;_#^GvW{jMVwhnP1n^vZn-1C#ecpX~HbBt5t$U?I-H$=1m0{n$zZb`pt4B6U+JvxV&?}dWLQcB)ud(yGx#*xnZkG6fChy;C!<6 zg>gbENj;?$mBvu{JXiJkib)aGYtDqLjqf@v6hrlP(ZmU^>?e|@ zB9Wzh{kp4(wqEMDK5;@&Wh8BnD#fEbz>WtCpog~ZXnmk6+syIs7&Uc28f5h?pz!+& z3^HxW2$?xK-Sa6RO$PO>>@3J(i0Z4T`-(KQy5rGCgGst-KpL4ngfOP|Q z=r;vkEh#kh!cNy_IMTn+*(IDDnz7(JmujAOLxO|mjCLY%`~Ft3aLg?*6Fe+$J1hO~ z@J`acWpaYLUlM~V(qM~px($BV&nklNXni1Fn-iFDtwEk+%W2mT z%$74VI*(mhZkp}A<&!5sio~Vb1SUxzT3XB| zHXr9QNVo(9-X=Si`Z?||<9ahI1nB{pJ1#tgMd6WnJr=$jdaM1Z%IXFLeIo+?3JyTw z^2nghszH&_PXuL=Lf;>}r6o<-|bXHuwzpvQA7lbXMx^ zc6G1>dC7o9Hh~YYRCA?@-?>by*a@W;%tAE`FqfmX`_#kY!o&DNeG?F8B@4FL$xJC@ zdl+m8FnrWL`Kh`rKYqi!G}Vr6PN;kN-ae9%^!Mti zX}uHPk-TI|TgT~&!DxB+Gx{ME{$0<8qOtSqF|;^we6_z{=8>7bqj~DDDPwHYs%{zX zj9J(a*QXSuSV$kTarkZO9`qi`=blExy1r5g)4f5XJE!+M?XQiXe-S%+?C}44;}Ilt zGc!*iuPF23V~dR^2ff2E04+e$znw3nf^Dipn}k?B3bdX(QDKx^zLmCJSlp_z?7v~A zxemEr-OL%m49eF+ba1t}%n%p|#uA<+oEQM4YH684y>kwwZjOt2oO?Hh8R|f@?JsF6 z#j&AlqJ499vmN|UY@8tEAbk0a2nGoG4$NAyK9> zE73KfU1A~<)7l^!%ze2DY}!xi3I1pUm~LB501GnH4T9Ek0Ctl(BruOt_1Wa@G6kKL z_m9)h>P{$nt6JOm-Lf8+hoZ)bBJ9Y?pEU}m#|al;41y6~eMY+1#4v&Mo3s-1bh^= zU8e<{{>>JoHQNiR!;V*}K@23#LTUPjE>jRCvX|$+4iDK+J8lBfuGG zij?0fUW6X}(kyPbGqaN1{pv2L_+Q2a|K2zLZcEjR_63avMAG-0JuFdOBmJS@#?EzkZmop05=7Ar>76*qzwH4(H+47bswxI 
zm4QS+(@B@d!KpcT;zS<#9(x*P?mV;1jv^ZCqjRE#!KQ$wKl637UPx+?(<#BhDLT>c zB4~(3qU#DG-_7}M|7oqZnv__vNbLNZ6F}TIFpD*Wqj|12rGATWuvBAefzp+#`hU4~ zn8m6)Aw*xtDMUMJp~?A$G zll=b%x}#yTVfRGKC+3#klETDjMfl})PR5R(7lh`BF@Fb^)x5NInN#(^rpQ|vq)Ai2jO07~EzuzxhQ0npN`1$-Q>88Rp8C!Y69K}>SSr~(`dN@5i+)mwuCxwd3jC+*xHp&he?yXv zLagMb(k}PP3p!h*K@b~nO^2$!|7`g+)Yz)@bILEP=z&mSraZF}G{Jk)ZR+L2U`TtQ zS!gga+T{;CH3~Z-Qo`?z7$EPun8*cNqhAWx_`YNEpR?@#Mu?A!dxnn9BqmX23<=L& zFPzu2*PvbTb)r=IzakAyP{GIyoce5F2r(ot+~R5CYZodgXpHRW zdqk)@n2)Rvq?IpeUBvH|eZDv)+fPxZ@bPNg4O0NcG+A%9Ym^h{{N7eUyD#SY>&%g= z2y8M0xr}o+wCq%?ktN?VEY83WTCA(~4eGev4L1=*{i^3-h8sFjbH!R!gcwwCNz{V* z@B=L-GbkzGVeKA%T#>=S;ijTsOo*8}5yK+IkhTGQ{l2oTviq8K44;Z|nUsPL;GkO{ zd9vZ@X;gab)`=#)l#sNr7Hu`aw;4wR}3z3I`j-48(U zp#{%ydP*V1T2m?;P`_L7gbw1{Q2$t3tsu6uF&jNL@&JConLl@$~lAgkOzaj7nF zOXy8&8Y+`MgmHRjku?1Lj7l0Sn^~xM+4Rp(VquP5wLG#Fp#Rulf+}4yM99eR-4Vid z^mn@~fuqcAaZDmBsRO*zQ3FIv>2wqi{YcjHisLjhiaaRGjKPgkS0jWCpE~1f4blpo zG@}25$Ah!vjYc9?lo|X$<|=#c=1s16K9u31U*iwPi|V$#yQYAEY8Fk%Z!Xi@{|bGC zy8V`krK*-_mS1s-GS!y<;Ybs;KA$hR{2|=_)*w2q)HQo>eV$!b4PkKXfTdK4E)OkI z@jp%liE0?F2yrzSw9nz81bg!^6>T3(vpn!9H$-alIZr>=KCZViYLHVgrKcwuqND3l zmlD;NGQ+9WdsqogOQnl^bV1%N?!pC+R6v`y_T9v!o6f$w zJk5oU-kO&5kCJ`u18vpetv68HkE3hSf7KP>*QK@5ZMI+NMdLHq;w%Ehwl{iu8c=U_ zHff!P4~47|F-Xj`mbjAzscB@pty1AI1@Nuh9klFPusv;I+Pxv+y%bgLZhp6IJAae%!IYor$avGN z#5#*iQ(k=S5^Gx422#@x{-gCV4~`D~#>o}yz;?xFLV7+NDR^Oug%D!ZQI2PhfEukH zw-MK>Z@wMqu5rScV%w&%lX)kA%{%OV)93;lRB6W_c2#_kuqqO2kzPK_de+TP)ckEi zGP5*gfe>xOys)relg!Lq_|aJ0VP&E1#Q4kaAREn2L1kCv$LOhVsU+AFnqenc^172{ zJvVsFwWivRV2!`3DwLD>Hr{FE7b3UsdZ1m~UH{Xj-U}_Djwq}vmJ~k)J>?-r2i%W0 z&mMup4~LWjYQ*Nnx1M&RG}s5jbn9!Ua52@G%%J*9BnJ5KB@hTZMJ<~7>P5Ayatpi)j(FCnGCY_o zyA>$ZcV9)QuWK*rz;T`b!!6&WWkVec9;UUhFAPRH(Q_r=X&LM z)`0_w7F@@(i5Pc$(G3_27>V%a0vI@W?9D2W%~lh|x)%$U3H1sek%{IU>6UaCSulxdKKUk+JCzspO>^W-Xjf(d1Sbx1H0f~3Y&cx;%hJdAgNG;N{3RwlD(lwU^y|qju!7e4Nk=U^-b%akPk&J!S*SM zQn5Rxz+hXE**}7d1rf_L9`=HiY9pHXjgmf{z&a^zOcYTp$hV#ilgE>zamtw!-$Z*J_gpS8Et72`@sV-~PZS7M27U9OZCPdg_7p8%pE&c{ysBWn%cP 
z&i%NO_f_7je}NulBNi$2KRTP1u>7Q@8-Y>=_2`397W5~pa*X< zQ+A0SgfU@%nkgj8aX{z_z8BeFi{~$`|00^IwNS| z4AlWQqp)m1_EXBWXpk+B-xkJH>Z{quITK2hJ%SpC#tBzx&PD~;0%|Y1zCsa7bVvt^ z6PC8tm8yCA?@V{xP<^edW&gA4P4s_4JF$QMwXXNUac^%Ely`!lh~X*5!=>!xN#&7Y zY(gec&mV${UE2(trJJ*8nje@hq0SXf2WTSV+Uw0KS~df8_WjrzwRvMDFUY#K@SJt^ zfyIIRGcl@H4`5m{fZbL(mk@j8@o1NwHbhU)BoHx02 zg>14iD@9SAQSJP8_BUSkkCNrjb(=hEkz>(MPG^FA9TIQ7oVWv`F6dA#;^qnVXmmf} zw+VYw?Ac5xXgE0FGTO(4hT1b!iBb`|!w+ZZF#{J9|M!h@>^+`5hV z+x&{PRG_;>;R7tBj1t3_Hkhs|?Us~WpU%Jslsj+Pl`9M3YKa25sA~n$0JXdJo@{47 z+8=uk6eRSDH;%d{gcJVo3=y!W*|(>F9vo>L^A)CW;kmQS_yha`ks1~fCElB0)J9CC)a#CK^93c;`+6`QQWq<|{kf=$lYpk-%8;0EKo9}J}_(cFnZpprbP_|o$dcQ#L2a{8!8= zp3NSKCWf^xiScB2oUM<8+LN!vQw_pzoGkg+5m%Lm%m~p;&oUR^Uu6`-(z`zp=_Z~A z;Th;)bZJL&o|_^c+q>P9}3dWM^(M3T(EeR*T%J9XC*9?Ql;HmT(?okzWTNZ>Nbz+H82F*tq~Y9Y;5vbOhJS z{&(7KEHW6q;AB&C#eu$rJ@qm|G^}yDb^lD2PbBAs;4o?e3}Xo&F*I~h-Y0!BD928C zuzDW0w`wTY2k&{25#AdRwbVgkRcKF7%j~HW^w~L~t5*+isFNWI`}&GaBqep6hnweg zCxJ*YT{&YSBunmt_#UNC810G^E`uYzHJvJAgE-O2`Z$m<#QCt?>+#$~s;6kuHOshl z#|~=24khWM4_UA(U9T+d@E;LU=)7M0@9<{lGv)Vt|PGRJE zq8b7A=I$Z77$@`d*fj&E={6SGN8+9T>e%Tl(cAc9Fqs)oH0DD&H^S5lECu(-2vwej z$@nlLVt%t2aA5`$|1?X5I}l`H(x_X=KW3z2gv#tA)G>QdM6GXk)soMqZaM`?nM~!~ ziCSLoycSHEAFGe!hRRXR5x$hRjiobt*w$95Ga~pg#+ZsQvvrpc`?6opiyRQyd9=0< z2B=FJdEy`2bhsIi#4N3i!p?8*I1hxS3}Zm_jNnMMbTh0|{J3g)^aEfT;2C87@b56H z=F3RY?pLG6h6HnblO0(IhYwo*k2+qh3_%!sYPRm zV*oFuEVrbPM0g{Hs!KHrw4{h!B#zj?Ul$(qty>QvAMJZXAC;3TySUKEl0JhBn*Y)W z>#um54wg$5(SYU5e&=ZXn0h4@<6w-AY1&>gBjYEhuK2zcsoHd zCHPVl-j(BeDY&g~<5NTqQ`4nvvHROhKnil#RzGCL-D1Lm0F@7DU(l~{ZF(79& zfDanjx~^F1ZQ0_Oj77x43Oh-2QT|1c>J4Yt8Ntr=1Y!4>{Vj9 z+P~0u9T3<4r~i4USAdS|&01+um}!5kz%6AIycx~SXzY99_Us#mC#KmYpNbRGl11qU zapIe*G}Dm!d2^hE=4YLgUx%Sci&1F*N6+DW{eMP=vz_cPAflZ^+D90TOhQ*vzcAOi z-{V*q^Y7ZDHODAQ_}dGHg=+WVydRl&uHajRC02sMsM0pJ=WSVw#oNPb9azl$R zSx6%O+bIO2smI@JSQ2l?_`E1;f3p~yuq6N@%1PZa1UboJOBxVtIwuTA*h5BM2YJ>C zq5Ken%*(csP5%~OZ=p1cH;cthJLg*A6UNO?I(F8racI^)5G~J@m)6N7&}6Qu(cp@) zhI`FbqR&eX=z05l*I8-h3ssX`k`%Q>Wh*M>Xq$45T+jkxrd{Lh0$(Y}HLnb10 
zgiIx&kg}hrHlbf$labGi&1+JAz(GQ#`eV|(@w776sf6d!>rMWgCRJLdU4wu&!0QM{ z5S=kRb&>>k@ZOsl_Ko+gkMjALPWNS0@B8JDvSJw(Fvhf3yHw|&aeUeaHuhKOhu}Nk zE^p#`8c3=B9G{I;({ia2psRUYez|2TvZD%gj}MSKdZrd49gnVsf{1^n zLKZOo+%Cdc`o>tQio{^?r;c;KZ807|@4HLaP(uDjxhLCa2u&WA;P}j_s3+%t%?ZFd zR(a$ZG~}2i>Cwo1DSkMsPGSdM5jcqAqTR~~DsMP??Jt_N+(IT!X(rqrkEJ))2s#_klF^2zI`GUbc_)i zv)gVFD#Tiwx?NFgr^(y%x_TcRj%MV2_ajiC0H|*vv=4$WB*!57&_I5}INVZ*KLWI6 z4pxs&Y7HbUMNhV~kkS*OD)9E883nd_gl-XsXeUEfD-{S>&6^CYwbM%qWpf@Be9=4^ zDBDJ3c_GH)+FfNx5wt+U!da6<%KoHtUfY#>b9O8`@BKGb3*9nf=rZ1^;}8{rK@)1X zz1?3j@oE^XcaHp57*n_?Mhic)uMVYc!?W0Gg^+SxzdS(&M}1M@I%D7)wqB4W84_t+ zk^?Taw@ecPh0(%g;)T2J2AyKKw?Pj*l8K7sRGbJ&2C?xc8>S-HNV!=vHE@Mce%09< z!&!9V>cWKicW1$R;xFt8VjtE!>z)NV2jgjZ8FH&B{fkL$79;mM?k$1hOgm|E6o#UeqdW$}tp)`9L zXnM-*6hrQb5BzoTgs)p%Zl4=h@~g84+3t&DeBHYnozlo#M0 zdTvlW?9PU?WTjzx%Q!p09BE7)xOHqdq+N=)d5sCbgX z%w|OH^t~$(7UF8(F>FGRm=CD822@;RkrZ>P^?sxTu-w}O;g3z-p;U#pTT~)zb^@Q9 z<5`9!IxCqjZU>6kztYTuEA~&uI7H+A$!ort)|6d&I|MZYl}GdU#!|Swsw#TbPY@za z7q6Y?Ip^VIrJ$fs2Dy9S3DnJ(*ztjAz;p&|+mkONRxHFT?ej!Pkj4kM{oUnl(TKo)+Lmj0BTnG?{EW8HOqy;3S5tjLdU|NDOm=Rf^j$=%Oo&_zJq zdeRxcf@h+J@=nMVsGJ14%q0uf$9m^WUA~T{qJb~0$CcESR{)h(7}lj<#%;KSDt^=K z1ac+I0F{jCpS#L(_T2v{Pr?>;{b=k#XQF-_tm?r*mu;B2csU>Y@9BE<(@ev>dl?9{ z@Mea_#J?z+X|Q7InxeJTH8T$McU?=nB9caZwGk`5aVDliQ%z>+zp#|A_~6h}L&=HA zM=DnlBcCHPipg=H;2{40I@&L*T+WAuJ|b~O2R?&Dvu%5B(d?^Ty}BJjL_|kIZ`;{1<;Au1=Es?-*+Vv``y%)@ITDk2WS{qNhYg;TWlqZxWp zYS!}OXF9rIgu9t_57LY7ZXm1?Chz%c$BOjHpjSH1=5#iEuuGt{T1q2QN+Uuf67&@3 z?A2*K(y0UK6lsmY=e@L&Q7^=DPstJYoyFaad}?7o@m*5q{%$qMkiC=xF0K@D14=c> z^?!4KB9~Tp`cEuWK_&}_=pl>cq|Uf4j6WM_i!JcMCpRnro_b@J-)ORx&8x9O*AlOj zoPlJe^Y$Y`d!9@5ouEz8>Zv5!Ze))NxW525L;+9(%U3N8jk9re9zzF28L&~|8P!#U zFho4dePWZDwB4Yns!&%uxhE3aU`(kfSV}W@;NTdUhkf1lZ4b}^HrPmL^*Eckmh-O& zU`*rg`VuAqx8g=n+1Op<6-DfYfW@~-$*jU7eN_q1ldDf#N-8DmM`N1ARNuEZ7^%Jb z$NXoqtw^A{MTq^gs*@IJ-S4?!t76bzI8I&kfcIH&aV^WEDI0DdfRsFI694(xAVF=_ zoL>*acuCRXQ-%J3iCF*sFKMR~S@mtfoRu>Fw4ruQma)*Zg?W|U{iyJss~mYk`*sZ^ zl0fGqXY?`V5l_^61&5oFAyjs7^gWs-_gm-=qAY0TrbFs=TI1+@V$cR 
z80`aerSXoZL_~j>b=Js1kAAHBAF)(PUJt7x_~ ztN4m=ourI>MOq%)6vh1-LC{6_$?+4e45|9nh<3X*v&tRFDA(nNh5~QC+6u_y@ylKE zPihW9lM@VI=8`I4y0I;CSR&tctTv^^heXUZZgBx0pvoco*ehg$iqV>*| z=sKPfSZw{U?hKYA?Lh`kYL-IjGi;-U|JMuocqGsrOT3#@d%O9S;X9s}!-qW|ey6kD zjdx(^zAB{a*K~C^o94~U8`NMY4gG&9zBvL=EEv)xVai^Ohx0FH^3DihEm80|XCu5M|Zq8Bz^&m;8mB=+2aiE4<@6p_D~2Q=}SVG30#rZWsYeI&<%3!LKAK?Mrvl- zu(_Ih{y92f5<&b|iwJp94TEGoL7t;^p>bH|SnG9bb%@I|KDZ$ww~33_CG*pf!))$| zegdVg@C*KbbOMSo_5!6b2X7Ptz|Hjsh9ck5^&Tpi5{HEF=ZH5I`;;r1&`CMc@hRm- z(_7a8X{4nAaLy+w;*EWI|NO6W^4HHA_>#QMjy_W#IdA-vXxbUJ+7++) z3p$UIPXZQA6eoeM$6D0P+01Q4eFUGJDLBWVLh!yp#zN3+x6TGTSF z1mwy0a2w!Ru{_pDlDNX?aHp0%rMmi#p^f5$plItHcs5w+*Xm1MgKN1 zZ>A|k)VxAvL8V;u_G$z~LFGp%lw{UOpHKPLKqV*eh5l8ko(4woJq_rIAX@%-^;%aK zfkaRn?MlzYvC+hoE=&%0fp(kKf~WZ+)fe{Mbq*HM9`R+9msyk z(d<%rZ$yUuxW8#BXf6-0z{D0#0#0&hk6ATpVK{6$qW_cxsWDNU)o75x;mDTjbs7p~ zJ*$SD-0}}~Sfi8fqdDoaox@@8w5ON1?ly?RIQ55$VufEo`${YBNynUh^Rg0Hf)J3M zm7$k<3p+~dgk<7VlpWV9v{LAm8aea()BgGn7XydL z22!21+~)rB=q;_eWCxqoglz| z7sn*;8Tq~uA3rhH9j}gzYZukQ{RSKrDhJ*X>cLlYqXI1KQF#1^erFks zfS6KzR$Y;-fW!`ME>7_Yo2^S1 z2K)sD+EDuLRJHkuF|W0EA$)0m&WCOV$F0Q@cgvZ9Lg>8YD3t%2fR*I!TTytU-47^# zSkG)H$>E0Fm%X+<^AHh>WP4bk=79TEUa|#9Q(`Vfz z_g4_L*@-WUI!MUr3Cxre`>L&`n*46^50khLIPgeiFf>b?LS|NiXnf37hVAN(>n$HG z@FprYLzn$Ry9o9J3b4xmUk@n9hS*nvv5SX#H?xkRNAkUlb{|s)J@bz5mi_Md(G{*)#p8_Sl7yv|q3ZzSNFaIbocDK8bl7DmWAavui76+_Jkvz^G1Ix$9` zpepK> zpRkjFf+IEi!2Ivu500#Y6JdQvL~Pf;@aE9W-}#XMAX`RfLgn)#gY^-Ih;k3$zu@nz zeQGd$Ysxo!*E3QQ3I7_7j-(S06Su~F0L%UP-3FB)-zAy>BF`offVXz$ z4K=?qpBzCv)g&pq@nQhYbk9yZYb&VCE2Yi8?+_RUkF{VY`I*)(ykUK&p6hhtbf6=H z>g|B8s|LIq84L<8g&rFO7lL161k^-i1q#i7&licqw_lc7DIWP$H1us_MGV(q<+=D9x>&)wHX%Eftq>Ayvz7k3TZilSg}>Id1hrL z`#tg)7PFa7-KV0Qi(~g44q|qGkJ-(i3P>`C7(_8|;}R$V-Urpvzis7pgrNB!5#Ab* zzeYfQKJwOz<)iwrx1cO*+WtfTej})RHkD))k<^?iusL(qY9wu=1HR>17p3!W!DiO{ z9bOsdn_0GX9dxXT#F+xfVPWH4LMN)ftKXjyW*s1NV~1djS&MW%3&^>O-loNK0jqewm-()s%W& z3{v1^+iRo6tc2XK53aKlK9QnRamBmr=%^*SGhNG5tf852B|z+p3Bq@7MhJYK?XSyR zgHGW2wBm^lXL_r_5uL4-W~oW$bUNuWtM17us}->G_y4x6k?70sJYoBJA+>tumC7sR 
z16yW!qMdI+i$JqJXDwq)cIJwW^ixCLd1?8Tc4dgTP~p9`C%%DSU*t@hg09qPzSOB? z-4P^pxe^Cu7HdS?Zpvx;hAGxu1rh}uWL+0zZHNOQQ~XqBF3`BJgz8_Wm%A|9WNJ!= z59|XKkNYZR_`g(D(vJ*qE40!8^!?Xc<`rje;v`HpW>QRLrQxbYDtyflk zf?1R#e8sd|F4#okk>c;#j9AJ41G<|YfiSWylfC7si!^I{Z`;%-zPLy;j$g>Wr81%G z{0G5vK%}8od8<)mt9R8yx6QCw{N0uPe$QNA8+q=`Jm|Pywznb_ z34HS&W#usA@%24y;6T{o4%|QqtnB5jzky_mF8yO7JkWMS-ki&h>x~MWDHSj3mAIr~ z19>G+W@~YAGGxoyWbfI&HfZSYafj3dA#guS)l^PT%if~^kP1Km9GIALMuqy^`M1LG zD>U~$65t4@t?E}u@X6Tk_5dbU8R6Y5DW5FKKJ(NbrYNI^EAQ*y>YM=_} zI^wRS=i5K;CS=k_AHqcFRb2VN0@C8(RQskupb$NhoST{Rwv9{Xtfo4N(Xk zuE(jYEOlI=dezGA4O0{_fK@-SV*uJ&qH>eoR-yn%M7u_2KRJgBq}8E>J6jW>eBR*K(f()Z{O47oD>__g1V8j z8L@wVE~Qb}GW1KyZ`O#$GskE%{`MO5`pfL+pYeVc!mQY1K9F1NG1cL&|V zDy1xO;GyoRAap)D*#eLqbtRKYGDW)#5}p**?%f+A)1?`E-@bdRJf?>*e!G8~p~AB9 zL{|c0LAQ|Vb1R%jG_#oHcYY}zRaOfkPRvBQk)8U(Dp3l_rrnSx31vp;1;DBEP;V^w zCIR9|3=@_0M|w2I9E`X04cF>Bdc?>&MQ23|f@60D7|_oUI({01-_@Oaq%nv$sUn!! zg7>Ec@hSP`$^s7ZILCDLhC+Jv&U3OXT6?587}CUYoi(h*@{C#ie=1%wXZlB%YV|iM zaC$o}7QpYfZU%1mH$;x;NEK# z_5x<^PEpo9!I0Cjx2i|N9E}_oHT&^3uv@knX7{^;qem}o^paU@S%{)8P`xWv#x66F zS3Io?&kTHAnLeho5Ked)BbrNH(WMd(9r((%+>Vk1)o`0bPsPk3gYV+U zEK6!4u>*Z5D0YedW$s`hlrlQqgMrczn2t}tv(`pes>-k3+KNf$44{bGmpI?$Bz0%I zaD%u~Z-ZAarR!s|+K-_QYV_$$`oFDQL4%6t*@;FZHWGEQZ}AvE**Y z;R6Y^5S5k78t!Gbrnm@A%a<9&zL4zqT|l%r9if$F63=>WU=!yD61qO4`yIBMxV|8Ec()ON00(Qsz@F(p5 za#$`e>6>rBqMjo_-3nE`YRH=N|$sE}a# zxp?24QkYr5`0^fdE0Hvy@*oaI=0Gn|;J)YdrQbx3>$iZILl}5eCA4lCZ&Ggb;1e}; zRjZO2ESSfLDhDSp{*-us?9ULOmf=nvF@w8)A6Xt{=af6X-{0#Y zJGB9Jb?1v8aTGE;efjR2?GVEyTG9u@fXzHoMCDDrY|i5C=|36BJ3nGE%k%Y!>p+Bk zN|idg+}U4aiaGv=4cAcwpNN@8Qp+ABBYU=^dX~^F-rK-14crUotOlr+ zNj@$Mo%EmqP$*&tttWyp=FHq+p=9UdXU9@rT>lEVC((zW%VJ37*FWR1JkV-V?*d1l z?rmrc#645Xxc5eu(A52vb@gtwI~F@8%ZwHhpded8lBjH0=PipaCQ&5I%o$EtV245_ zJfOh(aqZ zr!nivV^&*g!aQ2u5?}adG0D4Inc$OX(xYhlAk#wukPd)gL`Wya-BS2I=Fv8LHH~*& zYXqL)knmWU-)}?mEx4i(8HME8q0gsrlNR=0I5V|nfoE|dj(d{xD z+u^n!oF2e3mwJ%iMgogGOI_#>1V&YzVhnvvgh_4QOS(=*Qr%xeQcP~G0f0l&i_)kY&L zh>!r25+=ZQJqmS%1?wtN+&xl|W=e_;lk6sz-^+$cDv=K4{I8yBvw2|aP 
zp`I09I|`Y{Zvb9(H-ozDCYQ(ZeR^>@%q- zLb88ax&ME~eDWfn? z!?!<{s<|;8d`-sYn_hQy%=n}5UbD}M&4It05^P%weeDEb)4j+R#+)3JRl?){w=Dni z3HK9&H;=gBTyJ493nji?c!6ZG^Vdms$wjij@xu&T(>$wlr~gN3{r+aCHr+B?RcDGx znczS>KYY_G(}c?H!%{C|nj6JnW^0$_1_t9h3X;Z$cXKbCGa&w8kYmAc2KIR z!}ewmK^5L5leD5G+4Tdig*lcpt2lz`kMAj!@(4Tl7y-yVC{tVLEkEy$6QAXq2Cy4M zPkB1I`A(?tx&)aWZ0^B%r~?T3%dhE#_M|_#j|S^%L4nfpuQLuJmBwlhPZ~0K94+|> zz2w+*>yxB?gBIboR0QA(U6SKY=lG4*mqm|n?t_VQN{*~S&{a_YUL=Wvo4%a2eeqzAJ(UF+laNg!jqw7iZ}y%PRt4t3f9=`(7x~;|p^t(j!$9GD zTavjBreNbzB&B3f4%3<*qyN_(ivjyk0|_7aa%+_T0CzQ=&cCZTpAl#$aI+AoQvF_{ z+TNiLo|B#u+XB1vo25;l-RA<$!9`BoVpf1$hF%=QUF8a5-1q=#Lyq5O*4fZ1GF7l@ zA6$~CrT#Xh5}`OCOn14g+M+FU-?CO*84kayn$=Pflv2w|^$_4FYh5v{0<2ke(c@+{ zAsRc{vNnKCiW^vn31}8-1@WlHF^Y~Vlzt^YrMMPfmT8ge>z0+z@VaBj3?H98>n zr?G{%l|ga5Dh^Hp7C{k&>LumG^Sr6H7z%?Z6Sx_6vq{PE7}~^;>WlKELBvKBkM#Ko ztp;HBbMU*);SI2>gF_G-dziEnaYl$uf+OQ1J~YMxZphC{y-=nK@wr@gksUmM_!~d~ z4wjcJ$r6#(5aWrN^f?QldhBZ=U9HwD#>eF;Mgp6@3utC2;MU?3v&^WON%6*PlH4w~ zTYbgxiT!jNR-}|KbH7|SF2FmMTHk6)bl16*tee}qSl(g;M1)Xav?hN7TC9nWGeke_7J>OYhIn#ijJG=XwOt zz$R=fHh;ZJ^BpL>SBVwW@^JpOY_!0qtmG~nh(nYJe?<5pfLGV?KABsI#@7qH)nr7aZ8Wi>Q%&V9pn zzN5KH>`#dbtdepQy~Y%!l%SB+qcY|&rzHCzTTQUOrE;B8RhR1#3DW3svXhzZ}+*sTzLHXU%^%b!@Cy)v7Xpf%w1^IOJ)ghJ$hHqP~|Bv)^1PZtCdke*2 z@V;&5ys}IuMfe?3e7tiz70RoT__!r8WXhLEi;LFWu)ElJPd*4VC))%DQNkjHQZgt6 z{&|MH+Q74DYLUP%vcD>ZZYE~MY{dAp^*Z*Rd5^svN$i^Rn*7p=JUH*!8o~nS?;`Yb zYj)!|)&m$j@bys!i4888h)hE!jTMV|`xolqUlnKao0>#}itv0kswfkcwHQmlICSaF z;r4%oDI-@sBVBHfJxRYuho0u`UcnV!6C`y3+Gl7ri_==m59jSg ziaV(+Dt`Dp`$ky=codn7t}&nNvK&rMw5%Dsk5O4W(8v+Ve3~r4EBfd#jAxnq7j!&p z_giCOQ{Zjnl5AwkcAM~jhTL?d_Q%EC#uwj*9pOdTYH<{_4>Na+bg5fo8ZTOjD|7>T zk3@>QKi2PpyLv+k>FdpL-(k`$cBDb>Qm&oE5SAlir z-fz?6ZThP4p$4Ee_>? 
zP|1IFYrEsLin8Cw#VmyhFI31>af~T-vn0%dww%XKw_3)$_JMxy)I#QAi@zF{+?S zcGwJXFto~0s^6J*Ny}|6N}f_)4OXo*3xWPcQDg6uELa7+%{>A~I0He+L;@HU2q#hw zL0m^EK>?|i1V7m~A4dg~2DD}c#2-Y_WMq7u)&%obU4oc(B^QXSL21b2W8NUti%GfJ zpa9Aj5$9XD7#^-ocvz^;)5RLr+ZvLmps)rcQ>Y^lz22r9O=7A|>lnOZcR=R7Qeow{ zq~K*M!dT2|;I8|U_O(@nW;}NH6w;&PgrWD5#gSDAUG$M;{LE_LNGEe|WQ2OWL!9N4 zJCH5Y(h)=;9o#jZG-2^GvS)uGw{1K0>M_zVy*hQpKc#)dUf#C$@j^H_Dku0 zQ5ihBGZEvFuy-P221~orzZ;?)w*Jy(c!D}S>idFimz+Hf+ zWPC;#lGA0XR-uv0N04}i4Fu@mFFp&Y@hjOo(sLp#D4s=%O)DjXjz-8QTwU&$>Yh@! zzfmf6A&W3a{vZaO7Al;`5IV>$YZ6Y}K2TEXw;XCG6?LIJG?!8CuW(IgggtY+90_w! zld|s4$^z(>$_F7}hjWdlMrR{w7{4xpuZ8gLvjq7wcC0kiOJqscSupS$uLAkuZufzc z&GL(8RO`|!n&T+iT^s@>ig0Hgb%=YUARhg(z|5*`jw2KTtGA}L)xQ&sGOh9<6eGNc zG~r)EEVG%`hRKku7kF5<0eM#I`KZiCpk*I43wP$A3D&Qr-tvem!Hc?h@Usf-ES>1W zSo5o(zpt@_5yzDW^GWubEx_!Sx}yYFn7MP7*l&+=vY?`f{%@6b{Dl-yKGUwETHK1* zNl4!Mc|wtCWTx76tu%G20|$%MO3Kn#p531lbyMU+MyUI?3#C**(A~RM`UM z?9;SdQAVZ4jhduVLt#;g;Ju}7kZS}OUT6F2Qu;29qm2mbHaAb&82g@dU-e%V7bhnz z7qqd>;C%g#mol}=tSO>vnu};tA*enxv*Xu`ynzytMVqPmh#dUgS2S;RgK^<+&qog87@}Dn1cRbRYBaFP8efzgZOY zCzLWoEs9hAr4can2foQ&(m+1Fn|0xI>I(J^D$1i%0&O?h_d{s%DwZy*uI>*Hr4Ar!OIm~9NXe)_Q8<20 zepK=jT*uWwcBXFj$-8(BDfyxRY}xgh5E;>11c&v!35c&um?Oz1%@*yFAJJox%qakz;%4rS0eL0-3R z5ll=UMgsuZktuLbG_&Ae1mw5>0C(3kn>dR zuons2(gTlc-&5Qw*9x_de>39e*ho9trX9=kb8cWGU9FKV=& z%EF=A;>hLz3NkW>$u64p9<#I3;wYuy-G<-4Ak8%j{i?M# z=OYxxeF7sQQehSLA83hJ?b<$yX{KH+S-1x@+=MHNz7A%&#a;&>K6R@kKeoDHo@&61 z=grEqnJF0RljKUni4(nMj2?B{SE3A66iZsPpTZ~LIAa2Zkf29f06gbPDtQJDYot_X z6Du!z(Q?G)FVmuX4-(@>w-^@7e3rm0UIyw+*w}5$<~@F!vwR_oU)-G|ddfHA>&?@` z3$bOToW|HGv^a+|Mr1-%6Nv4%>8v4ES8F_aXiFs{9i{)K#I!Sze#NPAW9fj zE0owFoMy({+06e&;E(J|%tat=vnU+x)pxW0ZR%w+-k+Yd?&Hq!F#RoE7;?k~635ip z1%9_EO~FE*dhb(V+-s=vRH@lpPCpwD@@iy-H{6eMBqNVn*K7SI^N<}l-M?43kAHPd zd)7r>Laafn95JXyX+pS))Dma3P=&YyiDDt}uvQ0LxM3iOQUXB|66crv)qdYH_Un&o zg7y}e0!zk$hVR-lW_%4H!ob>$kzUa&EHpkL6_22P73lu8T9xlR^*qo_`Fq^@vgR#^ zdyZQ{CG($b=?lYunai$nQj*c!GcERz_=TkJ#~KN??b1U6Rh18b*bfN9u_Uio6)6CB 
zN44msuys2Ocvyp(*kSSVi0WmlumJk8g%7C#>^%DpXG0h5iD<8sbAs7B2+ucx(!l#) z6gBl$MV|SsiI&)<&Lr<>>nu%@r}dosfzhyG*m5gz=}KF;_ZzuVE_CvAmA3OM^wr0o zQ@VZEaCwPtnyN*qfiiOnSVC;*m8W!N;gLASw45;w`4xNTUB_c$fEIY&PYSYSkPcHW zmqNEyc>pJAQCxo_k}LQ*zXylPM!IlK)CcFITa|B8C;*=hfx9*(pTliu;`>XCh2E}< zHLkW*Qs+rGFafMtlMw9>`AV6X%9B(b%)iRRo~AfCwd(c6Z9?^RD#knI3s_Znd1F+Q z7%cuKy@K;87gD;7X)&F2nw@}lBu9~fbJErT!X}$~>Q`mU93EVVHEr;7w#H;^)I}Ai zfp<+?ycrJN)K>!-%5H18L0mMN&09(&b=-153(iNy8sMO+iQs!aRPmG9t$T&Y!l^dy$PcNo=v(ysRKy5c%1|n zM-Kuf$6ySpSg?u1J`v^p#(GlfU+yyOq^|F{eh-xLx;Qp)8ztKgn6Bfa?^t62Y8qO-kd)r$$j{33S8MKp-=zE*X!@%l`+VpEIDy{g1QMuiD&j- z27EI16f_-c0HYf*(Dp$?x(+<_9um1mto@r|0Bj75*Bv3**yltyj+eEYc`D2z(IsW; zvPr5yGtOQv&$-E6KIz5KL#Wbw`&Ib(Ya(7~Q@T+fRtsVv{fdT0^Z&5kI;nmQIS+fw zTY&vslS9rB)O(TexN#ucQKGLNHX~zIS5-hVD?uW&8^5smQ64uxTEfj~^~#(4S7*xU zKF~=-x-B673v#&BzE`WtO}-%A8vD2gw=@9TD%!?lQTLTUK_3>x^ghaMX8Af+7~O%h zY{%_<8(UC_-ZF@|OqOyqr{PB&VXeLl+59jP!|3SykK`N@2aml7mQMjd9mDlns3*MLsh?-}1a^PlHV@=pdzu&&VuZ~GATVmo^ zsuc{pYNGgC99G_hIaB__-gUy#gQ7XBT9u3so>@7rd#)htsDv!D?dZ_xXgbA71KhIX zCrxfne_<({l<iKZ#-40*@Sz+3Awt83S<|7#h%%``r)%s3BgrDI@rqY=?!O6 z%*?t9DiGpdy?+M5+VRec|5tJ3{j#3PJ7q7W;8xa&l>c-#iWiC)4MiT zd;g@P4Fvxt1ZB4TKG?K?l>c=W78?TztQ8c)44m1g{Qk_%ne{ zFxYq4+uu=qF-A!}$NwFl`AdXdF^R=H3ezZ_4|+v5jC~4aXq>Lpe;w}@b4iBjuv8`4 zp#3iIWuXI9Q4Yk^tKFNtP@uH3<*pI~x`tQ8?9L66D}zYpMG=UBn-x7a1S&aYBiSP5 zNw-Veg+F~{T96ZbvOg&Kw3cC@MNd?ezhcD1J0vs2^|`bW>X9|R>!$x(Z2P|3Vcc{K z@6rMqWD(zzpjeMF9kF&{cV7#WYHz6YnHDo4Ia7vPqy?%2-bRRUCaeru74+kwde04( z@%}b7{;ARrro;9Te~g~q$gYfC#c|mJMf3{LM9V))`>?J0xO6T{snXtO3dBK z6tpp6zj%Z{MVr`X&E?o6L{P~^{Poa6)M84xQoFzNsDz~hOp5h;Po^@_#%_>+Qsf5t z%qQcKn(|!_z`~0e=@jfAR}5ufK8urr2h#3aIY|z>y-mPVSF>17)V|lgUW%O|8-32r zknIV^bxurZd3BIOd3w`Be!W~9n*d-pi1&cSS_=+v z_xuUN!_)esOSCaBB;imFczzVQcV0b^KOI{{j11Pnh_))k{p^jpxr~VSIp6S&8q~Y0 zRR^v+_@@0>>VJ7P4NBc~ng$vOTT$%lpM$}`bYjQ97;+7DX$(X>i~4oqlaT_e3SPdd z?C2Ds6AG;|s)oG&qF=m0gMI7pUvxJo3GC);GvU`@I6m2C5cN-EY%YUH?5_qFx~9mP7}dB;&7Iz0 zh#ANdl$$i{BO@**^z~wOa6J2=uyC<&{(&Mw8lxR(WgRGftdW6h489i7(QkUt>}BS! 
zgszO(iz`X@{YrHHMq$Je5tU96i`~*@avc+dg2-`fVEH?@^96y>O%p#=_Xu11W`cZD zGK`Tu7N(!GL|x4v*YU)s?M+G%l1z6|^;WR|HK`Wk9~t89Au3~;;fx1^&_Vr%;0ZD3 zjXl>Qnoe9Uiz^9|4d>m^m-(8rVP-cx)Sbb6;LEz3W*3YYI){7X5ejii;YLTG#UCy( z%Iv{&#YE1BikEG>!mujjAJ(~9mob9tcGV*tjI912df&v1xpkgm9v+qhEOfh@CF;+I zEFh84ECO3?H)?N7NqZ`TK|6AI2|j-TF{Kz4;T%l6HIG!jYCyyaPoVf@Tf1ol!e9`z ziOC7liR~T`O(w-%g`$0O*+}?N=r9&5je=OXZDc#(ad0fxY8x*Jg{jxU0p*ix&|jKh zTC3?~=PX&qp0Br;>WHTq3?bUFm+vqI1IH6|7b7D(Me2DHv1^DJWVnIoFLQ3C`xmp#yrOaV~jGtMBMFsLfr>%8Pp#cJG1u3Gb@LO;XkBoFG|W@qNoeY^hZiKB8MMs17W{F^brTQwl zz0I0@)h@U{EZo5~0}iG1FP0G^*XtSsE!zEOWHix>zP{8>-l*vK|IeOt1Iv-Hm++Zw z7_@~K#lE30U|FOLtN=HaTtQ`mdl#a0^i9|*`s_6iw8mxBBF&Kx<7}RQu0$DSAL>DJ z$SQ?N>FyE-TC@24J!f0 zaUO>+G08jbwvd}X40ll4-aN6W)RzO|%~(a1pH-vHS&_7Jgf*MqE~w8JHBh=i4425K z=dI(ZTONDq9_I5~7C&#D9y>Z=_&EtLp8RoXJh;ps4R{LCM09ep`Wz zA2?8(@A~_(lza3ekf?BIKaEzTXQ|{<#d2QxA8L8%D-(B0qC81Ha;8B5@obTm7}Uch z+jI+lRuW?nAJ}5BJ6DmQCgH<~(l4?tfB?O{<$>p-w4qxFH}E{WTb-~6fsjGnA44)i z{J=-U0)81~@!L-(X6&D!j4d{fqXHvdlP3B)S-(1tv`-HpnP4lM?aQQFU~M`KR$V~o zLJ1n`c5<&oGJx2X5q2dBbqbh)zp*6jEO=$SXcazOV>qMjaDxuL0T#&(!3mWW=v9~c z7+Id)rb25%lG2H=Mw|)a5PNojXgX(&xWw&fWY6P>k2kuGFiB+xw#wWw!cl6g!prq~ z?-ss3k%)n;P~!-!jFyqaiJ7&#^iC5kU0u-RNGYv+0SquBH{(C@jb>Yd`sbj2mehwL zk=Z#2u^D&;h{%qkFZ)SymZ3^rIz1&}5wR!19M%LC1=HW)=o!f}ttxT89zG>gP)*J9 z@I^9YmmG^2e4v$<(#tBMQ)+fzspWT!9Aq zY>r;(up8a6f^l`>19mFKL?GxVUG@=mF`22!+79LDl-Gn89pDih5SfRlKXA>T*s16$ z`tON&E#G)IsM&U)H!PU?9{n^hVvxK_z^u%qqi zA|dqbliRa9@iW2N_uA;E$OYpaNz|&t3iYg|#s@p^3e(H!N%eglo&o!8JSDZM0aOkF z>4@6I*)Lm@XN`U{@-#zzrVBYvu0@rsaa_;><#jh~v#AvmSpttysFt?IoyWY`N+jyl zOK0bhzOTcOIpE8-OJLNZqt6Bw7IpQUpwzpE5QLndGWs>!NLHbYGk$*4qG_gszJw#v z`aguWJ=ZnIoU$n1o5wOuzW>_!NjZU49CIp$s+NO#AZJnQDcPd$A6#5RKi5G zCL?nf=26XHMRxUgjvVr+sBpPky;$?nQ#Rdt*XNRnp%nQTHumQ94M`sYuMqDg`fFVl znc^eDM#fc1}dFtBER-v7&(2^ya8}_9+-?k=^ z^&?%UF0aH0dA6p3t$XMeqFrd^jOWlu4VL)DV{p!*yAX2w@qAAdZ+{hwbwmt=v4Mp7 zBIZoOn#yZjU)WDw8zpREawIcSucCjTpM^Nxx zLZ#=Mg@!h??G6N(aaO^h^)q~SmH%`tkpG^UQ_Pd-9gAu^X97C9+>rW`$ZlPD=mZk6 
zT?fGe+i^2Yna}mD517pSf&3s*N|tx-ONbH}Tok;3k&={3?=$Qn&)w7?)_Q|SNv>dM z8==B*dThXzuf@OcAVF}bcYopX+@5Vk%UTyyRQ0Ht27iG5A+KML(dH8EvWA$(p-CYD zqhh&y0@D;1h0_&i&fO#Ve|{0AA9}TzF{PHoRNBCNL6$YD(&ArfIK4vAdyO-q!*L8L ztRTKUaoX15r1#9vk2M-pWigZ-n9$hNeZLo}SS!$o9JhKmi~2|4{R53+b85LQ)z?97 z{;yBRa3|a+@rV0#NRh$lrhk#7REr8bWgXL-jBBbgZ0W7S^O1jUk`g@*mYm_fR~G9$!s8 zpkqKn(^sI%D(7Cbz|Y^a#q8_BTT{AVjb;?pOnf2ZFt+e#raYwV@!`Yo{V%D)QtqT} z*f5v%-2<7gtQ_MsqI82s)keB8PSy)Sm=}IM9;qS4?)t6jKNX922STS6r($ElyLq~c z272KBi`}}c9*Wv(833?fnC%4>`&x`0fW4=4@jAV)r2_2SM(h7`v(s#8UOSb4nq1mc zKM_2%w~qz-XrDBWuTyO=r=w>ZSD>hFPN*?$0=jF1h{SE0;Fo~XS?#>*5ISfxa!_j} zUJdwD!qgPt1qY`9y@&NQDLNgNV}TwTR-R}it&{dC0{AvQ5s|r;xO+q&i_R|c6aGH> z6=RbOOY;nv-i|jKUO)?DQJ47*58}14Y5+dy*%UUlZJG@^TV?SpZv1+kvtLVih#NY;IXE}ull-(QTDi<#G5mAhE-^n4v zS9Bs-!00fH+#MmR9t+0Hoyx*+tyK5>L=9?}1oB z>8<*gYwXHJpcr(CpR@|tOP969hzQO_*oC^vto~jOaO*{jRjY1Wf6-_ONx}LqANkgz zy9RwFXA|L`%R7>L%E5fX=KdFzzxhzS^aq`O_;v_k6GcO84N|kIh?6ynx&PXaO?WMH zZvJ*UdI>Cx0#Lp7t0?yhTeY=<&=#;sZ?;VbJ0X~nf*x4;>nWrxYuqC))+$AI7cUqQ zxamDwLKa=G;W>YH2{_ElnfJby&;cSh7DTGTcXBAel~K@n)g(LFWm&dx3pzT94bz544Mza|Dd(4*x*Ju4_~bzzG0Px( zmB>=>X$1&rp0UFLpks~KhxQ&|(Wf79_AaMLKc`DI@6Eo3E-CnpQ-!2Vidl2-lcpjz zu~!!*d97I&yI`r`(}J~A6->W~dbJKM$b!;?f_N9K(BXB@({9@o{~nbX4G+0%P4LUF zO^r6&f)-6~y>n%@&3!>GnlSV!`B{7*Vbr0@gG1V&OptyfE)GE>66yH>iwvJM#6Ne&HVBii>jjibDZ>gv zh|8F+BBx=6CALU+xYC+bqT52w0chUFATL9lNbV*l)ytqj8#RMF)&7+`*7%7{HG(`8K$k&w49% za@;9&c&j#uRW6~Ja!j*!YC}3_QDctChNB*yQCvq4CAAK~5x5bhWBYh_B@X%Dv>VAN?AnA%_%K(D8ez)yycwl?_H^Em38%ZuwHTMS$1cL-TWi zzYmaNFl@H61p%}M6pJl3x<&0g?73lwA%&a?px}4mLZt*`{SOnO5&e*=F0!uK;e`)Y z67(8;HohT(3bvisfuYcD(?wfb5QRlu6k{V+2kFyA?_A51Fof1q9~oZ`I**r16He>> zV)H1K_cIE_7I!NrS@Gdt0ZZ-$6cYCYZGFa#!JVyxj1}hJrjn*)v@Bh9Uq0T( z^CSTvs2qeuXW=k*YsL1;dqD(gw7!Xp9+mq&uW*m9_&I|@_Ol&3-;=@3MOXgisRTr9 zy!_r%QW19%TQP~t>gU2v<9U1`y=P1w%7RtJkSF^|(%$67eSz2dIsXij37N7NedIwI z&007B=dX=A5j|Grt*S(!i;g6-$d!q*b0C^i12UlXByZ?Zt+0Dwn}r4S)Jjjnh>8_l zS%F93yx7hMK9IEoWGJx#MoF&YaXGs2fISZG0x^lvu)3w6}wA=*U_x!Yi!QW-ot0G*Ql~r8#-u@$-~fkIM_x 
z2B9@v9FDfXn8)@P%eW|bq+yuRF%+NwssYAAx%E+<^to)}N77jHh(@=c`cHkJ%DwTE z%qya_`FD6O3vJwRd%jp@-X)tgdhms=H{@! z!#cXfU^F9SsBi87@qONUzJOj)eQds<_2Vm#5#)T=*_gz&1@2LX4Mi|Xy>h+E`_&7S z>uEj{i(-eR8KV<`OIRvLn|pWiFdY}YL~W3bAwk>0QR0nhD-8W!W(SK+M)+PL{)ork z+tD{=-+T4ZMAuKb^ex35CG@zm)PU)vR_C^0mk}jeJI@$=4Ot5LSfvUcMWjG&hrB0Q zXR;YJeYg4sE{zq&7+~i@I<8d~?itE8k9!a!e@lEQp9?_F8p?O06mO%~Fi-4Tk+~6i z*618Di(7F!H!n-!)gzWgc?Vx7aaPbTn-&2@4X$Uloz?UG>lEj>HX?799t3TlChB^l z6roCy(KKg$f@xD+Z#x-`mu{eQg$25%Wp0$C88A;Le(o?nm+_Rvy8vLG0^IQ8Abjb$ zD`vpqx?@{rkU3A~yxQvUda6i>YE2$zD)xi7m8p8qksMuHCgBhz(VVErG(l>o>fBt> zmGS83-RH9H5>iu&v#8pIag4gexY|x}S=-ZFMX1(3;_ngcg^M{mfx`&KZ(H7AJh1Zg z{a!RB8PaOEKam*Tkthv05WI($&B!E=1H8iDXo>I6pYsj>cHXpN9GYMTIQJIRB zRGd4^yNr#?vg|vUfj^7dPC~p^0XKmp@U!9^#hBmnt*qk>yFuE==EuWm z9FkWRd+9q+F_@=aXLUM~L6eF?I{%pE(3uVqvCbZ*_xgLyEhXjR_`0@qI#1+@_V?eV ziz~1ABo29jTo`9aNZO`LQtP!Ssaw^`6UqK)Q!&@7#s5^3aYte9d2Ah`C(r}zwUNxaQHlg)_MTZ6YOIDcJTJtTSYURt2 z^4Bw(creB3mD=#XKo&{OLq6mK3z!&$=Xdw_mN)i|)aW8gvj5Gu}xRH4s<~}yR|0uZw^itt26yn#9K&*Ui zv|oekL)A{CRk!J!DJz*XmJYO+TlKcPNmiuXak2I0Hy|H;(#;)$(9a~T2>BeA;DMvs zdJ;S3+~`?jdI!$qe-JaPElwx!b-DX`qWZ^adf(H?#wOCAdjv?hvf!kSOlRaW_ErjU zprD9XDt6`GRmS*&K={evhib(OgwF+aN8-QtVbxbC=Ev5Ll4AvZlXDD`B7vAkjCRh{MaJ<^oDvM|W#2VbeKb1j>P{SwT0F+L|Bs!Y1ah)<;F{Lz(cy^odPZ0 zt%Yfcm!2u=ZvlokZtg_ae#bLyb6H|bM|t}Ayekxg3y(q8!SlBK37Kf1X)PVXKzF(8bdlIC%+bJ1iFQQyiIUywv>~uul$JKm&Z1RIe z>*NpXLupk&dEC8bSNJg|@LzEOz1f~3o&W%Es=yUvKLOTh-o7VZzXu{DWZyK^ zcmxQQpk~F?soQ+llE->|k9Lu?Vrw_awkJ2es?7AprmGD)aB2_v z_!>q*>xO%wQrpf-45?>xTG>#Y^i4|W0B3@tmH`yFn4UE0Qyv9WtUY*VH~&%Y*N@CB z)v5N=!y_V|T42n`K~PYb+vrM3d-(FlZDv}xJh1{lE!u}1Nem@_(2SW4Zgywz==Lfq*2+YWL9XbV{!uh;w7eaMMlF=1ym}1A2U5Rrg~WNq`}slW+MJJ|H6(A<4s)o%En)f?6_6{OzII2X^mSluSv)?9$krctpW`)72vM9 zqY{BV&>Y86-|--hkq^$wbVJNIZl`}zU$tL%lVR=B2ymA8JSr8&MZQ3u2asq2PhxMx zOZ7cp4T}=n+Lu?b?B`bH(WDuema|LB z8u?_?Xkdx!bZYYxPP58V4wF2*IAA`-WsvZQfqxnT(VGlrSuWDX<4Q_LkD&PwKu$z0 zE%e>Pgt(rFtd`j*tPutn0dS6K)@|I9WAqLGap`eAjt|uId&^=CJuW41Q;T*%VN9-7 
z=U!+?NI;q3mHjCX799-y8ac8iLQ5(Nl1X^$RHb`Th?5N*xszC}J(NOlkVAf#^`MBa zSG)l$lG4n|1HaKFZ^-_?|JRoZ;XSX`HdEGamT^Mdte#ii_k`Y~A|r58hT zX+jPNmLIhUx9wfGhau^p(5i)$Wq3ueIQgQS%rcN>{R@`7tq<)_Gu_M49Nyx~2_{y#Hz`)483R*7^tfd`!`|PqbA{h+ zAa9y|X_(NT1}aqA0*`=HKP{hq5Q=5V zxl@J;a)Bn8;TxX|i<%Laj=7SHO4a{wl4|;CMU;GJs<*sZ>B??3V~?*Q=c4%`Bccy& zSLE=AJK*`#7kgB!qdavNx1JR|uk@KG6J)lDtP#U1f_+@>Hvr(+`1Boswx-QJ41#Bu zM(sj0Gy6Ge-Bt(WYgm&F#v=QDF{h&~Ek@wh<}Xhi3N0e(c_ZSV$o&R;s3v%;`gQt8 z7uinI0DY$sFU>8d(}9xdFw~Xo?Tzs4Tp2CtW@A1KJJf?p$d4TW+@}1>!tPkpeIJlK z)Lh193AKW``$0uK3L4=6j$LNGvag*t$E@joX~xB53g4MVDcixB-nn6~QUAmM!P*mM z*pdJdWdl3ChN;=d=hvfYo&ex~q6{3QrZ;%zeSSxBlW+)|Zub+|PPrLH**6^vHLOo~ z>G{|I&jWBhvs@fnA<>x!q&Wej|Fj0DUD|O%$m5i@MXyiZkbQtDTPzbCry>y=vaI+^ z+zd}O!@O8Ld}ecLONPLOKBl_z6uO$0;I{E!Q<0;SQic}lzQ`J%59P5N0BWi5B4?7) z32!`)o$Zx@x-cntmYk0(!pW>;Ol9PNf*NAH_XCU6o0~A<6CmmReDy|J{0~26@xZcZ z9+NH>+4wXdOV6y7_V1m7pZX`)xz8Di_7_H5pQQps#5TGAHKbfE4WD&KBLAa7Ar$#caV5 z!I#2Ztn&HYDT}0)Dw+DV(UZGlvuHhVdFY}es4q+TU;CS+{Uu|d^fXHpm}De#(z=hc z{!XYWywV71t)82F+#k|fi(XqNfY1C}hmW^b5thqX7mpD~itLj`I%-S`iHN|E2biqI^ zAst>=cvvvPM*T=yl-dNE=mvnY@=aq`F#>>71&})W%1#Dl;9LSVytlY-4OQ28ZFsUS zkkS0U1P*cNgZbxGKqZS-MXs;&mX;(s8aM|z#H}s|Y0RzJwE5WLYztb)-KFujUmBXL zEk*iYoQH9l#FL)3)0bcw{)WM#qQ2Livd?yF3$-1_5kgFIZ@TJ#YbIu#5U|c$6>*+k zP07Tn)X0$Y?xZ4r5O)?q-s~M=;Sg=-_f6sU=p<2K5lUV!yJrD|#_g_ErggG&un1Dd zAQBtiRA!@9ZPbAhRv@hs{Eh$Q^S14ltv;kc#NO)nvO9QLHrwv^ww@BwTXLLo2xFiY z$n5g4XG6jrhI}$JjE2kdl3zf#+8L(-kH(W*c*0uVHRx>`!(@iV0 zwB}jqg#EEV>9%p{M58J)Kg$a-)FFbcU^nkL}T``dk-W9Q_Xv7e-2~GCzC=pxKL8lkbw*2pqkn z!frJ9Q{>e;qFMbfdmj)@JH2w;Fx0J>sM)*WagE4RvZ-NTFq3 z!aljY*gUo5qEbz}E|^MgFWe1!TSm~SQwRWRlQP8_QtWQsPBNO$fw9%6(LAZke%Qs8 zy@RUF!1qhjl#yD}Dh}mQ2`ldtvM3gH1LK%9;&V)?Y5TfhXMViH4gcO@TtPo`Pa@lH z>7U4u24-tWOO2(HlJg=PJAe6{b-!Uiqbe6&^61PjhchQk2~zTc_53Jl&dFl@ukC?> zz6w#6A<3L-$-ycyI10d%i`JwYPQL!t(|~%PWp2m0lS@X`K-j(^_}gAv4Ko>~pMK%apM`-#om? 
z?BNPPAch zp#B)@0<1WOu9OnFHBT0A{iSdb7NF{Ci$oaDDUACwrlS;>{$nAb!SGMUmN2uhNs$)L zc20TNaba8VdE3=~vEfx*d`*t~rW=E`@vZ4Qz11$+lj}=!kv_dXvA_A;dN= z8_ZH-=L@y1#Jl%PC%D7=i^=7%%`erKaQp;RiWIj=+c?o$=~$$8_ww_9;hGtV$RI*w zWi^-AW{f>?~GuN z=p;DCQATaVx#Ni4$b(6xxip!k09K20$i3EJkV8fApTwON%!L}(FN7^ITl5iSixY(a zm&~-}@QyRyKkUKI94_5Uj%6gA2gbI#6WAP3qd)3gT(p6~+u?zA9#_%Bp7f}e1fd0t zx2td5gR}YUncz2*Rx@_vMYGN)p)*rtT;>Hlf9oeU216((%^P%A&gUYs<9Jg1&UI#e zVI&Ndsp%N4pg&oe-DpY1smbZ}lC+>9(rK5GT#tgc?_$scj6%$Ij~&EU8r8gvLrte^ zf2P8|y8t3f9e-KSH7zkn8Y}%q|IuwC){Wli5q$+wf`{vGV12CxfUwRJIsd7+9@RVe zDj8EvI8*XRKwG?3C}JITvf|0M76AUky^_-z3Tgln)!8ch@C$)H6#QRWqmHGR)_xXJ z`6lE(iI+n@$;N{LOvd>?&fxUbkU^;^6W3tY`C`(iU-CUF@kTYq%BHntV?OP|9}q8!-KQi^l)2w{_z)FaQZpMd|2klk5MpsA0*tZ72N81?+OK@JSi?w& zlLUnyT!QeskOx@Ifx-xEoPD=w^)JOp%Z9l31$Xyj9sppaf8o#GvWT^!EyAX2k(?WU z@k{tt7)09qwfD$Ks3LX$j;x+0RVOc1l7r+IGnDwrEY`5)moAqrF{=L}!U@&F*=|yR z8ApjK9fe_$r3Jw}Ma(B3iK zlX4Bf(6=mo;0xLWUaJ}A-_NV&;)XD;9xOVD9uc7i@3nv|IM5MD-{qF8gvEj1rei9@ z5y1C?j_N0}hVdFKny!vddgGBb$TRH-6B$)zGAK}=*Fyh$COYVpZ=*nFjc(?i1KlX; z_G6#wyCk?THf{*)cDkC4Rf0C7G%YYpP+uT`$Lx8SfreCd z-Ui5{$xR;m&*F2t;AdB5s>});s`c-fVQc9TfhXkk!YvdECZ<-9k}o^&_9$uxatieR z)za8Mb1;wX5N2rLTmQe%-usf$lvf%wK=+r#)1tDfaTuk#sYw@-TJJ{K!_4j}?3Kc4 zP)eiCyVpKNg0^#^Qgw0FOjkhj_A~5%O4a&$k3~gZiUK@KNq7rM8ZGS1)vm}amiGt} z*NQL$E|hz!g0D6@WOI&_BQ7vUEbk8auggn}xQ!xBAz;wd$J}>o?b~WPQUmg~w zZJ!Lho|N0pZcrEf4U{S*-^24_Lt#@=F9m}9xs!>!KVGv2Faynq9w)(BU8xx0(j?Jh zQ%I=Pw8wvE<#5N?yH!fYA!MwP^ltcJpNp^C z+JnIl=7gM3)Yd3H2I+h;(lSf=sYDF7#0^pmKJ(ji^8!_uls4`+r>n-eYOIPh-03mU`Qud*y>=CotW|fgDhpmy4pFFhjre7&(rIn z=y4l-V!t6z;|J{PEa3f}DN#FJ)tf%M8D6^cc7x;NVj0NoQ81?f=K^ReWIQ{2KVoe> zT^oLDu-CY)5c}6A^3|1}nLb5;c)9~}*?d;_W?Zh5_PR8V9!3#?WG_3IBZ7QB;Y#(wbm;*t7;5TwH z@q(yXMRE$~3F4nCaLMD6t28X@(rPM6bjQun(|5@F&0ZJUVWu9EL%}P<*sokHjtska zvK4ZW>qafR1?22!t181Nelh5eI_$9DRwpXbIvKbKoG>L-XnS)UY z&9^CrioUwQ-^sBRK+=EX5~ZYJhCqZ2TCkUkGCbfH_;@s39DtdQ>DDTPuRA+(-@=;5 z&W@YykD|*VWAI>mpO>;q(pb6JhI?IH6JQ#&v^A&^U- z{m7hLh;w%M{qBg3%?UXOzp10VjHE5u48HkD#c8C5Xy~uTsT0bI=Zm%hE{WR-ZPKV3 
z8G~Bgn3BE_ovw|}Em6cwt}w?f4tQxgA+d*nCrR{pKwWHrfTc#)8L_^_7~BdO#-Mc~ z>y;{8gQ|aR=9A|Y1V4@gVF?`+ZR6xpu!nL03AO<3y+2kj)qtUFs#G}=27+3-1>t%S zxIdm5o@B}ID~JG#18%zw?AJgBEC79^lPaMdOl#|ps?PYeh#4~FSegVI@^;(OOCWje zI{Hy(;tFOw=UqIE9?_*FbKJi48ZIZrcfK8g5t)Y(mwWVbnAbRykkBB{0N6aNG0;QP z#VdEMRGYT(jiJ1Rg~*aWp`Kc$$3_TEo0bkYpfk0cR;u`_ieuQeR|ST^KNk{|^(f@* zYj$6!zdPzE&7EtT3D)5vO)kCpcdK00r$GOPiNUkI3&|iMHu&5@|b)V~rZ?*Zbvu9^3ps`~bG16bouY0|Pk}bu}C@_{L{&DXbPJAria5+zFP3xI!B*o0VltHmW{CZMyV| zxN=>B_pFTQ_g8|Jo|}=hyu#mG?Qt12eJ7DzjOr>{?CVEsmX|3tV1@koTFEj`z&Kz3|TVG4oe1 z_9^4Bz@qf+jzl2p?KBI69kK0qq;5vyE0o`<8HrEs6+BeZ+)-EuU}h)Rl=-Wk zwzh1BX*tQ&{|NDN6siG@-2KT%MQ7#^(-SA#CaOGhqAR@7hh+w7Ev9k!v&5-+lm!Op zSksA7M3${1N|P+S?bAcxq-}iRErD*KbKub9?LJxB{$e-L;ZYmBTbR^?4K&f;22e*- z7tDhg`XKl3rAy(b>!T@#-*?Htk9f6>Ac{qh88Rf($b?XE?4B`>3FffRWe!lg4 z|Bh#-FTF?JTvL%_;x8Nc=Oghg*2XYgl?BDy5g$!s*81jR zByYGlBawwx_?HyGhImEjHJ|tJ1Oy?T^#ff4sszT{AG&p$$ft2?G1^7Mo3K1~(#4-N zmBVj3n%;CtMTneRqLTS;`GSj$p3?_wVc7t{Z(VI8FD0>E5F#8?5SFi$WDIKDcGm8cHCNU3!~lWma1vb#BGg3C z^4Cs#x^lHNIm*U zXeg$bpgzD9-_!4Q{ozaNca%S~4rQQK_LGp1 zRG1u6xk6-GmuiQa78vcBrYwkoRQ?0@VxNCwY;x#;R6Z*UBq7}D{ zPqLdZt{l2VtKbmSl;sonEwLY;PW7vGMS_+f#uRleOM#Ik2_NJtup&nP@g;_Kr5ea2 zygoSPbhFTe>D^s~wQ>i4k*+^a%dgU-THOl}li1}RKw5ApkLG3QU+=l4DjbvjqSq+D zo~u={#K_sWeU2vT`0!7~!+Z2qPSa-$ER-8$Uo!pn?_QFmRh+@hsf)wXqZ{w8I-`Tx zh;$8M=IuN|2&L-9D1*UJ?H`OYuVXm)pEFsBz%0R#NNwyIPVAhgu|aChbl zT^ElRJ>t$5gY+%A$L4)@R|^F#7(kJS+UkOk-tKvDcf?1N7%q#Bkr*J$ev-EIu$$9hTy){s+m+mp^^Fr)5;7#hJN&AQ2%}UhZwXS+SAWL@=w7 z*cPX+v zujrJ)w!`zXS8<-(nLGimfjx?V(D2ZcV_gGo$}jNAEBa`I7MA3~dG!FDh#(EjV@| z{vNX&0)HoQ7g%R@O(-;VboqTSER+am-7-k91bPEQ=nZs$4-Eg3J0#ObFbh7fPT{dx zZs!wul=Ww#uD`ZHhb#h^><4*q?Hkvn@<__y=8_2Pb9?)6NK)xwE|`h3BW~CBW>6>G znAIm#M43p;d>n<2TeMmKU2bd$LX?Kg{}U(3?bXz?R#jlhP8Yin&m-EDgPyS{q2!Dq zlY4+$Z{gdMzy~NBP`xw76%wR}WKK&~uyE}TZ1>vya}A{+I&Fyz5r^h6nXp%>&WlCK z=N+jwm@yvPC<7nbMV`3WV_?tj7MxDru<%;0`x&6JgzV+hB7nU`-fxAVk9-@)OogyZ z%f3{gC#a?CGRo~UF4xcx)fb&BU116UeQw;&Lu|ufE3?oPQ*AmZXA%R3C?#Pl%f7^? 
zocXw8$e$Ji{$2RT9jjQj7@(6 zE0f^{sO#Jz9=^f;?PIY!7(A}d{Z0z|S=Xrz)1|pZhbegvm-|zvfHboTb>8kX_u-3- zN+*H!jBPM^-4nLCOwI?0$qG*h2o75xml<>$K-7m$JvfAfpjd^5xSSj+4w$3~U@|C- zX1CF(l`Ws&iZAlG?l9y`=sL7RZF!{j14+DY#P618syjg1+gwz9^kICdp9cITN;h2r zG;1tZ#?tAmn`>76rjL+0TFA&h(KDo4jry;7c@crK8~q3Tr!OE3)M+d*9V2UsYYhZ7 zMOI;gSr_C>U;m}F^6wd!RlZzC@bVovU>bJC#E{1$=WGBmxwJTh@O0BSkoOo2Egh24 z9ZQhKZ&rLK%`H;NQ&}SZacaE=?1u4rs>=w(lq-_QoO1NuY_B;6=Q&U)3%uN9{WACD z24EK=>t(>fVaUp@N%xEBjStR>*RO=tgo+tIsU@Aa=PH9k#1^Fz)_XvexxQVLn<|<~ znFwMZ^%B;A$J=MuH#lz$fxwW+Tx*%`vP$pz><0zE^zf_PZsFa`$QUA_`8r$>=tffi zQ@EbVQvTKCY=vj(GKt@AEHNhz@3oe7wEdrgn;tdBYF)qlpNSV!ggGi2Mg?=m-^;(7 zsfB1^3GtCL=$U_EaFp93&4X7m%Ye284=g|#JQGPlZpkwv@^}*l$5y-U<9k4|DAYsg z&KY#HIB%xhtM?iZ7GaK(Os6q}Te{=x3;Qk!+F4Q@LHuf58FY)*(L9Q`uCyu*WN|6zh^Bw(n|bV~rq?w~EARx66xK3amG2kP}TtN`bQ1+E~ueohtbqn*RpX=T9t7C}NyVpm#Q z({9Nary-u)p*eC_TntP-TT9RNus-|GsZGizq#$zobx$7Y=M~yBj>gi>;Q+&o*+T91De5^FLM>x zW_{Xs>s7|`K-&x_wC{NT-i7HA>OzAsLhZ?tm?XyP@*%l4WwoL>%%ngWsn{&D^kMJ0 z3{37jp7Wd?ru{at?b9YvgTk%?F1<-w{*1LpiAVZ3SB4ODAWsZga$_U$yureHpG`|V z7rO{AA@B-Sf>~6Jp1gy4$!v4OcTTY3To84O8vUPN*Q6EId!ij0VxaHs1P5b~W1&i= zWWOI~xUU;Q)8nBAC%wv@xWXH1ZTrb0gkCA7LHtrPU|~VaUHa|f`zcBa=J)|gZ(M!K zkW6@hIE7|1f|1FZ&zn=A~L=sfCvYZNGH0(^wKFU^N2b47_in@^EhM0$*31ZUp<6cCukjP&8)7ex`*F9(!#1zdqG1{aSN4kX( z$a(CMDZ2rZGVi$w%OQwhdFqfE9#=4m&IRU?L$#w>o?R06)^S0a@aprfO7JS3+d)uhsOM$C>#p!7B>L+nj)D{;um{p36J zM>+{`ZYJ&QLe(C6-^+1K`@&ndC4RpaCOikXW;Q3XZ^zDzLIF7+r4wK<;!2`5PW8FU zJWs9Og=@GX5pk7HRt2CYcKyQsNAY?kFxBSw4@pyn^=e>%3>Ow(8zcb-QU4O)f$M(F zi!AA-Ce#vPjFQ%VaI-K~es?9Y18`Oa6G+06L(&DLd}V(+`rH>R6OO6;?a#<@k$X!z zoh*&0im)U8Lt_k;DL?#0Hrf_!X5#jCXwMFjT)!1DnqiIgbeX8)f5vqGBMHl+QG2=G z>pA~zclt52U~$FvW2qQ`lynBVef8uA_700!&rU{p?yuU_h+2}&myky(Wn8)|F?qiu zO+zdlD2qtbvMzVtOgYc!o3{vt6^#!I;RDWBYo?GS=)ydvF!`k(|LScOJu4mEv@|{+ za_mMFQ_>*@ui4$kQfHT7xS&Lw>BH*xIybtRVOXH%e=1m6;WUVnvxvaBMz?6-@f9Cl z+$hskwSx;$%J28vG-dxbRpn$`G8+$Xs6He7YsY8Y%sr$b$8HmNNY1A6GS6fh+MX#p zR`URB>TRCULmg0ao(inOb3D1>{oJDZ&p7f;1DYp)E;rcN=Q2awo&D5t2@(uj@G<d$+K*2TWMuxg3zsNQ_^D+w 
z>S9~d8n>JH!CmONjvZTAx?650S~3R-#Ak$oWMBrfJYR zUvl>P2Ye={xWAR;qpYX6boCc$S(D$2YFfERZYbY(QE2R9-rI2_|Fk(Y$#GT^j}fRX z#4Q_9B#6eM26ZiIK&a%N4#@v{!9l$!(>xQ)SpPr6tP3HdB5W%5u&tg(sz`N>)s6zF z81;7^HFB>3G(}8Ch>GCcjD@6nt0)I?hJa%Y(QA&3zHj>?7AlQ{tZo0yC$#%w`c~Xx z4w=FFrod_fyq2KF^xrrG~6yyVR<-AWp&I z3(q5`Q9CGM`OVdjnPgk%wxy!iBg+2CDqrcI0Vi-H1uMHke28X0) zk85&6pD~muns=hDq+_ug!6Pq<_ToRFs~hJ&Rst2@{GZm>0b*Bg9^Mv58QbV)jP4w# zb^V)R*M!?zpOtEq4S?Z~H12seKd@{nd1X)ES#S>LrVs-d&d-IU`Kli*C0SQB9$Lyn zBDWP-1PTj(|8-yWt;PnVjT)*DxN>zvMD5u2R}SMo+r8M;vfg6Q{Ch=at}OuV&n&9C zhT;?Sru}-e^QoXh;m07OVoBkXjID8!(UQW@#k#r+T1J%CdgHQ&!FXR9awjp2qjU&; zEKCN=(vX|*P}3WeP|f=2?%zvuS!{Ut`u;OeR31AezrvmFgWluCneV}xovGuRa2o`3 zX42wvX+t0zh5^I=>+b1~;!=AWRJHJ(+I22}YTNN;)ap&Y)Jf%CBu?T!{f1(o?(k@* zC}y0FQxdWKepM~NQ@Vc|?joxlxjI{BTd_DDB9u!Yn-`%>e$GR=S_g83c}y(}ng`2Z^Y+@V03A z*YK}yPN!Ab`%i@{sC7>#?D-S;@*X7PcKZf=SE%ya#fsqkrKFYbiJKj7+1YFoNcZ(i zR*yUwRArluN~Ek&w?|0E1_Y<{O7ND4PU8_zu#h!99DVk8*57}PdBw5*6J5~q=#dF~ zKPgh%fs}N~F)KdknOFAGrsu6^tLq0va)p;?Lk-_LIOoeAXKBOw>sRJ2HNry9d7fJ# z0x9VDlv;8jek6irgPy^qK=?G$E!zt-#F3s#ts}Van{Fcv=Mmr9>jcs5T>pvHqa`Q< zvPtqb%h2!mjiG0ow7t|>wIWC6B*u+h45hVToR1MrmPX~!9MMXt%{pV29M3y3;a^~E zQgMzJf-3OR8Q+7I6;RSmJ2^_jY>s*(`ao?Jt z?&?&yb6M|Nqu6`iXQqT>{G|(K(2PqMf^D6h>;e|Z_B)8jI#2*h#uE7|yKW$<=hFn1 z={g&2f`#baQt$0qlfCh{2O-e?)7~;_;(_%tBc81jMGiGmJKw1NNJj^ZyYSouQ%@Sn zKXyF~k_Y`AU^HLtYn~2r5Zbl>QT5s-g zM|w@+nb;puv-zSr5Tn5qZf2V1CxLte^S1)DCUbWi=3WRQ){&x2qBWtuo3<)`A_kXI ze-&;N-h-Mjm~(CLqWVao1R~d^+SFv89jp^8V|xzA7x*g5b7o>yB zoG78j_m(TqH2BsjYjQhgQ|25;AE~&zSisP9`+hyn>e^x%iD9*zu|*;JB`5c-Bak^@ z-w;$~z6GX0A!X05Yc;2u0BmL(ys0h_qi0XkR0i!q^W-1~pg=b-h98`js@$KibSvek z@}IMkKUYfZQJs$Cu+cJH z8Zr{gX;=^4h!Ghx-SDFw-IrY-+)Wgxchh^p_^7xet#-L!CjW}5CYjlyM^`R${;3kl zH%-Kuy#^Npi%7&IP)rE$XGJkftqm!WI=cBR=Zb{##Q{qw5qrg=(S0%iNoLX{5E`cO z_@x%abm9#1L-x|vS-Ueczw^TbZzDr!50OJ~eTSC)6I6nQuq?1Po12HUF@$9z5SmX! 
zFy%5u)5&%)287+;4T*39`@Sx)=82V_KH6i*6&Jb*Mx%8g(itf5v*|k&(?JjnQp{OY zOYA-34!JPHX)IV+ET1j21(cK2J#wk@`sy(F+0~swTC^^rYxb{F&!A*Ue;fAry>}%2 z$fk-HJ2Kh8(QP#ka^y9lOfx>w2o+nniWZHI$Hgkk2+Ur*%v=7aZXz#mtFF6!};n`Zuj*1Fo(--Vu z-#n32dgiNCiYg+%E_5T=UYv+^o_<>gZN!$n37x`yHLk7(2ccU`X6t7M!+XtP4*fs4 z(u?rdq8|ZpJ$xH+ED68sVdGo1!wS?GMm<+WTIx%IFhf!5_T3M8F4wmYGAfYvvexRP zV1#GL&}G-qI3X{vKcTo?WPIWIDc!r$r`chGm=GKfL|nv3osN>Z3rG<^Bwk&j2`-Lm zIg|sD*d5MsLBS+U78ty+`}9cw_8v7IO>DBtQcGWBgq;1721J8TlbtA^(w}_*WTb5N zXJR2m>1M&`JDF+iSGgkZdeUj>C6tH^Imxg(3((Q~azek1RC$3fq~dsV#4gK|%_(H% z9@|+-&C`fSmg6~YfqU9%)#p#A^{8D~gdHLD6Nsrs5>*z3?N~6R*l9Hz^WL4RCYf9n zeauoiQ49)zuk0ZGHmBbmLmWf#El(6S;NjwXRRg@~z#r_n@!Qwi<4B(1RZPW)l<;^f z$%XPR>xS(Xv?JbStLT`5zLz)P(kWh&cjyc+ZS?=HqPJ{M%sKwr#4Z`V~Ei{@L($ zNZlql#>z;-z%TJ~lxM#xjXw8Q*tdO=dOK_X3v&L0BBCKMj#hrVO@L@CQn|JX(3uOz zN5a&Jy6LrvdsF5MVanlkUqL8*uP5Z|8lQ#7hFQBT=Wue4KN|uva|{(IU`4NKLr~lR zn+E8}?zhsmSZWgJ8*Wf4R{l3BA_el>8bBgc4_+obFaS}SILmmR8`$ASj)H_0i{e7i z%Jc~s#T5Eg6TGr$pbOwg=q-o$MC+-qwq4oPVzoKM1L|meE^i-p8ZxJH5H!Q@Bf<)3ivc z=Aw^gN7^Rk2;(+GiZ6sqEwhsKvSLgmE}^X{Fp>DekyQ^!P` z6-nkG5$>4!d}VIT(4~#jeT!>tdhL)0%g&=FUKB(BlFqYB=x{K=G^S}j61MLMv)c;)XMC@=s1N$ zy`fI2JjxqBe>8e~bPl9_@Kg;w0*vCxBRLY!T?r*9Y{?GOt+hLvD+YVfMb%5WM8-L@ z>P?jSgE`uQ;HnL9Uiov%x;?cQ3|0azw_Q*%>4?2zE3OedzgZXq)Y&cz<}eh)R0aaTwd?Q1a> zx^R)^bEYB==Rt-V65sCC=)3+PwF5Rzh}+pq6)y4c26UO-_Ti9n)5turA$Qqumk(#q z{#*;Z0SuCJch+nRlEjB<{POg1eH`V_gnm>Fz!N{Ke{qa2ITecCT2|pU@g3WGv1_5z zy|%?dRK~)qrv@E{Bb87Mzd66N_uC^eKR=~cbye9Nsr&4jRPYG-9mB=e$gsjsCviy@ z!0Ts^mYwp@I;F&~oOBA`)5?eTw5UmsY^*FsBTvsLJNMd@L+u#EWfAXgod+u9?AnkS zFo=o6xt%%O0HdLw#$J=4}r__q29*s4f*C> zzgkToD3A8j_9Mm@6N3{-Nw?{=uL(%upE%=BT;6tf`0x)(9oihQfe;IBr=gQQxL;-f z8$XTe1yK32n^6+z$>)FR-AKPPHj1{Js?xOyKh(zNg*1SIWYr_IahS{1oEK`R@T{3N z8Mh&68De!Kdc4Srs+%cBmCo}&*q0FV{-^-$3m5J36S$B{p8>IX^__1rd}E*6P9S(_ z%p^W}$#9EeS_fR}_Q&`+=uU#;I;I*=wth@q_?f(HkFX&tO5aoy7Rod@9u&*r$8*%e zOnCEFC8qq#5<5et;%tAAfndUKNlp1Xl^@XOYH+tdFbmdo!2J?mF=w4`>yhlm|eZp_|qdJu4` z7EA1`JZ3#=llgntLCR(tsvpF#0Y%Ypt4CDg$eb4TmVw^R2DY5pL-azD3lP$$Yqw>8 
zC*stMj;8{pw~TiyOs-cT)?W_GFb?Phw0t+ClVp&r`~j^X5Y)DXEP0q zhvu&8RTBTDqTU;^YxpX|1jDEwE%QfTzc9;%4INM0%`pgJzelsLGRtAzBEV&wa+R9r zKr*)e+gw0VZ_rN;8*(MqlZU2Fad)B1BIikbz9h8I7g=haSnK@~YyZ=^|5tjRPwP4H zt}k7}xiJm-T9R^zNm5U;s3fRY=nW0FD+XSBJEI&W5wt{2;hwd`8AkNWkosQS`?dn)y~1ykl;|=(Ouo%KJfGzsr!_>c+KDn%Mb1vbn5A zt&usfJX+HDU`jXqA*W^OEQO)U*c#cSezVc!S`zP;sc7s0^`e`FQLZSndjl3?tlEMDy<*qA-XFV z7@jsr8FlGx$AfbW7upskxm3?>(U$2Xxg-$&luYR!oro_JR&ig^TcklMsMc7zJtK&R zg@f)(>&cgx>2=Wfwre>8iKnK? zzl8Y|R~y6k+vwA?iO{HPZkcO_0{N@><6JiPO>9O`@^o8s+C~9xK#3_`Ru}jyXt!N+ zMFQA)>2--3BbJ;%MPAlS;C(%)`j;#&Rq|W?m!~pfDoE)YoBo($mQrr3{eX6)>#!gT z1i7zJY$YoRyb?@<15HS&X19yx*y^qAE7!;7VUQZ~9b&~zko`Qav+MG*6U!wM$et}R ziXk>5{rZOF#|LyrGRc_a##Y%h_f<~L#hNb;3h+0f!Z4UaV%TyP&dp>yVJmcj7YdwJ zdVy^<8wTwXJdE~8Sv_8Xtfdz1Ey+ChOxbR^v&_CJ4<~o)^IHU*M zwc;HwL;H7ATcY-Aj}!6%N%8Dz+6(WWzpTRGoyr6WMYwUD*@{Al1*+&VFOjPBv7pQU zdX&s;L*|S6m{QnspTw#FG+$@^jEFX%`eD;^7?hJL1ktH89~k|g^3Mo0Ajhl?+!z^= z6@MX$eCVmg&R9%m8ib>aSn?ztT`l1xw3Hm63qG>d{?hkm=-NeX8EP_r;yP6w?Y;v< z%YNMTu@8lqH!2Iu)%=9d>kG{2gJfg?PeIxx#b{StcOffh{j8w^R{i72a`Gx$*#))} zjf4oQD%?6uX$Ye`l}=^~gKjy6(*m`7yt9(TqQ=-j9yUKV@J~9oW@FHP^0FOk5_jo)y!tW!0+7 z_G>s!>tLs`m-CbHm+A6K;nm%l@UMiw$&p6~h{BhQhzv{gDWPCaXh2hhZZ2maM8-v! 
z?M;Uk9ui1PDc^FHq?D!@%b+x^u{xL1x=-@(2Z6CjGe3uMftTz_W^NNW02)4-(kleH zJf%p&2UzoA{oIHby&XC?5PcC--Lo6PHFn%Y3du5wB_qveI$$g z)-yW|r?A(y+C*^m9f7xkc))<$R%!u4hMl&=K3&u0{tZ0@k<31l{wg+OIRQr3z5Pm* z#WsX%gg~HMR-?33RsEi5q17-O3|wqfl7y$~2DtZ^UEr4L!#VSpkAl(AD4Kb(@R78^ zZ4+f_It7(#ky9xL#B3SEtsL(}u-Iphm=}(3pD~kt4wAYsRxEaL`O@f3LG0G;_;eVv zG-)ctHJXYtQfEgoM5>UVY4Ecqmk~)x>2OeXiXj*tNs=b{Gf=ad>abGb5~_eX)Ud|1 z-4~op(D9(NQSsiWO<7jIX41AH|B%RMx@_@Hvg}rz{7K6Yszw7)g?A;!^bA$ME-R@( z>pB`EonP2Kxy^zDHDCu67hkABihQvA?~}h3sWX-ir-0!Wtewjd+>)15*o)Qsg|k_v zR|zF)15}smqLv=Ol8^#RS&`Z!(Fp;|MJ}$-2y3Rk5q)>|;&G zoq5%DNF`OPE=`}qB@`Y_^JoG+K^7U%iI((ZVsv%OBaxT5sh71_%(X0}V)EL%K)>j( zK_=8O-neZxP>a*lQ=g{Dr7D4eBfhL@LYtlqxDgu#{oB%+E>j9J24`GJH&rg&|1CI# zcIZH*)}Ge)Y9%lu_8Q=_*80?~R!FU(8;3*3g{zN2^_(>xe)#;e;|=Fw=kqL)eM!{k zw)U4o;6{%o5fDThjfU@*pam)Y7r^Xd8M*i}0i;sTdnN|}wbxm(Q+O!MmPuz*#rT5) zhCj+8NRf#a*9tRwQFZjW8&o9{x)o=?!*Zxf!z~)Dy|k^U8c9Z-H3;Ct9{A2IY&2(M zUP|^pxm%G1aXosscW*6B7)$_xTBLog5uHVn)+z$;IaLx^)yQVv1CBxz(%uigC}Kn{ z?M8rt<-#~==J261BP_o*{7%jt6V`l$DvsR7!(rQD*QATMn-5?at}gbgr`+$pY@5ex zG#9FyJ*lDc5I*MCN}h>@1azACadK`fwEzYL($LcNwEM}9*$B|I=VwsOd*jQ>1CiA6qaPgp>YTa${zeS zwEB(MAlOWL0d?(jsd&?3<0t7@lqv5C)FsaQSeJ3qfVL%@3Nx2-#braLMn%S4yN_q> zamNM41pHn0Syhz92c%~Vl<2?8^H@)vsGW?A@t&pYp+o@%%&w%QQHhwPT@nzC6Z~I6 za4Tt~5g^Sa6KqP~a1ZO3df%sOY^bcYS5aO3try_qV;p~PQRV~Ul|jCEE_nR0l#~rD z`Qg{`Qg)?;_~_gF{Ux$#NkUxw+(8)|nOObMY#nFWC@g{wldG}Qr}5&F+Cc1?3!24U z%#pAVZ`osGSr^u#%g99P#a)+f)lWMxgjSKFJ%SE;X(8N7*u(UtX{hUHZ& z6EA^ig!u2qJw#TEzW~sXc%>EwXhP^rhJ7~@D@LNdn(X$D*Hq2#w;9-GD437!&j4*FFAJ2IP-j19T6UU0cM~>CYYK_+nVJ=n*g1+r%E>b zvO8yTm_UM!N3FgI8>ByHd)M1m@aLKj7iPG)x8!of^5l{KNrb`bTI~H& zdW-5{&$P&g)BP*_DR+u1@F#s21w+PEA(gamSMD zH?Wb2D{+(RKVX#o55Wf_<-5Z1t91Myi+2*X&4FKr>PezwWHxJf*jhKC7)a706!1S} z*xhr;lddj*@l|u9`g~iPpR?v(s+OLi7nEoB;YBV0jIr(Y?+v1W*FN=AB=k%T96+q? z{2aHFaPj_0H@y!PK#T9T>GtP*DGMQI!C%Aiev0nymbyw5_TaP|4{Py}4$e~I9b)7n zkY-^XkK2%ts=dBpPMytQ8n^=1BA)w+!}8u2hmgD%n1VPzJ;)}? 
zr4^>LY`&yz0(K2CKpSU*yG8!Qa&mA+5d{$dg2m!qNC3(~o@VV$Ump6$Xx_r((razV z_jU;w49ReI?9QDcK}!`{S?nTSmM8Ib(~uo)ClK>kf98eHGw{$|8?hA&p6Xol@sJT0f4+$2%Ce4ogCGq~VFN}qKoCb1m-w!rX8AP(}v zsoi}Al#SCSWm1_Ui11nt|IIiBXDshSs4`hFq;BHka@vq)q}xKOYQY3Y_%rl6-fmCez6p0^UN%fYJBfl zcggZ@klFwht645H58gAeu$LHo%>ure_K4UAAn&m0}h3a`Fj?@!PwG$_M5hpzqmPiH){}Z;6QKYtQpWpojB%&=wwLKy03T$w2B z-^eKEw2F6$??b-vLefeoqGI*&xL87^ZB)=`DApo0H^N_Ey1T4K1Iu)UjbKiSjVq+7 zladSx<$5X;DWy1{UQFQbZ^TWj}7TOV@70ZH9@r707 zhvhxPRjbnnZ(fdFI=#-+4!NPT>9nm{sNliN7x8-fk%EN-u_|Yp9e==x$|6s>Wz%vq zt~nZwUo;Tm@o5J7#T?*{#_^H$d_q+5d$aFpSu=F%{d@j0)#7kJj)MFW;{txf^71xM zt+$N#mEF8{a8UkdvmkL@(AMa(erRfG<>HiSLf~s=-0>g z0N>N+AgB~8D#zBwBp~ZPO5;_*`}youkR*}K2&H$fX(7cVFU$%{aP*6lsg3|*+DA7* zGCEgz?dqiH91glNxaz@zjT-&cZ)D(DJ0&8oeu;~#?_+bXrv@o2c;srP*J~h4B(mZJ zb5M(BBMExyC4~Hb6j2Wkl{u?ST;(;DEP;XvnuGHKy+DeoOYA&!r7s2S9FS`jvodt< znO_bPzb&G~Tk+C!aC#ZBEylgkD3IzfsU@QGp{!{lAR@v;<)7PP#`4YMXxpg@XM-nn zIpiVoWf3_NnP|_Sk=RHbDAI}1F@Vm!w%tUCcMw?OH3lgBZxQog77HeJDa9pN=IDh5 zpuQ}QQEb4`i)icKW`}rDD#QUjSkj|SX$c>s=)rTr`J5uXst$!Mw@;cxlXSn3&3?Fb zKrL;|J6_U`OrEoAZ6mM5J!5DGW8WkbF5N|)MKXf{MBhOSJte@PfCjlp33%PB zs|e3bLu`$6pyl8%`3&{|{Cps}3|qo<;<mLmDc|Ih^kx`}Exb6oJ>zhPOmNI*!o{?S0ihhtsuSla!jh8K47Pvoi=K9uG7 zv=qvJp=#%K2oL)xr<#Yq2du1FfZy#r>Kgz5e+J=y8z01#=V&aRC#6hUl4LaXeslJ#v4tR73U zvM-D_SyE093voD;LuVe!W&r>4Qnh)Zn{Cx^fH*PFC6>19>3nwwrQ}A1KTenWj=KWX zUn5pEv3p*-f4&>ETr3%e-o5%|^%|(yzu_y9&^Wvk7ch4EBg3&B-TxHw2U|W!_R3{D zMqRI5cJ=CajfUQJ`oA@p%p$~j?aq__eD@|dgW*h%10;)9*?(X}z-50z6Q-*CDi~7% zoVWAw6e5-{#QP2=qqPhT@f85JVtk2W9CtYFvTc2;S(-qAgtHBOR3n;62M&DR3><(G zshTQ%NPDX|M#>o*6S7MK`Pb1wKi-tt*_J{CQFG`HHTr3@fdos<*nJ>e6^L=FB($_` zlbF_DmM*GJW@CZeQnV@VHFzCp%uJ;B=Copaysk}rL!tYN@=jmnV6sBdngtmv+E5O- zNd0}u@j=Ae-1ZU}h(>&eMfdo0wf%IU%Q^PzGw6*^aNmXph%(a;&Wk%zc(?r8c3 z*X$03G06Hw$5K5q{yh=cp0dVpm*x@`!g?8nowDS6hk1>29bi?HCxY)u-P}A|yjCaD zF-iW53Ax~C*rQbZlUunNeVTLCEI*50CVeHCAuQF;y4~+tgFb6Gyt*A3!qi~h-mmU} zPcn_+jwxO09V8#;WKhOU_IrVaT0qg(olLBV+IDLF))<=Lcf;L2Vdz~&o{YGQJ7QF@ 
zA`g`Kx8g@?^o#jDsE}=RVT4DP0qX~j##aKQRlW)~fxzfrZc)nLMGaQevSNc`F|zUu zFEhchroFyaJ<&3?rNAcxuL0RDTPmS|obm}*z%%oWi>EE)h|9{zQC?3rvuI7!c4W+L ze#{mbueQb!&CYe9T<8B~IX&E7oHjh$2+#Fc`nO_-!ceWq>aN9Su&5H0$5%NC#L$4H>$Esww}}j|g$&Rt!zuA2=IJ8ZR!-$1mtYaT?1=v;>U3#rcrjUe1tEP0jwM-Y6<8(Z&>U>< zccvPxJFs|M>Ik@=_^P9KwZk12bCDPuh0U4^KwhC2rzdVOIE}GH@2OS2&kJwwDQdlC;Ed{;fs17B+1fPm zB<4^}erny~ZeaZt*P_+-fV{gF%gv{NL#GoH-eV z#{ecq$G|;UckknHxl9(%PaIQf+pnsM)DE%5#xA;C`Y_y)cvn;fJy^t%V?4rii?8N}T*h|_+8ph%Wbo`V`5(H0cZamn=$nP=8&o?@Oe+@|qws;zi)K!d= zO?#Q3In2W`bBJ1^k9_?dcQktkWec{4dkUaA<+ZHX^jh* z>pEU1XS(>6!WS)_*t)0T4Nn^6X46@SWP)1(WD8Cpq&VE2zjE=&S#`i6F?xo!EC0v|TQBATPv(m2fvH5yU^bGKO59IVs1=8X(pK8yurXdqGyL2hU#vD;5UtLUW{i zI)Z8e3_etE`xpZ^lmwI`gRxdrL)U5|d^^)yqf8ZavR#u)ZMxa8oT|2mosp$X#FTS==^?W_vMy>x;|aiVf+3BdxaowCzgvl`YB@y3_}GTpE*) z=Q6z2NZ~Xtfh|&AVjiL0miNGYbUHp7bFqi*%GVHweQ-4+gG6Xx0G_Y|>xx)X+q&)$ zGGyj@^q8-3Bl%7&^4OZwYA|QZt}tFXkmyCMAXkRYawA0KKfrC`$RLMl#ou<4<42WC z;ASAfRyo;cPb?!NQ!y{llV9^Pe)sRuaJjm!XBnJI3F=xPP zt^_uM46A+OwB0lgA6l#^P?Fg-i#_hMXJU(f_VgRjG=4FeaJ}SE2#Q27WtR+oR%Ye0 zqnC)P(Uci0arB~S%JG@ss!FVNT&mE20xyCUXPKA42>BECw_+I9E=4hvg~OiykylT| zc+<$RS~tqm9ao)l)@6E1CRJ>uEN9B>qDv3~ z0~<#ubCK1NoS-*r>SqNs){!M@QXW32(*ooH=Zo3*AsZK`bOyFvyW5?Y#Wj!)8W~7c z>aaF*JKQQ*_WuSqJ0h~l{?(9wB#&)>*Y;{JgnORMW=1ZlMq4RA+jeM^@h+c*^^*ss z(`^77+QbU_$I*l2?#P>((7O=0{;!mLLPnfXW2}aR=rI6EK(@b72=4UkB$TImMiOIk zAD3pDaS+ns61c8E`{2^UoM5;<(g4{562*q`Czij#?*#bUMB=ktd5C}vZ1OV*gn|~@ zE+vPVMu_-?{CGyJUE|=|Mt5vxd?1ZY^*&`)i}& z&TBs2$_+aM&zwWrLD(6Vjb=~i1zGG7XD!f4kL1&I>k#Rr({0DVtG04L$QS1>zJaf@ z==DQes-*F0R+n523vWhC=chFo3K@3d-Po^4^&m%7t!UL&PuX<`ManMjMQSsfH^Qx| z)_aQ+6PpGAm5t3+i&=n=?$sF&~3 z%XcOA7js^W$D9Lxk#jA~|#+L$v z=z;-#t<^RjkdNZsJXXe+D4rpA$$&U&P0P4>a$NPjrl9;X>@MoCeG5FWRa^NWSsU|j%19o@_ym^F zMvV>g7fB_Qprd1sx;zI}P1%T7VT~eVxuWehcuqBJdAT#My!>k&rS`=sC@YqkTS#=D zRT3St>VwR&dUxoR8O)v+4)IUIDgHE?2m2hnE$m|wR8H}ja?LrhnYAGcII&%tNMCt3 zy60yL=?SNF#ElmjhXjYYGM{=l=Nlt2fhNCPOo%k&+u4RDBmvj-E^%2gJpAu!6mB!M)cq;-;9bVwd8+|_h8lzfb4{Y`TWGV?fB51yx+$E1(u 
zYzYDS_iJ$MQ&5lnyg+?diOIeF*}8JPD;*lrqn<#bz-^!0AhDRqAKoeh8)-LOcuT^q z3ke1oLHJkPMnsnMsL)xP7UxT>TTL&=DfuAPpq zcxgDkg^-dNA5n2y40u@EiAIGRy=0g6X4yI}POLB0 zl%UDvXS*k<3G|k*v?mxSZS{0dA)uH1CXtUEVAt8n*w-=5oO!M5Li=N`p9Y`Bm5F71 zTE(hrTy3Tbs8sW$BeXZPi7B0}(2mj*ev4Gi*N{f#mPUmZiVOO7ZhuO7ShYmYD0z}n z-@&-QQmJA7sB9DDf|o$row$2aEfLg!EA;IAc3W}LJxNhU7~vcb7MMk20n9a@H!s%M9HQeyzm2 zW7Mc6avTI36$zj@#{NM)$!+QC+>9!_x^|%rYxpLk)vMqLx zOwr#wA*!fWD+G4%S*gJi?&WQs84=x13nuel6IKOTX|ae-d$L;anF)xINbzeVWkN+{ z#Dt&N-S-AHn0&Oe&072W(#19ERvc$84ZvD-?#=k%pa`6af9MJmq|NM>y(ObT6by2b z)A@}Jc4z&+L9YOhfkDrVy}w;MjUR!6n!M#U?hWFlE9RWI82?lJJ^Qj0)Eu&55mXJaM8dvg*$NNSHZ~ltc3ZQ*cNBoq${WzdpODkX=kP)OQ5-1U3&Ba6 z&{0}p%3-_K_hq36H*{{c{5&ZQPU)@d@!{*8FreR)*~#}gy_AU{CW3FIG5ebwTFI7bgR@EcR?aSDL)2)wh&%*fEB%T-T5|YQ5Y)u82Q#Yvd~AZDUwk zXnv#9u@Z{+C$rD9&Kn-2gTS%?oR=~jphcLqof#_~R__Z>q&s1pwg$J<8u6*#6hj&3_?41%A$X69ekEXoQ=}sCbz}_L-{kHmWbc4C_%44!>CvO$1WbRC>Hqxf;n@=$f0vNd$nv zIqP36wGwgj`JRUtsnN3^EFW_H)FGA7FLY(y?rirTJ%0R=QKWnFOxczNt-0?an~UyoMB*D+|eXgE7AXF z6j0eJa5@DQ_GEy>;pU`gOE1r{a@11I?-E?Z|41Hmk8?)6azi0@Hc=09f$ddSmvzxy zT6W!wfa)vGg(j8i=wxs@Y#C^wJo{<-LD=jH$J=l*>6XqN6q8Sj5D~)1efMG{vO5wy z;TW!lSo+IRVM%w~L`OA)fV!@cP*573jtnZOs|u_fmOI&di98;}Ov62j=`hs;5$8Q8 zTcNNM*JaZaDF0(5n%8s`H3^h#Y3s+S=bf_MCRgn`ou?@{1a}5`X!AA4VH^1n6??VH z9ibgwtJ32`Tavq`LjP7;*rkUyF^E6apj0T5$~y5J@U;iK6D=Q$n-zFuWfIUj0Wli* zfx8h6?44Oa0eABpHN%y+|DVd%tE69m!GNO^9t8lbx?O;m&nQvmdU8Bc+WEdv(~*ZX4z)AY*-rRBBIdjLu;LmG$y~|Ey5MX0G0wL z>G}j@ka@2HRy#a$Hy4J3$TT^Mt8VpLMeqbv6L`$E_wG0jn=|wbQSjcby4D}c zRbgWTUyVn1Jp4$10_V?qNib8(9u*T=wu7Pj7k@p-zJ%c>;K5V&rnxhHJVy>QOInJx zgVzm463W5-N=#!!m1VTpKgwTD70mRM_bkd&I!c#rP_eO8T;MGi54|eYPt!XnIb9Qe zUbYo_KcquYCvYSPPI*ijpe*6=ED8&I&jp(%3|PQsFnpTH(xhu^1*)8FUrk<);2;O~ z6ae`f(I~p#++V2+2>{ooH}D?h5KT84~m z(whgmtS*%PSEk(15)Gmr-f#|+>+C980SrIG>XqZU_H4Kpr;<}c^bsQyb7jiVZ(hvu zUJ)iwU}6TaB2S+n-izwm{6VW|Poz?7BJV6@}TwFLZz)%{%AeM;q8OU01)J z?x*ScAH9_`=iSBT{$3q5Atv__{$M_}-{0=`TIeXSt{JiQZ;BvsxaSqm+?0ZeQ;$yf znCSisk?37rRq`p}t&enm#3SE-*i9uO)TEYbGXPfg*Cuozz?H)4SZOnWI+S(~1`;g@ 
zB0qo1g=2sQu}ZA)*^4926Hufl7VsWZ7vbcNdEg7T189HcRt833axBH3O^$r4;J7c= zC@@a_Cw|vjeUE=G*lJmz#MAZ(k$1SP|GRr{E~|ek zkWAT8wFedD!mtGsWZZIbP|!HZX#k0V5iy&?`|gd!L{SpqUG_Bz zuU7iTaIXpYdFSsHi?wzovWQrYk&|yZsGPBJ9D>jW8T929;NZ^k zVOiWrRHh~kVlgYRt2T}u*tBIXC1lnKt6@wP5G{bc0;U7;hN|1=03(Wrby~_QxS3K5(TC2$2dVc$5(g$f7SexKui*Jp$T)PXT^CYJ@uiFXTRkccmm@i!#^@^$w?EAf1_3}S$~N(M;HAx zq97g#Zi4PGHgmpc+%t(NDI46}Kkg|N+8w!{5r0Ezi;+>AO zSERj%4x2d26V4l*-02f<#^2QafRT+ZcTc)4hKbWYOM+88wF=+heurdC1Crkvr z*?RYqXpMP6t4hN)7dBuTC549k$IS?d{x?Q1N?I=ep<>>-xAb^#>dydMr2t+8#nD@Z z0`n}htGog=PRb{~V!6bwor5&LL74x*1sEY#e}A{bu)fM=^G%)9Pax1I$fF?E5 zYxG; zb9p}V5Bz@F45QxVEAQ+~;(=?`ZOY!bOD|63buUVq;)4$#QtW*=Tlt?G#2^8GLg(Bp z$H#$&AZMm0)wEo$#Qs=$oKP<|(t%#2uR;65@H3U(t+Gq*I^W&*Wm!1r6Li_*`}@h4 z#7cmImMmU24weuW-}00;;kacElfhJL$wm8l_1kmCS?{;d3d@YWF9+hl zK)ldt&ZfVrME`E>@gxBTR(aq<19aU=tw{Gt9$PL{g8LswMBJQypZ1X%{YwAX%uJUl zU!XtOF2_;29i_Yv)wG!oN|=ZU0H0x$@I-b@8E*%;SxF*z=abp}*jz#r@5iKJAf6Ou zc7FKvRl5Sz5*!$Gtb+s==PTZPzW`rtN~xSc=;#Xgdg9QShrVolHjeF@ zBwxWoVMw>FuRpq~@ZhdzlD$MXPrT4*V@U31|JBP)fO+~X>``ndGvhIPYv$6IfMk|A zc9wBr=GsDbF^3qb3)AcXk8(NrbJpYzm+u%l_WEwdEzckUu}S3wmlXF=02doPS}End zzatOlY(6Q_M*RA6ZPM1-MmkDW=iys#o-A0OpGR%xxU`2+T1<{a1fQ(ZRXToHAgY}N7xwE1;aGv2itdiw#sKG| zrT=OBR#B37W z3PFtJp>OCIWyxY~saefuq``z`{_(%q+|Dp$se>9|+bZE5u7+^!gbs$wJ|CF90>)FU zG{fopVpY6_+(_$(6fpYKb^Hzp6F*&%a%DEZv?Gf^(V?R}b?K()Pk5}9Fxd6i1-2CL z!3=mGocOvB(n)gyP+IfpL}CY3*ujUBRJVil{WMemT26;5@Rwwu*b$!`^lf6$!LuH> zcR@a~J?;x9zuMmyCye|>R7?&E^U_{3G*oIn*mL>nflVdN^J z{#S2qi787kT{w;r)HdICO6MWal-D4~z-{r=swmtm+*R)RPYa+8bMEPttqV&mQOX=nRETP_ z0b$%C&(cRx*p#{-Btej5aR5Ra&YNIn8Xqd9Gy3l;>Z!G}pn!mQaxjYaQel%KQ&2n5 z_?)6F!qF`e^?*%-H{LqLa7m(o|0}({l9~pzBLS$Mu&a=Y24^^9>}@^^oNg194{Y*H z=}De;AL%M|i8$$kjk{QO-$bNkRq}@yDe#hC)k&=`DG2F$3B`~UN&imv4rK#zS7X?` zv$Y0N4#&f&)70s{Ql)7gTo^;}r?i1GECoxLb|rp|#lw2b{s1y>DtezqY`prt9t6!X z<|{n83XHQnX^ol-m3pfYJ^?)!wqju8>P+@9#cw^@qIYHs5<_4r5C!NB4}qO_RJuY%I&K~=xEz&C25Eckjp$tULCuiKcQ-R_;Gc2XvsE-7gpZP%l1 zrt9=Z?y6PMV1`;)Ib*dTQnW-OCMlm*%ML}vZLS@TS>fQR8-M3q?P)x)QxyN7Dr^oX 
z>Rwh=<4rWdcyXU&U9hvg2|n-?KsND++)8vG3;+2c($Bsjc8@JsI4U1qW+zEZhIXu` zu|+F3%HG;Obe>!*g*lGkleL#^8e1lZCSmimaJ$j_G+)N3eK3*&1kcfp^^HytGd1YH z^PTpD0)i1OJwMGDp*aWd4g@cOeNB)qzu)H>-t-F`eRz-%7r3VH1e+&2;y+{@x3awG z2iUwK?0=}}J!TYaEmaxw1gBpY80!wny8C}-#no=X50(+EAm|?~aU&ORgPB`ef=t;0 zoBq&YX0hQ9C&i+dDATl5&+u1UAfje8y+6B9BX~5~P;zJ(l{l*5`M(IZDGVPd|0B~M zpT%6v`sV;a`D{HAocy5oR-W3127EY~=Yh`|GieB=Wsze_nLwwPAT@V3jDLw6QWDpF zaB=%Ao%)FZQ0q+|j;8H#=UI%}MOt&*)%Y7|Od?py8fS0DO^Z659?)^~kjfZMQ(F{1 zf6(>iP$Sdr>l-aWIitCxdPgG;(8`d)U_ZgQR%g&!rfK_U%27q6Wxc@Q;Jjd4`#?Gm z2OVPau~vIu55W9tbyK7!p`D);+C@2VBZ<2TK8it;Ad)E-I` z`h}j*0}f)lVJo6#_$Qo4^v(zJpqaDy|DcQ5z6QEPro3mdeQ{hVCh2AaKp=*rWi)Qx=B^L zOO;wLX*o=M!D1)Zof-k6}$@xfwcK ze!->AQLi+NFOuW?^fxkbV7r=#jN>_|K?IH04-u^{Qb2{uQfJ>h6abSoq>|ZV7TBjG zBm>hkKWN02#Zn(Z&zU(6*Ai~m``@)B_C(>hT|+_wPdoCLn%2)U^RHhSp@Zwr7PkO< zhr`FGSuQRAXMAvb+qPn$hg2WeZPnO#ZQ(VSJ4ga#u6FF{cJ(kblhs-C+aab4R9E)x zbow+eiqKH-N}?6kg;t|cg+I*ojgVZ^6U8e4>Jto+7Yxx=1LB%qMu&~jl#>i$Px*So zMG-I9trh&Z`#OFZK@H6o{Vn{4_+LUfiCQxc_JN;NZ}&YmRW zKF%-V2oh)mm3@$7tq*jOv05~>1oC%HWU83=zRJgb__T6dg2$}em`=diS3#4-Tfyx_ zWy2cjmlT-Pr?zd@8J2G_{}(s`jm64_s$r$ROF5iJewfs%&aR{bDV)kWkX~pSir)f#Uq?tijYW&>2-9qowG#^NAGL?x)9NuAeQg;dsxbOngW?A+y8td}6U1hS(fW zMhXhSsJs_oe{n~Xk%-fWGO_yg827@$GjsU}Dwz>XC&$GYJo9QcpU}u9u;bDlYcoWY z_C~b?c6V7}r-orI^wC=W z+}!B`R!S^2j-_cm$bMw9&do50HpA?tt$L(TT#Cp%6NVvC*Vc((gqcJ z!laWKK@15jM*X?gd1e%qcT`^zD5CO=XE5I5Y?vbdO1-llyzkw3ylvNV)55}ziy%{m zq@RjjM|-OQojxr2-(Q>4DA9?U7&|q5RT{Wv6Q+EH&me08j|?OF8{RcqkncJEIol&8 zq8R4}YCdE3dFSO@3Y-CbfdpUYB&YN0u~h8>CID1u1z)S{D;Vi!Ho?cTj$-AS-s9Fq zS?#oCNZ}_W*n-{WXL@0vA)u73Kk5KjKiyB6AjCX-16jx8ouLwOJ zM(az?pGk1s4E?4yZkW2ar1HYK-{?8Ru6A+b$8!f+UHVRD4NVJ8k)RwJHaU|l_{(7r z?IT2pgw?MXYX@k{)s_~DxwPx9kf;80S||@9e?CWSSywSgRtqZ6d3Z;4P?tK(%0>CU ztcsA%`Av;YRT4_&Zhk#{Z=T?FxOK0}YMQgCsAr3q2^)Qr>Ia#uPF41DN|sG|f-JqK zilHT8K=9_7aoL{5KNP*V;+MZ|zHP;iFoJCr`ns!ZuwwyQ^ZnWfJ6T@8IVp~n@6~xc zNS~2g>=c?G+f00uLmE(hT(r$U^|dU}){2jSh7e6C$kP)%SKez}-i6;QvwY&IoJ=y1 z0lzO3rA?h_Z`zFQSQH)Zw%*LIMj$;Y=dUh5?rhHF2}E;oOZoyTXiaUtaL 
zQpFC}^eWVZvOc7N=HqE!@#+JY(}5j_MO43xKq=9OzXtU;N$?{+pLifBQSAV~FM+)U-Rm-%!;P2I zvuRp}V(`=G!${5ExDj?C1P5rkKDZTQ-ZF&sc+U33vSJFL8U{(g*#)VhTO#xy~q(Y+|`^xu8P2DSsECq1*F|l{g zZ7Fjx^WO+2X7G4TiD0J$&{{FZ$D)&>mWX)5l2r4fUGyL2*SgME8_UZJC zl{Bf+^4<$1f;c!B-t9vI2hB_9I%+pvoXo}XAGD^4x2Am3Iv#fmT%MudN)hH;ThitpQ^x}6%GgvF_G*()@BT*x4?&SjjBr+nCy5d_6W=MqLD9usy&Pf zAFc~fQtzEM|3mLx+Isl&7LIBDS9=XSG@=M!dKlF9Z$8N~jkvhRbNr}|4NEVX3A&|r z=BQ=Wo(+8EeuvE|8!Z@w{nqJez_vxI3c0T{($Y&!{9l`8qf0&e3M2r3L|QG%n7?3x04kJ^4wRXv|EYnwWZ1o&(((Ez)jbNH2WhR4=h0<3uCXhe8= zYY`y`2aozhmIZO7xLiaBT#zqSpvsV-uRmP15^OIt z#wL-M8#2E}g}ixY@tbd>s}D+zsoue6mKQiaP}NNlZ_3IVOJ0{X)|+}lvSb55Psj%A zr$qKqL6-L-ZC+f6yqng8NZ)V7Y)fA^LmG@4+NO&+;XULX=vAG`x;JqObKe*c;PAC8 z)Bcxw*sBexCL+Rkfsckn$JX#%qk(%+_k{+ECThBLjxhHV8bI5R=iR*C=sQW)IraTN zWv<$_SZh(dOuy&4_KE;ffw3gMK4E*5VKl!;t_%qvB8sY_YHQb zYt@X96_^hKx!76b&YAg5f!0@96V#H?q#*Vmsz50eX92UsFo`~r=OAT}o(px}8^?^Q`#>F4z!WR40W)d$qhVLF1+e4xv!=|g6FT<^?1awoG-_X|a?(u4;Ns!f2vBCjE+8uf^ zAgsuH!0zC^6%tG*uQ^y>r&jD{7amH_SKXe za1An{2AH}l-QTK%B;>`o^3?GVZdL|u6;0 z_z0!6xOhz@6uBDx1y;-lU9N{^3EBdfDr1S3QP8R^V)X;8Mw+%=H^;c1Sbs0(_={VH zNh~+yFg;XIicI7liiSq9A5NSz3!H97#eCxbTBeHZTj03hXoQ+N`;MnN7)opbm`+%6 z3~5{1=}GKn%3UtJEhLtjK`~H3Qyrvm{;nVIZB&d2gndS~j2Fod>o@0SMSjUeOMN9A0iC9o48ucFa z?a&l{381-zzQ@-G59Gim5f=E5eywn?ydlf<6m?RBSMRXwO~Z6N@zI96?82N2?d9{V zChceA77});i@yx2ZeGj~%tnNJVi#&=!7${G_xGXzXzL_C{x3`2* zPj>j(P1PO$6Wif&F=LxsECYtc$W|%W)5SX>BG`*J?t!rLt;#S1S_6&aH47a8Sp|LM zzI+Hd{J%o)f!v|$ItUGW;1sB__KrGU3KLB8lX#!m^hW(t&;Nu|+))G3rlp{_4wyLk zu3A=pek%g;Q45pt+(>`2WSLyF#2!^tcOa4MO$c}EHY(8TIEGx}u%S8Wo(A4H0l6Rx z%-$HS3;t_Lw_j>eeq)U~wM#5!wJ3S-7Ae5YUNWk$JOaber_!uC13l;aA`nunQNYR-@W{h>GS1#E}>e$M#> z1bYo+!en^^avNa)N%K>oH%=vQQ~{6ihv?zI%ldppI{+}S&J(V3a;aPYvC!~3I!FH) zXmp!ORjZK;;+yh*Hm~FSQI-up$!x#}i~*tva5jham94b-q+oRwLYLiog;DCAY7=nO zy$^XT8eGi)<&i=;dJHf5b1`3~!p`!>CrF_HY7sE$lCwaWuZGMIYYr=YZbu?%9(127 z<%1S|+ET(r%62@pq#0={I z4H6edgZBqDK3$_Jv}ap&lPr^U$rboPNAu;CEjF~=G&$vQhh-a4

Bsq&QG4>18IIq@mGq`YN>q! z->OyCaQ)?F%R?oOj}>E2J7sj%j#(xOeqqRnirU1YZ7B?<26B1Z&U|rxKA}PUgXYqb zgSItC)_q!czyv=bpKuI@5sgL=a%NxgMJWtx9r+@)1_eA(PP|yX) z#sy`Yvs3YYPTBI&5l&Sy6oDOJSKl>W1h`=;U!?OX*eH6DS&U%ElAKRfdy`;N+Jep? zO5=y#V4W-SSGnnG-xg)nnEJsY7Y_6GleIZRZVlR(WoeS|ua9)&?Ox-W3#Hj*PFO{m zJ@T#7URV2=Tbn-?1xQd;TRLvEDOmW^1d`vrTecEnVYo00vZjnL0gDa%>BW04wLfR) zw8co$B`G|9o_T!;;0(InKE#t6XIGrWDTC1yL<;qBKzM_jZ70wC@d$&c7t)gO&G7G2 z39i_1eby*wx(1HQ78W>-;!7(87@(!;?<=--D!U|W0aQd5koskm#~W(m@gM6%@RY`( zkee(;vuJAeakik4h@iDd5GXW}VuU95K6IM!jY=y4XCrM7Vjulb&?hPeZWjK`-$r3J z-qSLZB>kHOy-f6VO5?XjZb!!={I?_3D`m_BVeVa1HVobnD_?z|u4e*gV!hsyfe1|r z>1jkXxH!Ig#XSsXE6}|{lfWxHDch$w#nJ>uk{0x)TG?AeB_7bnC?Z9*Ocy!|k8C*5 z9b|v9hX!ESzTauqIvUiHV-h}P#bd1O)is6M8)_-{gJfK8SRSw70e$?vDWUTSAtUOL zgg3jVy))Xrj4u221oAvwb6?DMklTK$Y=Fs673#*mKx^GU3ffY79y3jwNnJwY2L#@O zGED8WVD!VEy`=Qut?BPqF&94Fkh8qK$Eg?NT}j3$x+;66-`zs@h!oX%#l_(ahLK1> zEPr-F1oZ}*dXVEjV}0TPtCqEAK=raN$isUhZ6H#$iGI0k~56*%5{=_w);K7@5eKlK0X+6Vo@< zk#~i%vNk=CbZ%N2{IIxnlt2D2b_K3fzz^}* zW2%1ZmUVhjk7a^ba=QwOycc7No`j>+;^19Xl#t2`4N4=sUbbg(o-9HfKFcvapBIR@ zR^-rS1aQ9gh-6uHkPm=r`!YpTC>XJ!eaP-!wVr|h1*&3WTqLwcLl=-j78)rSb@xW@ z=A~0^v5L%B~1Trx~RsX*6wUi4_VBoIU^j`S3Ck9^}MyFukw~`1uKYtoSl~HFVXv)`F<; zwcMI7(zdI7qILTK5#mPtLS>#=CTwM)50O}W{l3M?u*Zv#U~Y!t-WE$0vA<-+OAX~IxDQ|qbYLD(6}I4!#xZVJcnCD0dxB#evk+lz&!g2Gr`M?gMe-jHjad-&y<-PBEykt$p>aM~DGzwp#xs0eCp~2*mmPwYPIaWjNy2yDNTBWPT?wgS zDkcouTes@zVc}hCyk>3TSC}g-f(yxb%a;1InOC%-u55ZzFoEW23bv#p#Kbx$;N<|I9p#z7lnW#UuQ>U8us9|5+2KjT)chs1eM?r@x9^HrfYRD@H0xs{f= z%B_P^`S{{jL3X$65OvqS0fPr`eHFrUvnKz^EWKAJG=EY*`k7##wOv`zHeh=$ss43( z)Swg9)Xo{8m-Dyy1iJ?~1iogt1I5MR#$Q{v_!Q^sCM0l0*m?SLl0{hA93Y*s1hkjH zlBioCnjDy$m^XhVm@8`hm=+=Up&Bk)lF6umk72CmKzo{Oj%FY^<%AsNxhWYYJ#HD{ zl1tyhRRzpU?he|9tB`-lHQcAsn)Hky8|^g}vsI5FD8C(pmEkR|J!%U;+=uf+O6@+- zgf+@d$R#`?sL)-Hl4V>i(#w;OiaZmelj_-dt@dySau(IZ_`xQ$kvICP(T~L7V@ENe zCDZHnS91;B-~+wKHI6fega0XC)F?;yba#n5ln`yP7j~9GV5(^Fb@jb3;{qN;7R5_> zZ|*sf-ioS7*I!%#T0Nla9pCOO=xww|%xmkpP6kk8ur5-6Z)2zk^(>mWL+F^GCYZYx 
zm9SxYhr(B*SD8+=ThS6t4k+wN)YA1^GLb9;&2B>Ge~&h@xXM=-mxd+LDA1w_;oAc@ z3I!OE1Br1Sj5(Tm61m;&_|)kLX(Sl7BtJ$bV|!WsAxW}NOFg$`;xgvRhyf>TQcFMw zYk62cMNkz7Y~o(Qvb^c)C{aY!`Pg2+t4?5Rm`Jsc%S1LydchAXzMRWE3E0ks4f0i$ zt_DkEY}hN%+U&H4hI*dwaJu4+uc?%Umkd#xUx%R$327hk*Mm1<$d71Jd~Mi(5%?HeCfIWtzV<9#l*0g&IBP?Fp;`t?w544S1unpMT)9LGmSNE;d8v)7< zKoLy@vcRdP9Zu7PD45owAcK9T8#l);C)aKyd-gh`&m8CE!J_?(w`1O8c%W!IYBA>- zY~=LHHkoUA^vuTI3qosbTeQ1n*zVTdQ6D`y@*O|y&o>m)B<)gA4H*cw@7dANM*Xg;*N-mPZ_m}lzba&Ja!SRx#Ar&kXij5%cOc%* zWA@2-ZG)T&qlHPgydXavz%Px6O~!s{a>jOC5Bg>TZyl{=F92o58Wppos-Bn2@<4d6 zvhC5l?CKGb2Aq7psd`WCXR7ho3=q*LNvEB!fI3LO6RR}B!I6#|nTq3IDDV}DQqmJG zciMv&t0k8fr4npxYcx3;h#4`9m1SdbT*X?pi=4I}od9tW?(xOucT_^MWp_NgzryA3 zpO@jMI#*YdGJB&Lc4g{Um3^kh&guEyql)m_l%OuqF3xrBAko$J0xS7Y|Je@WXY$Bp z0G1E@v*RO{E7ba{g4`8OZ!QnNdeYO#K>M<7+OJWk7w8KMY?C)W{6iKVwNHj9XQ;!6 zCK^aVY7Bk^7>DI`JfQfc;Tclif~FxV@cKExW|8<2?C6cNzIQ&X%qG?k2zC@zqvn>R%TY8~wrvCe`GMFb7Ek zW&!{1Q$cBXzu-X~E}bMo+o*jXfz~2_Dg4BToV6>g1Opk)Z@rFy?*UdshE~VOh(}}l zQg}C17(z(WIJwsuq}EwZTuTB#lwcyqGu%-3gGyE=WdEPM3&AKEBV{Z{C7^ka#x^bh zReO8VJJ{}NIi*jN-+3pb9dfRn*Kuw*K4WOqA4m)!I5FNuP>SMb>fWECn_E(z+fPrV zC^L0BC5z(HT=B94TfdP1$isn4MEAR}CNPOQX6_*1MWLBccs=>&KCj>Zi1J9DJ+o5G z?*`;R01q;Ll31IXd9ESJm!Uq)D*{J{L@{1W<_G01F{&P&`ibrKP#ZLpVvLuCC?i#= zk-q8y2_%Ogw9rS=s0ftWautg|zIjx1*S+S!7y*EwGRTum)Gz$Ftr)XUN|j=A4LQ3d zh8R_TSRgiFUkWJQG3BXI+uuHx1Ld~T@l@+M$tXemm98xzk0B87+Gs&#c8szEYR(~G zn@O^~Hy|fokriN;m_HKVz@q_AUuVU+^@+&uot7_m7V@X6YXJ`tE*S1HJf&W9lwYpJ ze@dzo3P7*d3t1KG_tQ$k6KHDTfrR*-L6oeR);)#*#>!od}Ggp&JlN_p(h1`qm9@2~D zF%!v^S%OLSQArdoznStxgovK`HjU8%uPX$4yAmp$Vk{fI+OdN6B(5wDZ2Q5K8n_E!MK#OAOVFy~%xgi>k zZ*ZeAGE#d&C3?DT^#%++{7ngazwB@KuY0^pkU~1dT~dsErNkK7|F;h`|H`;!9H$)d zNhp6L!*!kYr4ZJWZ!ufy%h21HcnjF0SKbGeeq^!*is&d$%T5BhC|Yv0)Cn~U(yJLOS^+v9(TY(WfV ztUqJS-dNCUrmo`3K*eU6OWIU^M*KDlUb>sriy8-w!nu^)OO#W;;+U|Ro&0n=E(c07{;-q|Y184hg@y$ISWVpK~x1on`|+YL}h&nC{X z*>26=crRk0FT%$`XW1PF;?ywhSM6sZIoq0<)$ch?D@_}M$kw2-2*^KQND&N@KwJ>i z512@n6c^eWvALId@-0yLBIm+;3rJCF+<=c}L1j-f1|c7!lc86&l;u?Dj9S7&D(%zE 
zu*3;wb!6VoE>+owm7OqP2j_w%9W7ewxM^p4J4VVFv&yp z7@qaL$N~ym|-!wr$hw*NMp6%`0&!9UTZJv>U z1JgNZ8+5Tm)1t4agb1W>)-N+)qP8PKxYe*Ju+}zCE554?PcK!|1UN?!sQB77$Fgf8FzvL!H@Hxps*$MfIr2CDR;(5Z81n~)m(K3 z=~qcC8sPXs$?*v7xm;mQW$WtgZK#DgER0;KQundq!5tw5G`3=rhH?cB=`@UN&1kYI zAL_D-GXb?2z4}V*Rim$om87#6cL_1XKUJygjgQkJMWSIbxIafek-b>h5h5+HGtp!X zm^vti9*nE?D=v0T@U$RP@m^A|1w&mZD0c`1LhHW6&i6lPtt#V76CY<%MHZHW^LBGlPK08$YI*jMozEKN^MQ8V>37 z53X>q8sT$S{`hg-@sLk6NHduuRc24{Xoo~Z6QRaP7NgQ~bMUYiv&wK7+)x(_WIton zZO$%XFw9ZTsCYf6{c{NZut%#8{n-f$?ST>S;+1ZZu>@I&CUj+vm3wlU)_4@VW7UcrC-h*^_Ak6kM zsh1AHOe8xT-cM}-?7DbAYsrV1mP(#zK>Fp9KTwM8@qfAtI~V~8#HY|NpQag`$Z~EkAc;h*HWEtPq1MRYz3O? zSW+S;x^7_DQo*Sut1!+}t*7EmZbFD6KcNk=gwS^GP#kF7Ezzusplct47Z~VEr4FNJx zM>7p;0EwT>$xIg%6=h|3o2K3G&ROl$cj8=#aejIH(hXA-2i>I&`@}J~DbE6@0<*|j zD1rtS5=s&hiF{!2#SX0zbg{YJDcUO+WPgZktmXooW+tC^Rtk#M;BK^%gkKfmreneD zJH=mOp6}^KdwAr)Frvd&97VRW#dmr_ z9kNs1JSc(54)Ii`$}C35cCZ!nnt^pmIsX$@iM|c`+ZaL~3OS-bvjbTaI76gzQQ#W~ zs~A9))ReR{g(;&))^;A!BK;C_GRbmk`P@oUa1g5UZ=58wzck@oJxh*AOt{ zjyqG@^$?E~P4RrEErk(|5nNQXh^y-c!}kb2){=1Dd$1sg;uPm2tGacC4+>jgkT z%*+Xn+{M?-x7mBOOD_tG?@90bwdA^+vw2t~c2og`lfR6(^6yS`K|4US$r++BiOJt#?4nTFkS~VS z|Iha7z>9SMGT8N1H{Gk52_Qt~-GsEwMQ@-%WpMw$F@Eir{Q?OI7J#dwvJZ#P7;@*tfF|0c!RgC^hh>_s2%4}WN*3f z1>^nQ9W2MAE2WwA787XwaUUDk+To~11}CY%7Ya-=zW^_r!03c;+#o?G>L^B0pm4St zy9?8EHiN_O#s(e~bMR1b{!+NKQkp;hy>Uso z?=H8YA#oFFRSg5;<+S{@_Q&w99krwQ?6%#9V;q-vNu* z+Y^vwonI#?h@_E?duWavK7Ous%Ni^lAofIek{z=)8m?=maS(Q-%+ioCzsw-QhTn+p zC{qiQOd3+knWp+KSm)X~r9C{+m8oiy#)7$nEf7Ek(2#`v}5VMWw!#&_g^-6=BRfCu4>XSZ<;*$i}^ z8xR3bHrwr4*6gxDa7-aYfvi+9vc!{Q+!*;mx$nV1HR!PwP450rlb?<7>do!|W!}Je zepxDhzeB&9ZmhHoMb`bbG?0g;>|v0#a-3I1c5*WM?+nr^r%fGny^*@+8882ic7~*5 z#vTV)%PC69y|jo9NzD9(P`tnobSD2 zMJxFLHo%KrIZDGH&6Od`&l9OR@R)UT7cC5Y)r3tep`vC3GXz3uEk^as<}c}$af=s0%rU> zU+F1$`+66aQs(myU?&R98pplA@FGT?9KO}ok^MZfR~%&ZtP4!=55~8>fWD5?V|b`3}!e5^D9WJ z9#^}zhuK$frwtxR8p$N0W(gw&fl{LJXolaBLkI=C2i+S$_5B9eJ9$%zaerUEi z+-LH>kkjFB9TOR|2E_&Nw(5QGgMhZ4IK3f!&P1zDYihlN1r{Vplg(|}S`6a735Ri} 
z()F>{1d67kBW#}g=mZVzgm)tFNLha^q;L&pDqNnR>T^t7@N`}2(J(uA5?rWS67>?E zuFy!cim0YOil4V$JKcwh$=U0dHL+wVs^(^C|Hsu1FFY4v1LjL&u6ulsO$6>7L^S_o z&RA#4mtVBd1&9)>+@E@Xw9Gz+&MBl$Zg>lxQr{eeY0%#@k>>G;a=k+{GQ;`6BeflV zgyOZd=eCWuFAm`NVt%!ar84$bxGdk@Dr=zzaoc$RRP^N{sMx_a)Um_IwQUBy?$Ib% z26B}U`4Q_Cw^5lyYxc*c1d397$(09Pg4gj`Lm~#7XiD?moX;b!4IvZ>17S*1!vKPr za1T#a!9c3&GeADeq0MLEax0oeqdppi)nR|>Oe@s|9=6xC@;<`R_%b1e(i-ZU>@7L~ n000001M Date: Thu, 4 May 2017 18:53:04 +0300 Subject: [PATCH 0468/1124] fixes for UPDATE and DELETE --- src/planner_tree_modification.c | 56 +++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 19b4a34b..f14593d2 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -27,6 +27,13 @@ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) +typedef enum +{ + FP_FOUND, /* Found partition */ + FP_PLAIN_TABLE, /* Table isn't partitioned by pg_pathman */ + FP_NON_SINGULAR_RESULT /* Multiple or no partitions */ +} FindPartitionResult; + static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); @@ -36,7 +43,7 @@ static void partition_filter_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -static Oid find_deepest_partition(Oid relid, Index idx, Expr *quals); +static FindPartitionResult find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition); /* @@ -244,6 +251,7 @@ handle_modification_query(Query *parse) Expr *quals; Index result_rel; Oid child; + FindPartitionResult fp_result; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -265,13 +273,13 @@ handle_modification_query(Query *parse) * Parse syntax tree and extract deepest partition (if there is only one * satisfying quals) */ - child = find_deepest_partition(rte->relid, result_rel, quals); + fp_result = 
find_deepest_partition(rte->relid, result_rel, quals, &child); /* * If only one partition is affected, * substitute parent table with partition. */ - if (OidIsValid(child)) + if (fp_result == FP_FOUND) { Relation child_rel, parent_rel; @@ -333,8 +341,8 @@ handle_modification_query(Query *parse) * Find a single deepest subpartition. If there are more than one partitions * satisfies quals or no such partition at all then return InvalidOid. */ -static Oid -find_deepest_partition(Oid relid, Index idx, Expr *quals) +static FindPartitionResult +find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) { const PartRelationInfo *prel; Node *prel_expr; @@ -342,16 +350,19 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals) List *ranges; WrapperNode *wrap; - /* Exit if there's no quals (no use) */ - if (!quals) return InvalidOid; - prel = get_pathman_relation_info(relid); /* Exit if it's not partitioned */ - if (!prel) return InvalidOid; + if (!prel) + return FP_PLAIN_TABLE; /* Exit if we must include parent */ - if (prel->enable_parent) return InvalidOid; + if (prel->enable_parent) + return FP_NON_SINGULAR_RESULT; + + /* Exit if there's no quals (no use) */ + if (!quals) + return FP_NON_SINGULAR_RESULT; /* Prepare partitioning expression */ prel_expr = PrelExpressionForRelid(prel, idx); @@ -370,21 +381,32 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals) if (irange_lower(irange) == irange_upper(irange)) { Oid *children = PrelGetChildrenArray(prel), - partition = children[irange_lower(irange)], + child = children[irange_lower(irange)], subpartition; + FindPartitionResult result; /* * Try to go deeper and see if there is subpartition */ - subpartition = find_deepest_partition(partition, idx, quals); - if (OidIsValid(subpartition)) - return subpartition; - - return partition; + result = find_deepest_partition(child, + idx, + quals, + &subpartition); + switch(result) + { + case FP_FOUND: + *partition = subpartition; + return FP_FOUND; + case 
FP_PLAIN_TABLE: + *partition = child; + return FP_FOUND; + case FP_NON_SINGULAR_RESULT: + return FP_NON_SINGULAR_RESULT; + } } } - return InvalidOid; + return FP_NON_SINGULAR_RESULT; } /* From 8cf2371194719b88fa7c4e3885afd7931671b4c3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 5 May 2017 14:12:18 +0300 Subject: [PATCH 0469/1124] rename some fields in PartRelationInfo --- src/hooks.c | 2 +- src/include/relation_info.h | 36 ++++++++++++++++++++--------------- src/init.c | 4 ++-- src/partition_creation.c | 18 +++++++++--------- src/partition_filter.c | 14 +++++++------- src/pg_pathman.c | 18 +++++++++--------- src/pl_funcs.c | 6 +++--- src/pl_range_funcs.c | 28 +++++++++++++-------------- src/relation_info.c | 38 ++++++++++++++++++------------------- src/utility_stmt_hooking.c | 2 +- 10 files changed, 86 insertions(+), 80 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 5ce731c0..fe40b6be 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -325,7 +325,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, TypeCacheEntry *tce; /* Determine operator type */ - tce = lookup_type_cache(prel->atttype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 73e59232..ef3b5738 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -119,7 +119,7 @@ typedef enum } PartType; /* - * Child relation info for RANGE partitioning + * Child relation info for RANGE partitioning. */ typedef struct { @@ -130,34 +130,38 @@ typedef struct /* * PartRelationInfo - * Per-relation partitioning information + * Per-relation partitioning information. + * Allows us to perform partition pruning. */ typedef struct { Oid key; /* partitioned table's Oid */ - bool valid; /* is this entry valid? 
*/ - bool enable_parent; /* include parent to the plan */ + bool valid, /* is this entry valid? */ + enable_parent; /* should plan include parent? */ PartType parttype; /* partitioning type (HASH | RANGE) */ + /* Partition dispatch info */ uint32 children_count; Oid *children; /* Oids of child partitions */ RangeEntry *ranges; /* per-partition range entry or NULL */ + /* Partitioning expression */ const char *expr_cstr; /* original expression */ Node *expr; /* planned expression */ List *expr_vars; /* vars from expression, lazy */ Bitmapset *expr_atts; /* attnums from expression */ - Oid atttype; /* expression type */ - int32 atttypmod; /* expression type modifier */ - bool attbyval; /* is partitioned column stored by value? */ - int16 attlen; /* length of the partitioned column's type */ - int attalign; /* alignment of the part column's type */ - Oid attcollid; /* collation of the partitioned column */ + /* Partitioning expression's value */ + Oid ev_type; /* expression type */ + int32 ev_typmod; /* expression type modifier */ + bool ev_byval; /* is expression's val stored by value? */ + int16 ev_len; /* length of the expression val's type */ + int ev_align; /* alignment of the expression val's type */ + Oid ev_collid; /* collation of the expression val */ - Oid cmp_proc, /* comparison fuction for 'atttype' */ - hash_proc; /* hash function for 'atttype' */ + Oid cmp_proc, /* comparison fuction for 'ev_type' */ + hash_proc; /* hash function for 'ev_type' */ } PartRelationInfo; #define PART_EXPR_VARNO ( 1 ) @@ -176,6 +180,7 @@ typedef struct /* * PartBoundInfo * Cached bounds of the specified partition. + * Allows us to deminish overhead of check constraints. 
*/ typedef struct { @@ -255,6 +260,7 @@ PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) { Node *expr; + /* TODO: implement some kind of cache */ if (rel_index != PART_EXPR_VARNO) { expr = copyObject(prel->expr); @@ -360,7 +366,7 @@ FreeRangesArray(PartRelationInfo *prel) if (prel->ranges) { /* Remove persistent entries if not byVal */ - if (!prel->attbyval) + if (!prel->ev_byval) { for (i = 0; i < PrelChildrenCount(prel); i++) { @@ -370,8 +376,8 @@ FreeRangesArray(PartRelationInfo *prel) if (!OidIsValid(child)) continue; - FreeBound(&prel->ranges[i].min, prel->attbyval); - FreeBound(&prel->ranges[i].max, prel->attbyval); + FreeBound(&prel->ranges[i].min, prel->ev_byval); + FreeBound(&prel->ranges[i].max, prel->ev_byval); } } diff --git a/src/init.c b/src/init.c index dbb028da..680b754a 100644 --- a/src/init.c +++ b/src/init.c @@ -878,7 +878,7 @@ validate_range_constraint(const Expr *expr, *lower_null = *upper_null = true; /* Find type cache entry for partitioned expression type */ - tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); /* Is it an AND clause? 
*/ if (and_clause((Node *) expr)) @@ -1034,7 +1034,7 @@ read_opexpr_const(const OpExpr *opexpr, /* Cast Const to a proper type if needed */ *value = perform_type_cast(boundary->constvalue, getBaseType(boundary->consttype), - getBaseType(prel->atttype), + getBaseType(prel->ev_type), &cast_success); if (!cast_success) diff --git a/src/partition_creation.c b/src/partition_creation.c index 7f170808..4b6bd845 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -354,7 +354,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, shout_if_prel_is_invalid(relid, prel, PT_RANGE); /* Fetch base types of prel->atttype & value_type */ - base_bound_type = getBaseType(prel->atttype); + base_bound_type = getBaseType(prel->ev_type); base_value_type = getBaseType(value_type); /* Search for a suitable partition if we didn't hold it */ @@ -398,12 +398,12 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Copy datums in order to protect them from cache invalidation */ bound_min = CopyBound(&ranges[0].min, - prel->attbyval, - prel->attlen); + prel->ev_byval, + prel->ev_len); bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, - prel->attbyval, - prel->attlen); + prel->ev_byval, + prel->ev_len); /* Check if interval is set */ if (isnull[Anum_pathman_config_range_interval - 1]) @@ -426,7 +426,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, &bound_min, &bound_max, base_bound_type, interval_binary, interval_type, value, base_value_type, - prel->attcollid); + prel->ev_collid); } } else @@ -1259,15 +1259,15 @@ check_range_available(Oid parent_relid, /* Fetch comparison function */ fill_type_cmp_fmgr_info(&cmp_func, getBaseType(value_type), - getBaseType(prel->atttype)); + getBaseType(prel->ev_type)); ranges = PrelGetRangesArray(prel); for (i = 0; i < PrelChildrenCount(prel); i++) { int c1, c2; - c1 = cmp_bounds(&cmp_func, prel->attcollid, start, &ranges[i].max); - c2 = 
cmp_bounds(&cmp_func, prel->attcollid, end, &ranges[i].min); + c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); /* There's something! */ if (c1 < 0 && c2 > 0) diff --git a/src/partition_filter.c b/src/partition_filter.c index 0e7225fa..52b13bb6 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -396,10 +396,10 @@ find_partitions_for_value(Datum value, Oid value_type, temp_const.constisnull = false; /* ... and some other important data */ - CopyToTempConst(consttypmod, atttypmod); - CopyToTempConst(constcollid, attcollid); - CopyToTempConst(constlen, attlen); - CopyToTempConst(constbyval, attbyval); + CopyToTempConst(consttypmod, ev_typmod); + CopyToTempConst(constcollid, ev_collid); + CopyToTempConst(constlen, ev_len); + CopyToTempConst(constbyval, ev_byval); /* We use 0 since varno doesn't matter for Const */ InitWalkerContext(&wcxt, 0, prel, NULL, true); @@ -431,7 +431,7 @@ select_partition_for_insert(Datum value, Oid value_type, else if (nparts == 0) { selected_partid = create_partitions_for_value(PrelParentRelid(prel), - value, prel->atttype); + value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); @@ -446,7 +446,7 @@ select_partition_for_insert(Datum value, Oid value_type, /* Could not find suitable partition */ if (rri_holder == NULL) elog(ERROR, ERR_PART_ATTR_NO_PART, - datum_to_cstring(value, prel->atttype)); + datum_to_cstring(value, prel->ev_type)); return rri_holder; } @@ -630,7 +630,7 @@ partition_filter_exec(CustomScanState *node) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(value, prel->atttype, prel, + rri_holder = select_partition_for_insert(value, prel->ev_type, prel, &state->result_parts, estate); /* Switch back and clean up per-tuple context */ diff --git a/src/pg_pathman.c 
b/src/pg_pathman.c index ae338ac8..5c5caa0f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -740,11 +740,11 @@ handle_const(const Const *c, WalkerContext *context) bool cast_success; /* Peform type cast if types mismatch */ - if (prel->atttype != c->consttype) + if (prel->ev_type != c->consttype) { value = perform_type_cast(c->constvalue, getBaseType(c->consttype), - getBaseType(prel->atttype), + getBaseType(prel->ev_type), &cast_success); if (!cast_success) @@ -770,14 +770,14 @@ handle_const(const Const *c, WalkerContext *context) fill_type_cmp_fmgr_info(&cmp_finfo, getBaseType(c->consttype), - getBaseType(prel->atttype)); + getBaseType(prel->ev_type)); select_range_partitions(c->constvalue, &cmp_finfo, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, - prel->attcollid, + prel->ev_collid, result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); @@ -1042,7 +1042,7 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, return; } - tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); /* There's no strategy for this operator, go to end */ @@ -1077,17 +1077,17 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, * if operator collation is different from default attribute collation. * In this case we just return all of them. */ - if (expr->opcollid != prel->attcollid && + if (expr->opcollid != prel->ev_collid && strategy != BTEqualStrategyNumber) goto binary_opexpr_return; collid = OidIsValid(expr->opcollid) ? 
expr->opcollid : - prel->attcollid; + prel->ev_collid; fill_type_cmp_fmgr_info(&cmp_func, getBaseType(c->consttype), - getBaseType(prel->atttype)); + getBaseType(prel->ev_type)); select_range_partitions(c->constvalue, &cmp_func, @@ -1122,7 +1122,7 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, int strategy; /* Determine operator type */ - tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); result->rangeset = list_make1_irange_full(prel, IR_LOSSY); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 6182cc1a..5682d5e2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -186,7 +186,7 @@ get_partition_key_type(PG_FUNCTION_ARGS) prel = get_pathman_relation_info(relid); shout_if_prel_is_invalid(relid, prel, PT_ANY); - PG_RETURN_OID(prel->atttype); + PG_RETURN_OID(prel->ev_type); } /* @@ -486,7 +486,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) { Datum rmin = CStringGetTextDatum( datum_to_cstring(BoundGetValue(&re->min), - prel->atttype)); + prel->ev_type)); values[Anum_pathman_pl_range_min - 1] = rmin; } @@ -497,7 +497,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) { Datum rmax = CStringGetTextDatum( datum_to_cstring(BoundGetValue(&re->max), - prel->atttype)); + prel->ev_type)); values[Anum_pathman_pl_range_max - 1] = rmax; } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index fff7c76d..b2ee72c6 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -407,10 +407,10 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->atttype)) + if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", - 
format_type_be(getBaseType(prel->atttype))))); + format_type_be(getBaseType(prel->ev_type))))); ranges = PrelGetRangesArray(prel); @@ -425,8 +425,8 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) elems[1] = ranges[i].max; arr = construct_infinitable_array(elems, 2, - prel->atttype, prel->attlen, - prel->attbyval, prel->attalign); + prel->ev_type, prel->ev_len, + prel->ev_byval, prel->ev_align); PG_RETURN_ARRAYTYPE_P(arr); } @@ -474,10 +474,10 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->atttype)) + if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->atttype))))); + format_type_be(getBaseType(prel->ev_type))))); /* Now we have to deal with 'idx' */ @@ -505,10 +505,10 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) elems[1] = ranges[partition_idx].max; PG_RETURN_ARRAYTYPE_P(construct_infinitable_array(elems, 2, - prel->atttype, - prel->attlen, - prel->attbyval, - prel->attalign)); + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align)); } @@ -690,7 +690,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } /* Check that partitions are adjacent */ - check_range_adjacence(prel->cmp_proc, prel->attcollid, rentry_list); + check_range_adjacence(prel->cmp_proc, prel->ev_collid, rentry_list); /* First determine the bounds of a new constraint */ first = (RangeEntry *) linitial(rentry_list); @@ -698,7 +698,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Swap ranges if 'last' < 'first' */ fmgr_info(prel->cmp_proc, &cmp_proc); - if (cmp_bounds(&cmp_proc, prel->attcollid, &last->min, &first->min) < 0) + if (cmp_bounds(&cmp_proc, prel->ev_collid, &last->min, &first->min) < 0) { 
RangeEntry *tmp = last; @@ -709,7 +709,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Drop old constraint and create a new one */ modify_range_constraint(parts[0], prel->expr_cstr, - prel->atttype, + prel->ev_type, &first->min, &last->max); @@ -792,7 +792,7 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Drop old constraint and create a new one */ modify_range_constraint(next->child_oid, prel->expr_cstr, - prel->atttype, + prel->ev_type, &cur->min, &next->max); } diff --git a/src/relation_info.c b/src/relation_info.c index cfff65e0..1a4e9a38 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -167,7 +167,7 @@ refresh_pathman_relation_info(Oid relid, prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); /* Read config values */ - prel->atttype = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); + prel->ev_type = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); /* Expression and attname should be saved in cache context */ @@ -190,23 +190,23 @@ refresh_pathman_relation_info(Oid relid, MemoryContextSwitchTo(old_mcxt); - htup = SearchSysCache1(TYPEOID, prel->atttype); + htup = SearchSysCache1(TYPEOID, prel->ev_type); if (HeapTupleIsValid(htup)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(htup); - prel->atttypmod = typtup->typtypmod; - prel->attcollid = typtup->typcollation; + prel->ev_typmod = typtup->typtypmod; + prel->ev_collid = typtup->typcollation; ReleaseSysCache(htup); } - else elog(ERROR, "cache lookup failed for type %u", prel->atttype); + else elog(ERROR, "cache lookup failed for type %u", prel->ev_type); /* Fetch HASH & CMP fuctions and other stuff from type cache */ - typcache = lookup_type_cache(prel->atttype, + typcache = lookup_type_cache(prel->ev_type, TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); - prel->attbyval = typcache->typbyval; - prel->attlen = typcache->typlen; - prel->attalign = 
typcache->typalign; + prel->ev_byval = typcache->typbyval; + prel->ev_len = typcache->typlen; + prel->ev_align = typcache->typalign; prel->cmp_proc = typcache->cmp_proc; prel->hash_proc = typcache->hash_proc; @@ -493,12 +493,12 @@ fill_prel_with_partitions(PartRelationInfo *prel, old_mcxt = MemoryContextSwitchTo(cache_mcxt); { prel->ranges[i].min = CopyBound(&bound_info->range_min, - prel->attbyval, - prel->attlen); + prel->ev_byval, + prel->ev_len); prel->ranges[i].max = CopyBound(&bound_info->range_max, - prel->attbyval, - prel->attlen); + prel->ev_byval, + prel->ev_len); } MemoryContextSwitchTo(old_mcxt); } @@ -526,7 +526,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Prepare function info */ fmgr_info(prel->cmp_proc, &cmp_info.flinfo); - cmp_info.collid = prel->attcollid; + cmp_info.collid = prel->ev_collid; /* Sort partitions by RangeEntry->min asc */ qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), @@ -1099,7 +1099,7 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) /* Initialize other fields */ pbin_local.child_rel = partition; - pbin_local.byval = prel->attbyval; + pbin_local.byval = prel->ev_byval; /* Try to build constraint's expression tree (may emit ERROR) */ con_expr = get_partition_constraint_expr(partition); @@ -1220,14 +1220,14 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, pbin->range_min = lower_null ? MakeBoundInf(MINUS_INFINITY) : MakeBound(datumCopy(lower, - prel->attbyval, - prel->attlen)); + prel->ev_byval, + prel->ev_len)); pbin->range_max = upper_null ? 
MakeBoundInf(PLUS_INFINITY) : MakeBound(datumCopy(upper, - prel->attbyval, - prel->attlen)); + prel->ev_byval, + prel->ev_len)); /* Switch back */ MemoryContextSwitchTo(old_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 64d563db..43bec993 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -652,7 +652,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, - prel->atttype, prel, + prel->ev_type, prel, &parts_storage, estate); child_result_rel = rri_holder->result_rel_info; estate->es_result_relation_info = child_result_rel; From ba578fe311f3fbcde8cddb94a1db8811f94e174c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 5 May 2017 14:36:53 +0300 Subject: [PATCH 0470/1124] light refactoring (get rid of const Node *varnode) --- src/pg_pathman.c | 61 +++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 5c5caa0f..a7d6092e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -56,21 +56,17 @@ static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); -static void handle_binary_opexpr(WalkerContext *context, - WrapperNode *result, - const Node *varnode, - const Const *c); +static void handle_binary_opexpr(const Const *c, WalkerContext *context, + WrapperNode *result); static void handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result, - const Node *varnode); + WrapperNode *result); -static bool pull_var_param(const WalkerContext *context, - const OpExpr *expr, - Node **var_ptr, - Node **param_ptr); +static bool is_key_op_param(const OpExpr *expr, + const WalkerContext *context, + Node **param_ptr); -static 
Const *extract_const(WalkerContext *wcxt, Param *param); +static Const *extract_const(Param *param, WalkerContext *wcxt); /* Copied from PostgreSQL (allpaths.c) */ @@ -93,13 +89,13 @@ static void generate_mergeappend_paths(PlannerInfo *root, /* We can transform Param into Const provided that 'econtext' is available */ -#define IsConstValue(wcxt, node) \ +#define IsConstValue(node, wcxt) \ ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) -#define ExtractConst(wcxt, node) \ +#define ExtractConst(node, wcxt) \ ( \ IsA((node), Param) ? \ - extract_const((wcxt), (Param *) (node)) : \ + extract_const((Param *) (node), (wcxt)) : \ ((Const *) (node)) \ ) @@ -994,7 +990,7 @@ static WrapperNode * handle_opexpr(const OpExpr *expr, WalkerContext *context) { WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - Node *var, *param; + Node *param; const PartRelationInfo *prel = context->prel; result->orig = (const Node *) expr; @@ -1002,17 +998,18 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) if (list_length(expr->args) == 2) { - if (pull_var_param(context, expr, &var, ¶m)) + /* Is it KEY OP PARAM or PARAM OP KEY? 
*/ + if (is_key_op_param(expr, context, ¶m)) { - if (IsConstValue(context, param)) + if (IsConstValue(param, context)) { - handle_binary_opexpr(context, result, var, - ExtractConst(context, param)); + handle_binary_opexpr(ExtractConst(param, context), context, result); return result; } + /* TODO: estimate selectivity for param if it's Var */ else if (IsA(param, Param) || IsA(param, Var)) { - handle_binary_opexpr_param(prel, result, var); + handle_binary_opexpr_param(prel, result); return result; } } @@ -1024,10 +1021,10 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) } /* Binary operator handler */ -/* FIXME: varnode */ static void -handle_binary_opexpr(WalkerContext *context, WrapperNode *result, - const Node *varnode, const Const *c) +handle_binary_opexpr(const Const *c, + WalkerContext *context, + WrapperNode *result) { int strategy; TypeCacheEntry *tce; @@ -1112,10 +1109,9 @@ handle_binary_opexpr(WalkerContext *context, WrapperNode *result, } /* Estimate selectivity of parametrized quals */ -/* FIXME: varnode */ static void handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result, const Node *varnode) + WrapperNode *result) { const OpExpr *expr = (const OpExpr *) result->orig; TypeCacheEntry *tce; @@ -1131,30 +1127,27 @@ handle_binary_opexpr_param(const PartRelationInfo *prel, /* - * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where KEY is - * partition expression and PARAM is whatever. + * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where + * KEY is partitioning expression and PARAM is whatever. * * NOTE: returns false if partition key is not in expression. 
*/ static bool -pull_var_param(const WalkerContext *context, - const OpExpr *expr, - Node **var_ptr, - Node **param_ptr) +is_key_op_param(const OpExpr *expr, + const WalkerContext *context, + Node **param_ptr) /* ret value #1 */ { Node *left = linitial(expr->args), *right = lsecond(expr->args); if (match_expr_to_operand(context->prel_expr, left)) { - *var_ptr = left; *param_ptr = right; return true; } if (match_expr_to_operand(context->prel_expr, right)) { - *var_ptr = right; *param_ptr = left; return true; } @@ -1164,7 +1157,7 @@ pull_var_param(const WalkerContext *context, /* Extract (evaluate) Const from Param node */ static Const * -extract_const(WalkerContext *wcxt, Param *param) +extract_const(Param *param, WalkerContext *wcxt) { ExprState *estate = ExecInitExpr((Expr *) param, NULL); bool isnull; From 6ee0093417d862180c33c907dc728620b8e3c6ba Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 5 May 2017 16:06:07 +0300 Subject: [PATCH 0471/1124] improve error message for case when default range interval is NULL --- expected/pathman_interval.out | 6 +++--- src/partition_creation.c | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 9a66e947..61e9cab3 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -18,7 +18,7 @@ SELECT set_interval('test_interval.abc', NULL::INT2); /* pg_pathman shouldn't be able to create a new partition */ INSERT INTO test_interval.abc VALUES (250); -ERROR: cannot find appropriate partition for key '250' +ERROR: cannot spawn new partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); ERROR: interval should not be trivial @@ -61,7 +61,7 @@ SELECT set_interval('test_interval.abc', NULL::INT4); /* pg_pathman shouldn't be able to create a new partition */ INSERT INTO test_interval.abc VALUES (250); -ERROR: cannot find appropriate partition for key '250' +ERROR: cannot spawn new 
partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); ERROR: interval should not be trivial @@ -104,7 +104,7 @@ SELECT set_interval('test_interval.abc', NULL::INT8); /* pg_pathman shouldn't be able to create a new partition */ INSERT INTO test_interval.abc VALUES (250); -ERROR: cannot find appropriate partition for key '250' +ERROR: cannot spawn new partition for key '250' /* Set a trivial interval */ SELECT set_interval('test_interval.abc', 0); ERROR: interval should not be trivial diff --git a/src/partition_creation.c b/src/partition_creation.c index 4b6bd845..c05293b0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -408,9 +408,11 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Check if interval is set */ if (isnull[Anum_pathman_config_range_interval - 1]) { - elog(ERROR, - "cannot find appropriate partition for key '%s'", - datum_to_cstring(value, value_type)); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot spawn new partition for key '%s'", + datum_to_cstring(value, value_type)), + errdetail("default range interval is NULL"))); } /* Retrieve interval as TEXT from tuple */ From 310d76be751671621b0f22f47d078f566c6c815b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 5 May 2017 16:33:30 +0300 Subject: [PATCH 0472/1124] fix update trigger to support multilevel partitioning --- src/partition_filter.c | 1 + src/pl_funcs.c | 218 ++++++++++++++++++++++++++++++++--------- 2 files changed, 171 insertions(+), 48 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 47ad1e88..bca97afe 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -456,6 +456,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, else selected_partid = parts[0]; /* Replace parent table with a suitable partition */ + /* TODO: write a correct comment */ old_mcxt = 
MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef06c581..b21dad7e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -113,6 +113,10 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static Oid find_target_partition(Relation source_rel, HeapTuple tuple); +static Oid find_topmost_parent(Oid partition); +static Oid find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple); + /* * ------------------------ @@ -1086,26 +1090,26 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) Relation source_rel; - Oid parent_relid, - source_relid, + // Oid parent_relid, + Oid source_relid, target_relid; HeapTuple old_tuple, new_tuple; - Datum value; - Oid value_type; - bool isnull; - ExprDoneCond itemIsDone; + // Datum value; + // Oid value_type; + // bool isnull; + // ExprDoneCond itemIsDone; - Oid *parts; - int nparts; + // Oid *parts; + // int nparts; - ExprContext *econtext; - ExprState *expr_state; - MemoryContext old_mcxt; - PartParentSearch parent_search; - const PartRelationInfo *prel; + // ExprContext *econtext; + // ExprState *expr_state; + // MemoryContext old_mcxt; + // PartParentSearch parent_search; + // const PartRelationInfo *prel; /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) @@ -1128,22 +1132,161 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) old_tuple = trigdata->tg_trigtuple; new_tuple = trigdata->tg_newtuple; - /* Find parent relation and partitioning info */ - parent_relid = get_parent_of_partition(source_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + // /* Find parent relation and partitioning info */ + // parent_relid = get_parent_of_partition(source_relid, &parent_search); + // if (parent_search != PPS_ENTRY_PART_PARENT) + // elog(ERROR, "relation \"%s\" is not a partition", + // RelationGetRelationName(source_rel)); + + // 
/* Fetch partition dispatch info */ + // prel = get_pathman_relation_info(parent_relid); + // shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + + // /* Execute partitioning expression */ + // econtext = CreateStandaloneExprContext(); + // old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + // expr_state = pathman_update_trigger_build_expr_state(prel, + // source_rel, + // new_tuple, + // &value_type); + // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); + // MemoryContextSwitchTo(old_mcxt); + + // if (isnull) + // elog(ERROR, ERR_PART_ATTR_NULL); + + // if (itemIsDone != ExprSingleResult) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); + + // /* Search for matching partitions */ + // parts = find_partitions_for_value(value, value_type, prel, &nparts); + + + // /* We can free expression context now */ + // FreeExprContext(econtext, false); + + // if (nparts > 1) + // elog(ERROR, ERR_PART_ATTR_MULTIPLE); + // else if (nparts == 0) + // { + // target_relid = create_partitions_for_value(PrelParentRelid(prel), + // value, value_type); + + // /* get_pathman_relation_info() will refresh this entry */ + // invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + // } + // else target_relid = parts[0]; + + // pfree(parts); + target_relid = find_target_partition(source_rel, new_tuple); + + /* Convert tuple if target partition has changed */ + if (target_relid != source_relid) + { + Relation target_rel; + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ + + /* Lock partition and check if it exists */ + LockRelationOid(target_relid, lockmode); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) + /* TODO: !!! 
*/ + elog(ERROR, ERR_PART_ATTR_NO_PART, "()"); + // elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + + /* Open partition */ + target_rel = heap_open(target_relid, lockmode); + + /* Move tuple from source relation to the selected partition */ + pathman_update_trigger_func_move_tuple(source_rel, target_rel, + old_tuple, new_tuple); + + /* Close partition */ + heap_close(target_rel, lockmode); + + /* We've made some changes */ + PG_RETURN_VOID(); + } + + /* Just return NEW tuple */ + PG_RETURN_POINTER(new_tuple); +} + +/* + * Find partition satisfying values of the tuple + */ +static Oid +find_target_partition(Relation source_rel, HeapTuple tuple) +{ + Oid source_relid, + target_relid, + parent_relid; + + source_relid = RelationGetRelid(source_rel); + parent_relid = find_topmost_parent(source_relid); + target_relid = find_deepest_partition(parent_relid, source_rel, tuple); + + return target_relid; +} + +static Oid +find_topmost_parent(Oid relid) +{ + Oid last; + PartParentSearch parent_search; + + last = relid; + + /* Iterate through parents until the topmost */ + while (1) + { + Oid parent = get_parent_of_partition(last, &parent_search); + + if (parent_search != PPS_ENTRY_PART_PARENT) + break; + last = parent; + } + + /* If relation doesn't have parent then just throw an error */ + if (last == relid) elog(ERROR, "relation \"%s\" is not a partition", - RelationGetRelationName(source_rel)); + get_rel_name(relid)); + + return last; +} + +/* + * Recursive search for the deepest partition satisfying the given tuple + */ +static Oid +find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple) +{ + const PartRelationInfo *prel; + Oid *parts; + int nparts; + + ExprContext *econtext; + ExprState *expr_state; + MemoryContext old_mcxt; + + Datum value; + Oid value_type; + bool isnull; + ExprDoneCond itemIsDone; + + Oid target_relid; + Oid subpartition; /* Fetch partition dispatch info */ - prel = get_pathman_relation_info(parent_relid); - 
shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + prel = get_pathman_relation_info(parent); + if (!prel) + return InvalidOid; /* Execute partitioning expression */ econtext = CreateStandaloneExprContext(); old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); expr_state = pathman_update_trigger_build_expr_state(prel, source_rel, - new_tuple, + tuple, &value_type); value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); MemoryContextSwitchTo(old_mcxt); @@ -1170,37 +1313,16 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); } - else target_relid = parts[0]; - + else + target_relid = parts[0]; pfree(parts); - /* Convert tuple if target partition has changed */ - if (target_relid != source_relid) - { - Relation target_rel; - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ - - /* Lock partition and check if it exists */ - LockRelationOid(target_relid, lockmode); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) - elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); - - /* Open partition */ - target_rel = heap_open(target_relid, lockmode); - - /* Move tuple from source relation to the selected partition */ - pathman_update_trigger_func_move_tuple(source_rel, target_rel, - old_tuple, new_tuple); - - /* Close partition */ - heap_close(target_rel, lockmode); - - /* We've made some changes */ - PG_RETURN_VOID(); - } + /* Try to go deeper recursively and see if there is subpartition */ + subpartition = find_deepest_partition(target_relid, source_rel, tuple); + if (OidIsValid(subpartition)) + return subpartition; - /* Just return NEW tuple */ - PG_RETURN_POINTER(new_tuple); + return target_relid; } struct replace_vars_cxt From 85823b2cabbdca59d8439268f14be77264410225 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 5 May 2017 17:58:30 +0300 Subject: [PATCH 0473/1124] add parent`s 
columns to column list for update trigger --- src/pl_funcs.c | 95 +++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 66 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b21dad7e..ef8e6550 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -113,6 +113,7 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static void collect_update_trigger_columns(Oid relid, List **columns); static Oid find_target_partition(Relation source_rel, HeapTuple tuple); static Oid find_topmost_parent(Oid partition); static Oid find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple); @@ -1087,30 +1088,12 @@ Datum pathman_update_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; - Relation source_rel; - - // Oid parent_relid, Oid source_relid, target_relid; - HeapTuple old_tuple, new_tuple; - // Datum value; - // Oid value_type; - // bool isnull; - // ExprDoneCond itemIsDone; - - // Oid *parts; - // int nparts; - - // ExprContext *econtext; - // ExprState *expr_state; - // MemoryContext old_mcxt; - // PartParentSearch parent_search; - // const PartRelationInfo *prel; - /* Handle user calls */ if (!CALLED_AS_TRIGGER(fcinfo)) elog(ERROR, "this function should not be called directly"); @@ -1132,54 +1115,11 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) old_tuple = trigdata->tg_trigtuple; new_tuple = trigdata->tg_newtuple; - // /* Find parent relation and partitioning info */ - // parent_relid = get_parent_of_partition(source_relid, &parent_search); - // if (parent_search != PPS_ENTRY_PART_PARENT) - // elog(ERROR, "relation \"%s\" is not a partition", - // RelationGetRelationName(source_rel)); - - // /* Fetch partition dispatch info */ - // prel = get_pathman_relation_info(parent_relid); - // shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - - // /* Execute partitioning expression */ - // econtext = 
CreateStandaloneExprContext(); - // old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); - // expr_state = pathman_update_trigger_build_expr_state(prel, - // source_rel, - // new_tuple, - // &value_type); - // value = ExecEvalExpr(expr_state, econtext, &isnull, &itemIsDone); - // MemoryContextSwitchTo(old_mcxt); - - // if (isnull) - // elog(ERROR, ERR_PART_ATTR_NULL); - - // if (itemIsDone != ExprSingleResult) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); - - // /* Search for matching partitions */ - // parts = find_partitions_for_value(value, value_type, prel, &nparts); - - - // /* We can free expression context now */ - // FreeExprContext(econtext, false); - - // if (nparts > 1) - // elog(ERROR, ERR_PART_ATTR_MULTIPLE); - // else if (nparts == 0) - // { - // target_relid = create_partitions_for_value(PrelParentRelid(prel), - // value, value_type); - - // /* get_pathman_relation_info() will refresh this entry */ - // invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); - // } - // else target_relid = parts[0]; - - // pfree(parts); + /* Find (or create) target partition */ target_relid = find_target_partition(source_rel, new_tuple); + /* TODO: check for InvalidOid */ + /* Convert tuple if target partition has changed */ if (target_relid != source_relid) { @@ -1549,7 +1489,7 @@ create_update_triggers(PG_FUNCTION_ARGS) const char *trigname; const PartRelationInfo *prel; uint32 i; - List *columns; + List *columns = NIL; /* Check that table is partitioned */ prel = get_pathman_relation_info(parent); @@ -1559,7 +1499,8 @@ create_update_triggers(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Create trigger for parent */ - columns = PrelExpressionColumnNames(prel); + // columns = PrelExpressionColumnNames(prel); + collect_update_trigger_columns(parent, &columns); create_single_update_trigger_internal(parent, trigname, columns); /* Fetch children array */ @@ -1572,6 +1513,28 @@ 
create_update_triggers(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +static void +collect_update_trigger_columns(Oid relid, List **columns) +{ + const PartRelationInfo *prel; + Oid parent; + PartParentSearch parent_search; + + prel = get_pathman_relation_info(relid); + if (!prel) + return; + + /* Collect columns from current level */ + *columns = list_concat(*columns, PrelExpressionColumnNames(prel)); + + /* Collect columns from parent */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + return; + + collect_update_trigger_columns(parent, columns); +} + /* Create an UPDATE trigger for partition */ Datum create_single_update_trigger(PG_FUNCTION_ARGS) From a3a841bac18cc76e66490b916fa9fc0d427e1eae Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 5 May 2017 18:28:28 +0300 Subject: [PATCH 0474/1124] refactoring, remove column 'atttype' from table pathman_config --- expected/pathman_basic.out | 16 +++--- expected/pathman_calamity.out | 26 ++++----- expected/pathman_column_type.out | 18 +++--- expected/pathman_interval.out | 32 +++++------ expected/pathman_permissions.out | 6 +- init.sql | 45 ++++++++------- sql/pathman_calamity.sql | 12 ++-- sql/pathman_column_type.sql | 6 +- sql/pathman_interval.sql | 8 +-- src/include/partition_creation.h | 2 +- src/include/pathman.h | 3 +- src/include/relation_info.h | 2 +- src/init.c | 10 +--- src/partition_creation.c | 31 +++++++---- src/pl_funcs.c | 8 +-- src/pl_range_funcs.c | 96 ++++++++++++++++++++++++-------- src/relation_info.c | 25 ++++----- 17 files changed, 196 insertions(+), 150 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 2c96c7bc..6900ee9e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1461,16 +1461,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | 
expression_p | atttype -----------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} | 1114 + partrel | attname | parttype | range_interval | expression_p +----------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 20 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype ----------+---------+----------+----------------+--------------+--------- + partrel | attname | parttype | range_interval | expression_p +---------+---------+----------+----------------+-------------- (0 rows) /* Check overlaps */ @@ -1632,9 +1632,9 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 5 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype ---------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------+--------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 23 + partrel | attname | parttype | range_interval | expression_p 
+--------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------ + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index d122908d..36b546f5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -270,20 +270,20 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ -ERROR: 'atttype' should not be NULL -SELECT validate_interval_value('interval'::regtype, NULL, '1 mon'); /* not ok */ -ERROR: 'parttype' should not be NULL -SELECT validate_interval_value('int4'::regtype, 2, '1 mon'); /* not ok */ -ERROR: invalid input syntax for integer: "1 mon" -SELECT validate_interval_value('interval'::regtype, 1, '1 mon'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value('interval'::regtype, 2, NULL); /* OK */ - validate_interval_value -------------------------- - t -(1 row) - +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +ERROR: cannot find type name for attribute "expr" of relation "pg_class" +SELECT 
validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ +ERROR: unrecognized token: "cooked_expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +ERROR: cannot find type name for attribute "expr" of relation "pg_class" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); validate_relname diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index b89bc448..66e916cb 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -32,10 +32,10 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* check that parsed expression was cleared */ -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype ------------------------+---------+----------+----------------+--------------+--------- - test_column_type.test | val | 2 | 10 | | +SELECT partrel, expression_p FROM pathman_config; + partrel | expression_p +-----------------------+-------------- + test_column_type.test | (1 row) /* make sure that everything works properly */ @@ -44,11 +44,11 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -/* check that expression, atttype is changed */ -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype ------------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- - test_column_type.test | val | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1700 +/* check that expression has been built */ +SELECT partrel, expression_p FROM pathman_config; + partrel | expression_p 
+-----------------------+------------------------------------------------------------------------------------------------------------------------- + test_column_type.test | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 61e9cab3..ed86c3a5 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -37,10 +37,10 @@ SELECT set_interval('test_interval.abc', 1000); (1 row) INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 21 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 21 +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | 1000 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -80,10 +80,10 @@ SELECT set_interval('test_interval.abc', 1000); (1 row) INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval 
+-------------------+---------------- + test_interval.abc | 1000 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -123,10 +123,10 @@ SELECT set_interval('test_interval.abc', 1000); (1 row) INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype --------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- - test_interval.abc | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 20 +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | 1000 (1 row) DROP TABLE test_interval.abc CASCADE; @@ -156,10 +156,10 @@ SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); (1 row) -SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p | atttype --------------------+---------+----------+----------------+-------------------------------------------------------------------------------------------------------------------------+--------- - test_interval.abc | dt | 2 | @ 1 mon | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 1082 +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | @ 1 mon (1 row) DROP TABLE test_interval.abc CASCADE; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index d4b509d3..bea7f79c 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -33,9 +33,9 @@ NOTICE: sequence "user1_table_seq" does not exist, skipping /* Should be able to see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | attname | 
parttype | range_interval | expression_p | atttype --------------------------+---------+----------+----------------+-----------------------------------------------------------------------------------------------------------------------+--------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} | 23 + partrel | attname | parttype | range_interval | expression_p +-------------------------+---------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- + permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) SELECT * FROM pathman_config_params; diff --git a/init.sql b/init.sql index 06840410..bb6da5bc 100644 --- a/init.sql +++ b/init.sql @@ -15,37 +15,40 @@ * text to Datum */ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( - atttype OID, + partrel REGCLASS, + expression TEXT, parttype INTEGER, - range_interval TEXT) + range_interval TEXT, + expression_p TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; /* - * Pathman config - * partrel - regclass (relation type, stored as Oid) - * attname - partitioning key - * parttype - partitioning type: - * 1 - HASH - * 2 - RANGE - * range_interval - base interval for RANGE partitioning as string + * Main config. 
+ * partrel - regclass (relation type, stored as Oid) + * attname - partitioning expression (key) + * parttype - partitioning type: (1 - HASH, 2 - RANGE) + * range_interval - base interval for RANGE partitioning as string + * expression_p - cooked partitioning expression (parsed & rewritten) */ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, /* expression */ + attname TEXT NOT NULL, parttype INTEGER NOT NULL, - range_interval TEXT, - expression_p TEXT, /* parsed expression (until plan) */ - atttype OID, /* expression type */ + range_interval TEXT DEFAULT NULL, + expression_p TEXT DEFAULT NULL, /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), /* check for correct interval */ - CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(atttype, - parttype, - range_interval)) + CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + attname, + parttype, + range_interval, + expression_p)) ); @@ -64,11 +67,11 @@ LANGUAGE C STRICT; /* * Optional parameters for partitioned tables. 
- * partrel - regclass (relation type, stored as Oid) - * enable_parent - add parent table to plan - * auto - enable automatic partition creation - * init_callback - text signature of cb to be executed on partition - * creation + * partrel - regclass (relation type, stored as Oid) + * enable_parent - add parent table to plan + * auto - enable automatic partition creation + * init_callback - text signature of cb to be executed on partition creation + * spawn_using_bgw - use background worker in order to auto create partitions */ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index c78e0bf1..8c53775a 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -126,11 +126,13 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(NULL, 2, '1 mon'); /* not ok */ -SELECT validate_interval_value('interval'::regtype, NULL, '1 mon'); /* not ok */ -SELECT validate_interval_value('int4'::regtype, 2, '1 mon'); /* not ok */ -SELECT validate_interval_value('interval'::regtype, 1, '1 mon'); /* not ok */ -SELECT validate_interval_value('interval'::regtype, 2, NULL); /* OK */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok 
*/ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 94609a2a..758d2f72 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -21,13 +21,13 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* check that parsed expression was cleared */ -SELECT * FROM pathman_config; +SELECT partrel, expression_p FROM pathman_config; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -/* check that expression, atttype is changed */ -SELECT * FROM pathman_config; +/* check that expression has been built */ +SELECT partrel, expression_p FROM pathman_config; SELECT context, entries FROM pathman_cache_stats ORDER BY context; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index ec49254b..59393ca4 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -26,7 +26,7 @@ WHERE partrel = 'test_interval.abc'::REGCLASS; /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; +SELECT partrel, range_interval FROM pathman_config; DROP TABLE test_interval.abc CASCADE; @@ -52,7 +52,7 @@ WHERE partrel = 'test_interval.abc'::REGCLASS; /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; +SELECT partrel, range_interval FROM pathman_config; DROP TABLE test_interval.abc CASCADE; @@ -78,7 +78,7 @@ WHERE partrel = 'test_interval.abc'::REGCLASS; /* Set a normal interval */ SELECT set_interval('test_interval.abc', 1000); INSERT INTO test_interval.abc VALUES (250); -SELECT * FROM pathman_config; +SELECT partrel, range_interval FROM pathman_config; DROP TABLE test_interval.abc CASCADE; @@ -95,7 +95,7 @@ SELECT set_interval('test_interval.abc', 
'1 second'::INTERVAL); /* Set a normal interval */ SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); -SELECT * FROM pathman_config; +SELECT partrel, range_interval FROM pathman_config; DROP TABLE test_interval.abc CASCADE; diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index a194c165..42454ca9 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -30,9 +30,9 @@ Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Create one RANGE partition */ Oid create_single_range_partition_internal(Oid parent_relid, - Oid value_type, const Bound *start_value, const Bound *end_value, + Oid value_type, RangeVar *partition_rv, char *tablespace); diff --git a/src/include/pathman.h b/src/include/pathman.h index 6dd2447e..e7a983b6 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -44,13 +44,12 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 6 +#define Natts_pathman_config 5 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_expression 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. 
(text) */ #define Anum_pathman_config_expression_p 5 /* parsed partitioning expression (text) */ -#define Anum_pathman_config_atttype 6 /* partitioned atttype (oid) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ef3b5738..5e3fa46c 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -288,7 +288,7 @@ Node *parse_partitioning_expression(const Oid relid, char **query_string_out, Node **parsetree_out); -Datum plan_partitioning_expression(const Oid relid, +Datum cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type); diff --git a/src/init.c b/src/init.c index 680b754a..0333d263 100644 --- a/src/init.c +++ b/src/init.c @@ -694,10 +694,6 @@ pathman_config_invalidate_parsed_expression(Oid relid) values[Anum_pathman_config_expression_p - 1] = (Datum) 0; nulls[Anum_pathman_config_expression_p - 1] = true; - /* Reset expression type */ - values[Anum_pathman_config_atttype - 1] = (Datum) 0; - nulls[Anum_pathman_config_atttype - 1] = true; - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); /* Form new tuple and perform an update */ @@ -717,7 +713,6 @@ pathman_config_refresh_parsed_expression(Oid relid, ItemPointer iptr) { char *expr_cstr; - Oid expr_type; Datum expr_datum; Relation rel; @@ -725,16 +720,13 @@ pathman_config_refresh_parsed_expression(Oid relid, /* get and parse expression */ expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); - expr_datum = plan_partitioning_expression(relid, expr_cstr, &expr_type); + expr_datum = cook_partitioning_expression(relid, expr_cstr, NULL); pfree(expr_cstr); /* prepare tuple values */ values[Anum_pathman_config_expression_p - 1] = expr_datum; isnull[Anum_pathman_config_expression_p - 1] = false; - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); - isnull[Anum_pathman_config_atttype - 1] = false; 
- rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); htup_new = heap_form_tuple(RelationGetDescr(rel), values, isnull); diff --git a/src/partition_creation.c b/src/partition_creation.c index c05293b0..7707bec1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -93,9 +93,9 @@ static Node *build_partitioning_expression(Oid parent_relid, /* Create one RANGE partition [start_value, end_value) */ Oid create_single_range_partition_internal(Oid parent_relid, - Oid value_type, const Bound *start_value, const Bound *end_value, + Oid value_type, RangeVar *partition_rv, char *tablespace) { @@ -584,8 +584,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); last_partition = create_single_range_partition_internal(parent_relid, - range_bound_type, &bounds[0], &bounds[1], + range_bound_type, NULL, NULL); #ifdef USE_ASSERT_CHECKING @@ -1707,24 +1707,33 @@ build_partitioning_expression(Oid parent_relid, List **columns) /* ret val #2 */ { /* Values extracted from PATHMAN_CONFIG */ - Datum config_values[Natts_pathman_config]; - bool config_nulls[Natts_pathman_config]; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + char *expr_cstr; Node *expr; - char *expr_string; /* Check that table is registered in PATHMAN_CONFIG */ - if (!pathman_config_contains_relation(parent_relid, config_values, - config_nulls, NULL, NULL)) + if (!pathman_config_contains_relation(parent_relid, values, + isnull, NULL, NULL)) elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL); + pfree(expr_cstr); + /* We need expression type for hash functions */ if (expr_type) - *expr_type = DatumGetObjectId(config_values[Anum_pathman_config_atttype - 1]); + { + char *expr_p_cstr; - 
expr_string = TextDatumGetCString(config_values[Anum_pathman_config_expression - 1]); - expr = parse_partitioning_expression(parent_relid, expr_string, NULL, NULL); - pfree(expr_string); + /* We can safely assume that this field will always remain not null */ + Assert(!isnull[Anum_pathman_config_expression_p - 1]); + expr_p_cstr = + TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); + + *expr_type = exprType(stringToNode(expr_p_cstr)); + } if (columns) { diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 5682d5e2..bc018cb5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -774,14 +774,13 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } /* Parse and check expression */ - expr_datum = plan_partitioning_expression(relid, expression, &expr_type); + expr_datum = cook_partitioning_expression(relid, expression, &expr_type); /* Check hash function for HASH partitioning */ if (parttype == PT_HASH) { - TypeCacheEntry *tce; + TypeCacheEntry *tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); - tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); if (!OidIsValid(tce->hash_proc)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -803,9 +802,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_expression_p - 1] = expr_datum; isnull[Anum_pathman_config_expression_p - 1] = false; - values[Anum_pathman_config_atttype - 1] = ObjectIdGetDatum(expr_type); - isnull[Anum_pathman_config_atttype - 1] = false; - /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b2ee72c6..0e6f3c21 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -21,6 +21,7 @@ #include "catalog/heap.h" #include "commands/tablecmds.h" #include "executor/spi.h" +#include "nodes/nodeFuncs.h" #include "parser/parse_relation.h" #include "parser/parse_expr.h" #include "utils/array.h" @@ -82,19 +83,17 @@ Datum 
create_single_range_partition_pl(PG_FUNCTION_ARGS) { Oid parent_relid, - value_type; + partition_relid; /* RANGE boundaries + value type */ Bound start, end; + Oid bounds_type; /* Optional: name & tablespace */ RangeVar *partition_name_rv; char *tablespace; - /* Result (REGCLASS) */ - Oid partition_relid; - /* Handle 'parent_relid' */ if (!PG_ARGISNULL(0)) @@ -104,7 +103,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); - value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 1); start = PG_ARGISNULL(1) ? MakeBoundInf(MINUS_INFINITY) : @@ -135,9 +134,9 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Create a new RANGE partition and return its Oid */ partition_relid = create_single_range_partition_internal(parent_relid, - value_type, &start, &end, + bounds_type, partition_name_rv, tablespace); @@ -162,7 +161,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) /* Bounds */ ArrayType *bounds; - Oid elemtype; + Oid bounds_type; Datum *datums; bool *nulls; int ndatums; @@ -180,7 +179,7 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { bounds = PG_GETARG_ARRAYTYPE_P(1); - elemtype = ARR_ELEMTYPE(bounds); + bounds_type = ARR_ELEMTYPE(bounds); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'bounds' should not be NULL"))); @@ -197,8 +196,8 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) tablespaces = deconstruct_text_array(PG_GETARG_DATUM(3), &ntablespaces); /* Extract bounds */ - get_typlenbyvalalign(elemtype, &typlen, &typbyval, &typalign); - deconstruct_array(bounds, elemtype, + get_typlenbyvalalign(bounds_type, &typlen, &typbyval, &typalign); + deconstruct_array(bounds, bounds_type, typlen, typbyval, typalign, &datums, &nulls, &ndatums); @@ -216,8 +215,8 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) /* Check if bounds array is 
ascending */ fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(elemtype), - getBaseType(elemtype)); + getBaseType(bounds_type), + getBaseType(bounds_type)); /* Validate bounds */ for (i = 0; i < ndatums; i++) @@ -252,9 +251,9 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) char *tablespace = tablespaces ? tablespaces[i] : NULL; (void) create_single_range_partition_internal(parent_relid, - elemtype, &start, &end, + bounds_type, name, tablespace); } @@ -811,28 +810,77 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) Datum validate_interval_value(PG_FUNCTION_ARGS) { - Oid atttype; +#define ARG_PARTREL 0 +#define ARG_EXPRESSION 1 +#define ARG_PARTTYPE 2 +#define ARG_RANGE_INTERVAL 3 +#define ARG_EXPRESSION_P 4 + + Oid partrel; PartType parttype; + char *expr_cstr; + Oid expr_type; - if (PG_ARGISNULL(0)) + if (PG_ARGISNULL(ARG_PARTREL)) + { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'atttype' should not be NULL"))); + errmsg("'partrel' should not be NULL"))); + } + else partrel = PG_GETARG_OID(ARG_PARTREL); + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + elog(ERROR, "relation \"%u\" does not exist", partrel); - if (PG_ARGISNULL(1)) + if (PG_ARGISNULL(ARG_EXPRESSION)) + { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parttype' should not be NULL"))); + errmsg("'expression' should not be NULL"))); + } + else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + + /* + * Fetch partitioning expression's type using + * either user's expression or parsed expression. 
+ */ + if (PG_ARGISNULL(ARG_EXPRESSION_P)) + { + Datum expr_datum; - atttype = PG_GETARG_OID(0); - parttype = DatumGetPartType(PG_GETARG_DATUM(1)); + /* We'll have to parse expression with our own hands */ + expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); + + /* Free both expressions */ + pfree(DatumGetPointer(expr_datum)); + pfree(expr_cstr); + } + else + { + char *expr_p_cstr; + + /* Good, let's use a cached parsed expression */ + expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); + expr_type = exprType(stringToNode(expr_p_cstr)); + + /* Free both expressions */ + pfree(expr_p_cstr); + pfree(expr_cstr); + } + + if (PG_ARGISNULL(ARG_PARTTYPE)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); + } + else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); /* * NULL interval is fine for both HASH and RANGE. * But for RANGE we need to make some additional checks. */ - if (!PG_ARGISNULL(2)) + if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) { - Datum interval_text = PG_GETARG_DATUM(2), + Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), interval_value; Oid interval_type; @@ -843,11 +891,11 @@ validate_interval_value(PG_FUNCTION_ARGS) /* Try converting textual representation */ interval_value = extract_binary_interval_from_text(interval_text, - atttype, + expr_type, &interval_type); /* Check that interval isn't trivial */ - if (interval_is_trivial(atttype, interval_value, interval_type)) + if (interval_is_trivial(expr_type, interval_value, interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("interval should not be trivial"))); diff --git a/src/relation_info.c b/src/relation_info.c index 1a4e9a38..cd96cdc1 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -166,8 +166,7 @@ refresh_pathman_relation_info(Oid relid, /* Set partitioning type */ prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - 
/* Read config values */ - prel->ev_type = DatumGetObjectId(values[Anum_pathman_config_atttype - 1]); + /* Fetch cooked partitioning expression */ expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); /* Expression and attname should be saved in cache context */ @@ -190,6 +189,9 @@ refresh_pathman_relation_info(Oid relid, MemoryContextSwitchTo(old_mcxt); + /* First, fetch type of partitioning expression */ + prel->ev_type = exprType(prel->expr); + htup = SearchSysCache1(TYPEOID, prel->ev_type); if (HeapTupleIsValid(htup)) { @@ -591,16 +593,12 @@ parse_partitioning_expression(const Oid relid, return ((ResTarget *) linitial(select_stmt->targetList))->val; } -/* - * Parses expression related to 'relid', and returns its type, - * raw expression tree, and if specified returns its plan - */ +/* Parse partitioning expression and return its type and nodeToString() */ Datum -plan_partitioning_expression(const Oid relid, +cook_partitioning_expression(const Oid relid, const char *expr_cstr, - Oid *expr_type_out) + Oid *expr_type_out) /* ret value #1 */ { - Node *parsetree; List *querytree_list; TargetEntry *target_entry; @@ -686,16 +684,15 @@ plan_partitioning_expression(const Oid relid, errmsg("functions in partitioning expression must be marked IMMUTABLE"))); Assert(expr); - - /* Set 'expr_type_out' if needed */ - if (expr_type_out) - *expr_type_out = exprType(expr); - expr_serialized = nodeToString(expr); /* Switch to previous mcxt */ MemoryContextSwitchTo(old_mcxt); + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + expr_datum = CStringGetTextDatum(expr_serialized); /* Free memory */ From 0f7f4beb8502cf03e961b3c962f46a4cbf370567 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 5 May 2017 18:49:59 +0300 Subject: [PATCH 0475/1124] Start working on test for fdw, add debug print for slots --- .gitignore | 1 + src/debug_print.c | 84 ++++++++++++++++++++ src/partition_filter.c | 6 +- src/partition_update.c | 8 +- 
tests/python/partitioning_test.py | 123 +++++++++++++++++++++++++++--- 5 files changed, 203 insertions(+), 19 deletions(-) diff --git a/.gitignore b/.gitignore index 9cf8da8f..55a84f78 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ regression.out *.gcda *.gcno *.gcov +*.log pg_pathman--*.sql tags cscope* diff --git a/src/debug_print.c b/src/debug_print.c index 36016861..d70aac51 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -8,12 +8,16 @@ * ------------------------------------------------------------------------ */ +#include #include "rangeset.h" #include "postgres.h" +#include "fmgr.h" +#include "executor/tuptable.h" #include "nodes/bitmapset.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" +#include "utils/lsyscache.h" /* @@ -99,3 +103,83 @@ irange_print(IndexRange irange) return str.data; } + + +/* ---------------- + * printatt + * ---------------- + */ +static char * +printatt(unsigned attributeId, + Form_pg_attribute attributeP, + char *value) +{ + return psprintf("\t%2d: %s%s%s%s\t(typeid = %u, len = %d, typmod = %d, byval = %c)\n", + attributeId, + NameStr(attributeP->attname), + value != NULL ? " = \"" : "", + value != NULL ? value : "", + value != NULL ? "\"" : "", + (unsigned int) (attributeP->atttypid), + attributeP->attlen, + attributeP->atttypmod, + attributeP->attbyval ? 
't' : 'f'); +} + +/* ---------------- + * debugtup - print one tuple for an interactive backend + * ---------------- + */ +static char * +debugtup(TupleTableSlot *slot) +{ + TupleDesc typeinfo = slot->tts_tupleDescriptor; + int natts = typeinfo->natts; + int i; + Datum attr; + char *value; + bool isnull; + Oid typoutput; + bool typisvarlena; + + int result_len = 0; + char *result = (char *) palloc(result_len + 1); + + for (i = 0; i < natts; ++i) + { + char *s; + int len; + + attr = slot_getattr(slot, i + 1, &isnull); + if (isnull) + continue; + getTypeOutputInfo(typeinfo->attrs[i]->atttypid, + &typoutput, &typisvarlena); + + value = OidOutputFunctionCall(typoutput, attr); + + s = printatt((unsigned) i + 1, typeinfo->attrs[i], value); + len = strlen(s); + result = (char *) repalloc(result, result_len + len + 1); + strncpy(result + result_len, s, len); + result_len += len; + } + + result[result_len] = '\0'; + return result; +} + +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +slot_print(TupleTableSlot *slot) +{ + if (TupIsNull(slot)) + return NULL; + + if (!slot->tts_tupleDescriptor) + return NULL; + + return debugtup(slot); +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 9daf8251..874a064d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -657,6 +657,7 @@ partition_filter_exec(CustomScanState *node) EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + ResultRelInfo *saved_resultRelInfo; /* clean ctid for old slot */ state->ctid = NULL; @@ -664,8 +665,9 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); /* Save original ResultRelInfo */ + saved_resultRelInfo = estate->es_result_relation_info; if (!state->result_parts.saved_rel_info) - state->result_parts.saved_rel_info = estate->es_result_relation_info; + state->result_parts.saved_rel_info = saved_resultRelInfo; if (!TupIsNull(slot)) { @@ -732,7 +734,7 @@ 
partition_filter_exec(CustomScanState *node) junkfilter = rri_holder->orig_junkFilter; Assert(junkfilter != NULL); - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { bool isNull; diff --git a/src/partition_update.c b/src/partition_update.c index aaaa4555..74a05f84 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -157,10 +157,8 @@ partition_update_exec(CustomScanState *node) oldtuple = NULL; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) + if (child_state->ctid != NULL) { - Assert(child_state->ctid != NULL); - tupleid = child_state->ctid; tuple_ctid = *tupleid; /* be sure we don't free * ctid!! */ @@ -192,7 +190,7 @@ partition_update_exec(CustomScanState *node) tupleid = NULL; } else - elog(ERROR, "PartitionUpdate supports only relations and foreign tables"); + elog(ERROR, "updates supported only on basic relations and foreign tables"); /* delete old tuple */ estate->es_result_relation_info = child_state->result_parts.saved_rel_info; @@ -266,7 +264,7 @@ ExecDeleteInternal(ItemPointer tupleid, tupleid, oldtuple); if (!dodelete) - elog(ERROR, "In partitioned tables the old row always should be deleted"); + elog(ERROR, "the old row always should be deleted from child table"); } if (resultRelInfo->ri_FdwRoutine) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 9dc404af..e1384945 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -19,6 +19,40 @@ from testgres import get_new_node, stop_all +# set setup base logging config, it can be turned on by `use_logging` +# parameter on node setup + +import logging +import logging.config + +logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version':1, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 
'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) # Helper function for json equality def ordered(obj, skip_keys=None): @@ -53,6 +87,14 @@ def setUp(self): def tearDown(self): stop_all() + def set_trace(self, con, external_command): + ''' this function starts gdb on selected connection ''' + + pid = con.execute('SELECT pg_backend_pid()')[0][0] + p = subprocess.Popen([external_command], stdin=subprocess.PIPE) + p.communicate(str.encode(str(pid))) + input("press ENTER to continue..") + def start_new_pathman_cluster(self, name='test', allows_streaming=False): node = get_new_node(name) node.init(allows_streaming=allows_streaming) @@ -368,12 +410,19 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" + def make_basic_fdw_setup(self): + '''' + Create basic FDW setup: + - create range partitioned table in master + - create foreign server + - create foreign table and insert some data into it + - attach foreign table to partitioned one + + Do not forget to cleanup after use + ''' # Start master server - master = get_new_node('test') + master = get_new_node('test', use_logging=True) master.init() master.append_conf( 'postgresql.conf', @@ -382,13 +431,6 @@ def test_foreign_table(self): master.psql('postgres', 'create extension pg_pathman') master.psql('postgres', 'create extension postgres_fdw') - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and 
insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions master.psql( 'postgres', '''create table abc(id serial, name text); @@ -425,6 +467,22 @@ def test_foreign_table(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') + return (master, fserv) + + @if_fdw_enabled + def test_foreign_table(self): + """Test foreign tables""" + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + + master, fserv = self.make_basic_fdw_setup() + # Check that table attached to partitioned table self.assertEqual( master.safe_psql('postgres', 'select * from ftable'), @@ -469,6 +527,48 @@ def test_foreign_table(self): ) master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') + fserv.cleanup() + master.cleanup() + + fserv.stop() + master.stop() + + def test_update_node_on_fdw_tables(self): + ''' Test update node on foreign tables ''' + + master, fserv = self.make_basic_fdw_setup() + + # create second foreign table + fserv.safe_psql('postgres', 'create table ftable2(id serial, name text)') + fserv.safe_psql('postgres', 'insert into ftable2 values (35, \'foreign\')') + + master.safe_psql( + 'postgres', + '''import foreign schema public limit to (ftable2) + from server fserv into public''' + ) + master.safe_psql( + 'postgres', + 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') + + master.safe_psql('postgres', + 'set pg_pathman.enable_partitionupdate=on') + + with master.connect() as con: + con.begin() + con.execute("set pg_pathman.enable_partitionupdate=on") + con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.commit() + + self.set_trace(con, 'pg_debug') + import ipdb; 
ipdb.set_trace() + pass + + # cases + # - update from local to foreign + # - update from foreign to local + # - update from foreign to foreign + def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1070,7 +1170,6 @@ def test_update_node_plan1(self): node.stop() node.cleanup() - if __name__ == "__main__": unittest.main() From 87224c9abb568c71496500658eaac66d48a124c6 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 10 May 2017 13:37:21 +0300 Subject: [PATCH 0476/1124] Fix UPDATEs from local table to foreign --- src/debug_print.c | 68 +++++++++++++++++++++++++++++++ src/include/partition_filter.h | 2 +- src/partition_filter.c | 31 +++++++------- tests/python/partitioning_test.py | 33 +++++++++------ 4 files changed, 106 insertions(+), 28 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index d70aac51..9734ca06 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -15,6 +15,7 @@ #include "fmgr.h" #include "executor/tuptable.h" #include "nodes/bitmapset.h" +#include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" #include "utils/lsyscache.h" @@ -183,3 +184,70 @@ slot_print(TupleTableSlot *slot) return debugtup(slot); } + +/* + * rt_print + * return contents of range table + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +rt_print(const List *rtable) +{ +#define APPEND_STR(si, ...) 
\ +{ \ + char *line = psprintf(__VA_ARGS__); \ + appendStringInfo(&si, "%s", line); \ + pfree(line); \ +} + + const ListCell *l; + int i = 1; + + StringInfoData str; + + initStringInfo(&str); + APPEND_STR(str, "resno\trefname \trelid\tinFromCl\n"); + APPEND_STR(str, "-----\t---------\t-----\t--------\n"); + + foreach(l, rtable) + { + RangeTblEntry *rte = lfirst(l); + + switch (rte->rtekind) + { + case RTE_RELATION: + APPEND_STR(str, "%d\t%s\t%u\t%c", + i, rte->eref->aliasname, rte->relid, rte->relkind); + break; + case RTE_SUBQUERY: + APPEND_STR(str, "%d\t%s\t[subquery]", + i, rte->eref->aliasname); + break; + case RTE_JOIN: + APPEND_STR(str, "%d\t%s\t[join]", + i, rte->eref->aliasname); + break; + case RTE_FUNCTION: + APPEND_STR(str, "%d\t%s\t[rangefunction]", i, rte->eref->aliasname); + break; + case RTE_VALUES: + APPEND_STR(str, "%d\t%s\t[values list]", i, rte->eref->aliasname); + break; + case RTE_CTE: + APPEND_STR(str, "%d\t%s\t[cte]", i, rte->eref->aliasname); + break; + default: + elog(ERROR, "%d\t%s\t[unknown rtekind]", + i, rte->eref->aliasname); + } + + APPEND_STR(str, "\t%s\t%s\n", (rte->inh ? "inh" : ""), + (rte->inFromCl ? 
"inFromCl" : "")); + + i++; + } + return str.data; +#undef APPEND_STR +} diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 943d5d32..68c57aef 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,7 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - JunkFilter *orig_junkFilter; /* we keep original JunkFilter from + JunkFilter *updates_junkFilter; /* we keep junkfilter from scanned ResultRelInfo here */ } ResultRelInfoHolder; diff --git a/src/partition_filter.c b/src/partition_filter.c index 874a064d..e8c99af2 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -310,12 +310,23 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); + /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ + child_result_rel_info->ri_ConstraintExprs = NULL; + + /* Fill the ResultRelInfo holder */ + rri_holder->partid = partid; + rri_holder->result_rel_info = child_result_rel_info; + rri_holder->updates_junkFilter = NULL; + if (parts_storage->command_type == CMD_UPDATE) { char relkind; - JunkFilter *junkfilter = child_result_rel_info->ri_junkFilter; + JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - relkind = child_result_rel_info->ri_RelationDesc->rd_rel->relkind; + /* we don't need junk work in UPDATE */ + child_result_rel_info->ri_junkFilter = NULL; + + relkind = base_rel->rd_rel->relkind; if (relkind == RELKIND_RELATION) { junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); @@ -333,19 +344,9 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) else elog(ERROR, "wrong type of relation"); + rri_holder->updates_junkFilter = junkfilter; } - /* ri_ConstraintExprs will be initialized by 
ExecRelCheck() */ - child_result_rel_info->ri_ConstraintExprs = NULL; - - /* Fill the ResultRelInfo holder */ - rri_holder->partid = partid; - rri_holder->result_rel_info = child_result_rel_info; - rri_holder->orig_junkFilter = child_result_rel_info->ri_junkFilter; - - if (parts_storage->command_type == CMD_UPDATE) - child_result_rel_info->ri_junkFilter = NULL; - /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); @@ -731,7 +732,7 @@ partition_filter_exec(CustomScanState *node) * we need this step because if there will be conversion * then junk attributes will be removed from slot */ - junkfilter = rri_holder->orig_junkFilter; + junkfilter = rri_holder->updates_junkFilter; Assert(junkfilter != NULL); relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; @@ -769,7 +770,7 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } else if (state->command_type == CMD_UPDATE) - slot = ExecFilterJunk(rri_holder->orig_junkFilter, slot); + slot = ExecFilterJunk(rri_holder->updates_junkFilter, slot); return slot; } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e1384945..e102c332 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -88,7 +88,12 @@ def tearDown(self): stop_all() def set_trace(self, con, external_command): - ''' this function starts gdb on selected connection ''' + ''' this function is used to debug selected backend: + `self.set_trace(con, 'pg_debug')` where `pg_debug` is your + external script that expects pid of postgres backend + + !! 
don't forget to remove calls of this function after debug + ''' pid = con.execute('SELECT pg_backend_pid()')[0][0] p = subprocess.Popen([external_command], stdin=subprocess.PIPE) @@ -422,7 +427,7 @@ def make_basic_fdw_setup(self): ''' # Start master server - master = get_new_node('test', use_logging=True) + master = get_new_node('test') master.init() master.append_conf( 'postgresql.conf', @@ -556,18 +561,22 @@ def test_update_node_on_fdw_tables(self): with master.connect() as con: con.begin() - con.execute("set pg_pathman.enable_partitionupdate=on") - con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.execute('set pg_pathman.enable_partitionupdate=on') + con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - pass + source_relid = con.execute('select tableoid from abc where id=9')[0][0] + dest_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertNotEqual(source_relid, dest_relid) + + # cases + # - update from local to foreign + # - update from foreign to local + # - update from foreign to foreign - # cases - # - update from local to foreign - # - update from foreign to local - # - update from foreign to foreign + con.execute('update abc set id=36 where id=9') + result_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertEqual(result_relid, dest_relid) def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1092,7 +1101,7 @@ def test_concurrent_detach(self): def test_update_node_plan1(self): ''' Test scan on all partititions when using update node. 
- We can't use regression tests here because 9.5 and 9.5 give + We can't use regression tests here because 9.5 and 9.6 give different plans ''' From 5592c6746785125b8b0d43252ef0254b8bfcfa36 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 15:00:04 +0300 Subject: [PATCH 0477/1124] more calamity tests for coverage --- expected/pathman_calamity.out | 56 ++++++++++++++++++++++++++++++----- sql/pathman_calamity.sql | 35 +++++++++++++++++----- src/partition_creation.c | 14 ++++----- src/pl_range_funcs.c | 20 ++++++------- 4 files changed, 93 insertions(+), 32 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 36b546f5..925d6d8d 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -270,19 +270,21 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: relation "1" does not exist -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: 'partrel' should not be NULL -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'oid', NULL, '1 mon', 'cooked_expr'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', 
NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ ERROR: cannot find type name for attribute "expr" of relation "pg_class" -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ ERROR: unrecognized token: "cooked_expr" -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ ERROR: cannot find type name for attribute "expr" of relation "pg_class" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); @@ -749,8 +751,46 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; NOTICE: drop cascades to table calamity.test_range_oid_1 +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('{pg_class}'); /* not ok */ +ERROR: cannot merge partitions +\errverbose +ERROR: XX000: cannot merge partitions +DETAIL: there must be at least two partitions +LOCATION: merge_range_partitions, pl_range_funcs.c:625 +SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +ERROR: cannot merge partitions +\errverbose +ERROR: XX000: cannot merge partitions +DETAIL: relation "pg_class" is not a partition +LOCATION: merge_range_partitions, pl_range_funcs.c:636 +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); +NOTICE: sequence "merge_test_a_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); +NOTICE: sequence "merge_test_b_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + 
+SELECT merge_range_partitions('{calamity.merge_test_a_1, + calamity.merge_test_b_1}'); /* not ok */ +ERROR: cannot merge partitions +\errverbose +ERROR: XX000: cannot merge partitions +DETAIL: all relations must share the same parent +LOCATION: merge_range_partitions, pl_range_funcs.c:645 +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 20 other objects DROP EXTENSION pg_pathman; /* * ------------------------------------- diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 8c53775a..896982d8 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -126,13 +126,14 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'oid', NULL, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'oid', 1, 
'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); @@ -315,6 +316,26 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; + +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('{pg_class}'); /* not ok */ +\errverbose +SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +\errverbose + +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); + +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + +SELECT merge_range_partitions('{calamity.merge_test_a_1, + calamity.merge_test_b_1}'); /* not ok */ +\errverbose + +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; + + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/partition_creation.c b/src/partition_creation.c index 7707bec1..8afc201f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1707,14 +1707,13 @@ build_partitioning_expression(Oid parent_relid, List **columns) /* ret val #2 */ { /* Values extracted from PATHMAN_CONFIG */ - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - char *expr_cstr; - Node *expr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + char *expr_cstr; + Node *expr; /* Check that table is registered in PATHMAN_CONFIG */ - if (!pathman_config_contains_relation(parent_relid, values, - isnull, NULL, NULL)) + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) elog(ERROR, "table \"%s\" 
is not partitioned", get_rel_name_or_relid(parent_relid)); @@ -1732,13 +1731,14 @@ build_partitioning_expression(Oid parent_relid, expr_p_cstr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); + /* Finally return expression type */ *expr_type = exprType(stringToNode(expr_p_cstr)); } if (columns) { /* Column list should be empty */ - Assert(*columns == NIL); + AssertArg(*columns == NIL); extract_column_names(expr, columns); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0e6f3c21..97eb566e 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -803,9 +803,9 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) } /* - * Takes text representation of interval value and checks if it is corresponds - * to partitioning key. The function throws an error if it fails to convert - * text to Datum + * Takes text representation of interval value and checks + * if it corresponds to partitioning expression. + * NOTE: throws an ERROR if it fails to convert text to Datum. */ Datum validate_interval_value(PG_FUNCTION_ARGS) @@ -839,6 +839,13 @@ validate_interval_value(PG_FUNCTION_ARGS) } else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + if (PG_ARGISNULL(ARG_PARTTYPE)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); + } + else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); + /* * Fetch partitioning expression's type using * either user's expression or parsed expression. @@ -867,13 +874,6 @@ validate_interval_value(PG_FUNCTION_ARGS) pfree(expr_cstr); } - if (PG_ARGISNULL(ARG_PARTTYPE)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parttype' should not be NULL"))); - } - else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); - /* * NULL interval is fine for both HASH and RANGE. * But for RANGE we need to make some additional checks. 
From 879dc1d77ce5c1233ffca9f19231282c3e05e939 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 15:09:32 +0300 Subject: [PATCH 0478/1124] remove \errverbose from tests --- expected/pathman_calamity.out | 12 ------------ sql/pathman_calamity.sql | 3 --- 2 files changed, 15 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 925d6d8d..5c493c66 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -754,16 +754,8 @@ NOTICE: drop cascades to table calamity.test_range_oid_1 /* check function merge_range_partitions() */ SELECT merge_range_partitions('{pg_class}'); /* not ok */ ERROR: cannot merge partitions -\errverbose -ERROR: XX000: cannot merge partitions -DETAIL: there must be at least two partitions -LOCATION: merge_range_partitions, pl_range_funcs.c:625 SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ ERROR: cannot merge partitions -\errverbose -ERROR: XX000: cannot merge partitions -DETAIL: relation "pg_class" is not a partition -LOCATION: merge_range_partitions, pl_range_funcs.c:636 CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); @@ -783,10 +775,6 @@ NOTICE: sequence "merge_test_b_seq" does not exist, skipping SELECT merge_range_partitions('{calamity.merge_test_a_1, calamity.merge_test_b_1}'); /* not ok */ ERROR: cannot merge partitions -\errverbose -ERROR: XX000: cannot merge partitions -DETAIL: all relations must share the same parent -LOCATION: merge_range_partitions, pl_range_funcs.c:645 DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 4 other objects DROP SCHEMA calamity CASCADE; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 896982d8..586da042 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -319,9 +319,7 @@ DROP TABLE 
calamity.test_range_oid CASCADE; /* check function merge_range_partitions() */ SELECT merge_range_partitions('{pg_class}'); /* not ok */ -\errverbose SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ -\errverbose CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -331,7 +329,6 @@ SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); SELECT merge_range_partitions('{calamity.merge_test_a_1, calamity.merge_test_b_1}'); /* not ok */ -\errverbose DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; From c0cfde5182c1442396fc5bfbd569eee57fbe8700 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Wed, 10 May 2017 15:50:44 +0300 Subject: [PATCH 0479/1124] fix update trigger column list collecting function; refactoring --- src/partition_filter.c | 96 +++++++++++++++++++----------------------- src/pl_funcs.c | 31 +++++++------- 2 files changed, 59 insertions(+), 68 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index bca97afe..63c59282 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -66,7 +66,9 @@ int pg_pathman_insert_into_fdw = PF_FDW_INSERT_POSTGRES; CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; - +static ExprState *prepare_expr_state(Node *expr, + Oid relid, + EState *estate); static void prepare_rri_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage, @@ -455,8 +457,6 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, } else selected_partid = parts[0]; - /* Replace parent table with a suitable partition */ - /* TODO: write a correct comment */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); rri_holder = scan_result_parts_storage(selected_partid, parts_storage); @@ -471,35 +471,13 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, /* Build an expression state 
if not yet */ if (!rri_holder->expr_state) - { - MemoryContext tmp_mcxt; - Node *expr; - Index varno = 1; - ListCell *lc; - - /* Change varno in Vars according to range table */ - expr = copyObject(subprel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == selected_partid) - { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; - } - varno += 1; - } - - /* Prepare state for expression execution */ - tmp_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(tmp_mcxt); - } + rri_holder->expr_state = prepare_expr_state(subprel->expr, + selected_partid, + estate); Assert(rri_holder->expr_state != NULL); - /* Dive in */ + /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, subprel, parts_storage, @@ -516,6 +494,38 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, return rri_holder; } +static ExprState * +prepare_expr_state(Node *expr, + Oid relid, + EState *estate) +{ + ExprState *expr_state; + MemoryContext old_mcxt; + Index varno = 1; + Node *expr_copy; + ListCell *lc; + + /* Change varno in Vars according to range table */ + expr_copy = copyObject(expr); + foreach(lc, estate->es_range_table) + { + RangeTblEntry *entry = lfirst(lc); + if (entry->relid == relid) + { + if (varno > 1) + ChangeVarNodes(expr_copy, 1, varno, 0); + break; + } + varno += 1; + } + + /* Prepare state for expression execution */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + expr_state = ExecInitExpr((Expr *) expr_copy, NULL); + MemoryContextSwitchTo(old_mcxt); + + return expr_state; +} /* * -------------------------------- @@ -596,40 +606,22 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - Index varno = 1; - Node *expr; - MemoryContext old_mcxt; 
PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; - ListCell *lc; /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); if (state->expr_state == NULL) { + const PartRelationInfo *prel; + /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in Vars according to range table */ - expr = copyObject(prel->expr); - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - if (entry->relid == state->partitioned_table) - { - if (varno > 1) - ChangeVarNodes(expr, 1, varno, 0); - break; - } - varno += 1; - } - - /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_mcxt); + state->expr_state = prepare_expr_state(prel->expr, + state->partitioned_table, + estate); } /* Init ResultRelInfo cache */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef8e6550..982292e5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1118,8 +1118,6 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* Find (or create) target partition */ target_relid = find_target_partition(source_rel, new_tuple); - /* TODO: check for InvalidOid */ - /* Convert tuple if target partition has changed */ if (target_relid != source_relid) { @@ -1129,9 +1127,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) /* Lock partition and check if it exists */ LockRelationOid(target_relid, lockmode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) - /* TODO: !!! 
*/ - elog(ERROR, ERR_PART_ATTR_NO_PART, "()"); - // elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + elog(ERROR, "no suitable target partition"); /* Open partition */ target_rel = heap_open(target_relid, lockmode); @@ -1152,7 +1148,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) } /* - * Find partition satisfying values of the tuple + * Find partition satisfying values of the tuple or return InvalidOid */ static Oid find_target_partition(Relation source_rel, HeapTuple tuple) @@ -1247,20 +1243,24 @@ find_deepest_partition(Oid parent, Relation source_rel, HeapTuple tuple) elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - target_relid = create_partitions_for_value(PrelParentRelid(prel), + /* No partition found, create a new one */ + target_relid = create_partitions_for_value(PrelParentRelid(prel), value, value_type); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + /* Get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); } else + { + /* Found partition */ target_relid = parts[0]; - pfree(parts); - /* Try to go deeper recursively and see if there is subpartition */ - subpartition = find_deepest_partition(target_relid, source_rel, tuple); - if (OidIsValid(subpartition)) - return subpartition; + /* Try to go deeper recursively and see if there is subpartition */ + subpartition = find_deepest_partition(target_relid, source_rel, tuple); + if (OidIsValid(subpartition)) + return subpartition; + } + pfree(parts); return target_relid; } @@ -1499,7 +1499,6 @@ create_update_triggers(PG_FUNCTION_ARGS) trigname = build_update_trigger_name_internal(parent); /* Create trigger for parent */ - // columns = PrelExpressionColumnNames(prel); collect_update_trigger_columns(parent, &columns); create_single_update_trigger_internal(parent, trigname, columns); @@ -1525,7 +1524,7 @@ 
collect_update_trigger_columns(Oid relid, List **columns) return; /* Collect columns from current level */ - *columns = list_concat(*columns, PrelExpressionColumnNames(prel)); + *columns = list_union(*columns, PrelExpressionColumnNames(prel)); /* Collect columns from parent */ parent = get_parent_of_partition(relid, &parent_search); From f3ee021c9706537282344958b10034ae9888dbcc Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 15:55:43 +0300 Subject: [PATCH 0480/1124] refactoring (static inline), introduce function WrongPartType() --- src/include/relation_info.h | 44 +++++++++++++++++++++++++++++++++--- src/partition_creation.c | 3 +-- src/pg_pathman.c | 7 +++--- src/pl_funcs.c | 2 +- src/rangeset.c | 2 +- src/relation_info.c | 45 ++----------------------------------- 6 files changed, 49 insertions(+), 54 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 5e3fa46c..953ad54d 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -308,9 +308,47 @@ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); -/* Safe casts for PartType */ -PartType DatumGetPartType(Datum datum); -char *PartTypeToCString(PartType parttype); +/* PartType wrappers */ + +static inline void +WrongPartType(PartType parttype) +{ + elog(ERROR, "Unknown partitioning type %u", parttype); +} + +static inline PartType +DatumGetPartType(Datum datum) +{ + uint32 parttype = DatumGetUInt32(datum); + + if (parttype < 1 || parttype > 2) + WrongPartType(parttype); + + return (PartType) parttype; +} + +static inline char * +PartTypeToCString(PartType parttype) +{ + static char *hash_str = "1", + *range_str = "2"; + + switch (parttype) + { + case PT_HASH: + return hash_str; + + case PT_RANGE: + return range_str; + + default: + WrongPartType(parttype); + return NULL; /* keep compiler happy */ + } +} + + + /* PartRelationInfo checker */ void 
shout_if_prel_is_invalid(const Oid parent_oid, diff --git a/src/partition_creation.c b/src/partition_creation.c index 8afc201f..63fd71e5 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1588,8 +1588,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) break; default: - elog(ERROR, "Unknown partitioning type %u", cb_params->parttype); - break; + WrongPartType(cb_params->parttype); } /* Fetch function call data */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a7d6092e..3fc4921c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -781,8 +781,7 @@ handle_const(const Const *c, WalkerContext *context) break; default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); - break; + WrongPartType(prel->parttype); } return result; @@ -966,7 +965,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) break; default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); + WrongPartType(prel->parttype); } /* Free resources */ @@ -1100,7 +1099,7 @@ handle_binary_opexpr(const Const *c, } default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); + WrongPartType(prel->parttype); } binary_opexpr_return: diff --git a/src/pl_funcs.c b/src/pl_funcs.c index bc018cb5..fb5ac8c2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -506,7 +506,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) break; default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); + WrongPartType(prel->parttype); } /* Fill tuptable */ diff --git a/src/rangeset.c b/src/rangeset.c index 6715ee0e..15599f74 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -162,7 +162,7 @@ irange_union_internal(IndexRange first, /* IndexRanges intersect */ if (iranges_intersect(first, second)) { - /* Calculate the intersection of 'first' and 'second' */ + /* Calculate the union of 'first' and 'second' */ IndexRange ir_union = irange_union_simple(first, second); /* if lossiness is the same, unite them and skip */ 
diff --git a/src/relation_info.c b/src/relation_info.c index cd96cdc1..ac44cc27 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -509,10 +509,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, default: { DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Unknown partitioning type for relation \"%s\"", - get_rel_name_or_relid(PrelParentRelid(prel))), - errhint(INIT_ERROR_HINT))); + WrongPartType(prel->parttype); } break; } @@ -1243,10 +1240,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, default: { DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Unknown partitioning type for relation \"%s\"", - get_rel_name_or_relid(PrelParentRelid(prel))), - errhint(INIT_ERROR_HINT))); + WrongPartType(prel->parttype); } break; } @@ -1264,41 +1258,6 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) } -/* - * Safe PartType wrapper. - */ -PartType -DatumGetPartType(Datum datum) -{ - uint32 val = DatumGetUInt32(datum); - - if (val < 1 || val > 2) - elog(ERROR, "Unknown partitioning type %u", val); - - return (PartType) val; -} - -char * -PartTypeToCString(PartType parttype) -{ - static char *hash_str = "1", - *range_str = "2"; - - switch (parttype) - { - case PT_HASH: - return hash_str; - - case PT_RANGE: - return range_str; - - default: - elog(ERROR, "Unknown partitioning type %u", parttype); - return NULL; /* keep compiler happy */ - } -} - - /* * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
*/ From f6e97fcf8435d83079b4501a3f2bd77138b8e36a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 17:01:34 +0300 Subject: [PATCH 0481/1124] fix formatting, make use of WrongPartType() --- init.sql | 9 ++++----- range.sql | 2 +- src/include/relation_info.h | 2 -- src/pathman_workers.c | 2 +- src/relation_info.c | 4 +--- 5 files changed, 7 insertions(+), 12 deletions(-) diff --git a/init.sql b/init.sql index bb6da5bc..27172a79 100644 --- a/init.sql +++ b/init.sql @@ -380,11 +380,10 @@ BEGIN USING p_min, p_max INTO ctids; - EXECUTE format(' - WITH data AS ( - DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) - INSERT INTO %1$s SELECT * FROM data', - relation) + EXECUTE format('WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) USING ctids; /* Get number of inserted rows */ diff --git a/range.sql b/range.sql index 371a9f83..89b19ff7 100644 --- a/range.sql +++ b/range.sql @@ -150,7 +150,7 @@ BEGIN end_value, v_atttype::TEXT) USING - expression; + expression; END IF; /* Insert new entry to pathman config */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 953ad54d..c9a66dea 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -348,8 +348,6 @@ PartTypeToCString(PartType parttype) } - - /* PartRelationInfo checker */ void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 27d7a05f..c293ecfb 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -513,7 +513,7 @@ bgw_main_concurrent_part(Datum main_arg) { failures_count = PART_WORKER_MAX_ATTEMPTS; - elog(LOG, "relation %u is not partitioned (or does not exist)", + elog(LOG, "relation \"%u\" is not partitioned (or does not exist)", part_slot->relid); } } diff --git a/src/relation_info.c b/src/relation_info.c index ac44cc27..799cc74c 100644 --- a/src/relation_info.c +++ 
b/src/relation_info.c @@ -1293,9 +1293,7 @@ shout_if_prel_is_invalid(const Oid parent_oid, break; default: - elog(ERROR, - "expected_str selection not implemented for type %d", - expected_part_type); + WrongPartType(expected_part_type); } elog(ERROR, "relation \"%s\" is not partitioned by %s", From d69df585d9a504d1faeb5ba1ddd806ded7c3dbc1 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 10 May 2017 18:28:17 +0300 Subject: [PATCH 0482/1124] Fix incompatible functions of pg core for pg 9.6 1C version --- src/hooks.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 53bf30a5..118b3ece 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -147,17 +147,31 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (saved_jointype == JOIN_UNIQUE_INNER) return; /* No way to do this with a parameterized inner path */ +#if PG_VERSION_NUM >= 90603 + initial_cost_nestloop(root, &workspace, jointype, + outer, inner, /* built paths */ + extra); +#else initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ extra->sjinfo, &extra->semifactors); +#endif pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); +#if PG_VERSION_NUM >= 90603 + nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, + extra, outer, inner, + extra->restrictlist, + pathkeys, + calc_nestloop_required_outer(outer, inner)); +#else nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, extra->sjinfo, &extra->semifactors, outer, inner, extra->restrictlist, pathkeys, calc_nestloop_required_outer(outer, inner)); +#endif /* Discard all clauses that are to be evaluated by 'inner' */ foreach (rinfo_lc, extra->restrictlist) From 070bd3f5679930b8cb99e4b62f0624eb20923a32 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 19:06:52 +0300 Subject: [PATCH 0483/1124] rename columns in pathman_config, refactoring --- expected/pathman_basic.out | 82 +++++----- expected/pathman_bgw.out | 40 ++--- 
expected/pathman_calamity.out | 10 +- expected/pathman_callbacks.out | 36 ++--- expected/pathman_column_type.out | 10 +- expected/pathman_domains.out | 46 +++--- expected/pathman_inserts.out | 8 +- expected/pathman_permissions.out | 6 +- hash.sql | 6 +- init.sql | 66 ++++---- range.sql | 249 +++++++++++++++---------------- sql/pathman_column_type.sql | 4 +- sql/pathman_inserts.sql | 4 +- src/include/init.h | 4 + src/init.c | 5 - src/partition_creation.c | 4 +- src/partition_filter.c | 2 +- src/pl_funcs.c | 44 +++--- 18 files changed, 314 insertions(+), 312 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 6900ee9e..fa053a30 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -273,10 +273,10 @@ SELECT pathman.create_range_partitions('test.improved_dummy', 'val', SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.improved_dummy'::REGCLASS ORDER BY partition; - parent | partition | parttype | partattr | range_min | range_max ----------------------+-----------------------+----------+----------+-----------+----------- - test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 - test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 (2 rows) SELECT pathman.drop_partitions('test.improved_dummy'); @@ -298,10 +298,10 @@ SELECT pathman.create_range_partitions('test.improved_dummy', 'val', SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.improved_dummy'::REGCLASS ORDER BY partition; - parent | partition | parttype | partattr | range_min | range_max ----------------------+-----------+----------+----------+-----------+----------- - test.improved_dummy | p1 | 2 | val | 1 | 2 - test.improved_dummy | 
p2 | 2 | val | 2 | 3 + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 (2 rows) SELECT pathman.drop_partitions('test.improved_dummy'); @@ -324,10 +324,10 @@ SELECT pathman.create_range_partitions('test.improved_dummy', 'val', SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.improved_dummy'::REGCLASS ORDER BY partition; - parent | partition | parttype | partattr | range_min | range_max ----------------------+-----------+----------+----------+-----------+----------- - test.improved_dummy | p1 | 2 | val | 1 | 2 - test.improved_dummy | p2 | 2 | val | 2 | 3 + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 (2 rows) DROP TABLE test.improved_dummy CASCADE; @@ -1065,12 +1065,12 @@ SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); (1 row) SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; - parent | partition | parttype | partattr | range_min | range_max ---------------------+----------------------+----------+----------+-----------+----------- - test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 - test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 - test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 - test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 
3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 (4 rows) SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); @@ -1080,11 +1080,11 @@ SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); (1 row) SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; - parent | partition | parttype | partattr | range_min | range_max ---------------------+----------------------+----------+----------+-----------+----------- - test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 - test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 - test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 (3 rows) SELECT pathman.append_range_partition('test.range_rel'); @@ -1215,16 +1215,16 @@ SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_in (1 row) SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; - parent | partition | parttype | partattr | range_min | range_max -----------------+-------------------------------+----------+----------+--------------------------+-------------------------- - test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 - test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 - test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 - test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 - test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 - 
test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 - test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 - test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | (8 rows) INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); @@ -1461,16 +1461,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p -----------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval | cooked_expr 
+----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 20 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p ----------+---------+----------+----------------+-------------- + partrel | expr | parttype | range_interval | cooked_expr +---------+------+----------+----------------+------------- (0 rows) /* Check overlaps */ @@ -1632,9 +1632,9 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 5 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval | expression_p ---------------------+---------+----------+----------------+------------------------------------------------------------------------------------------------------------------------ - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} + partrel | expr | parttype | range_interval | cooked_expr +--------------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------ + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 1b6f6878..d78c4885 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -22,11 +22,11 @@ SELECT 
set_spawn_using_bgw('test_bgw.test_1', true); INSERT INTO test_bgw.test_1 VALUES (11); SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ - parent | partition | parttype | partattr | range_min | range_max ------------------+-------------------+----------+----------+-----------+----------- - test_bgw.test_1 | test_bgw.test_1_1 | 2 | val | 1 | 6 - test_bgw.test_1 | test_bgw.test_1_2 | 2 | val | 6 | 11 - test_bgw.test_1 | test_bgw.test_1_3 | 2 | val | 11 | 16 + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_1 | test_bgw.test_1_1 | 2 | val | 1 | 6 + test_bgw.test_1 | test_bgw.test_1_2 | 2 | val | 6 | 11 + test_bgw.test_1 | test_bgw.test_1_3 | 2 | val | 11 | 16 (3 rows) DROP TABLE test_bgw.test_1 CASCADE; @@ -48,11 +48,11 @@ SELECT set_spawn_using_bgw('test_bgw.test_2', true); INSERT INTO test_bgw.test_2 VALUES (11); SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ - parent | partition | parttype | partattr | range_min | range_max ------------------+-------------------+----------+----------+-----------+----------- - test_bgw.test_2 | test_bgw.test_2_1 | 2 | val | 1 | 6 - test_bgw.test_2 | test_bgw.test_2_2 | 2 | val | 6 | 11 - test_bgw.test_2 | test_bgw.test_2_3 | 2 | val | 11 | 16 + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_2 | test_bgw.test_2_1 | 2 | val | 1 | 6 + test_bgw.test_2 | test_bgw.test_2_2 | 2 | val | 6 | 11 + test_bgw.test_2 | test_bgw.test_2_3 | 2 | val | 11 | 16 (3 rows) DROP TABLE test_bgw.test_2 CASCADE; @@ -74,11 +74,11 @@ SELECT set_spawn_using_bgw('test_bgw.test_3', true); INSERT INTO test_bgw.test_3 VALUES (11); SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ - parent | partition | parttype | partattr | 
range_min | range_max ------------------+-------------------+----------+----------+-----------+----------- - test_bgw.test_3 | test_bgw.test_3_1 | 2 | val | 1 | 6 - test_bgw.test_3 | test_bgw.test_3_2 | 2 | val | 6 | 11 - test_bgw.test_3 | test_bgw.test_3_3 | 2 | val | 11 | 16 + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_3 | test_bgw.test_3_1 | 2 | val | 1 | 6 + test_bgw.test_3 | test_bgw.test_3_2 | 2 | val | 6 | 11 + test_bgw.test_3 | test_bgw.test_3_3 | 2 | val | 11 | 16 (3 rows) DROP TABLE test_bgw.test_3 CASCADE; @@ -100,11 +100,11 @@ SELECT set_spawn_using_bgw('test_bgw.test_4', true); INSERT INTO test_bgw.test_4 VALUES ('20170215'); SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ - parent | partition | parttype | partattr | range_min | range_max ------------------+-------------------+----------+----------+------------+------------ - test_bgw.test_4 | test_bgw.test_4_1 | 2 | val | 02-13-2017 | 02-14-2017 - test_bgw.test_4 | test_bgw.test_4_2 | 2 | val | 02-14-2017 | 02-15-2017 - test_bgw.test_4 | test_bgw.test_4_3 | 2 | val | 02-15-2017 | 02-16-2017 + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+------------+------------ + test_bgw.test_4 | test_bgw.test_4_1 | 2 | val | 02-13-2017 | 02-14-2017 + test_bgw.test_4 | test_bgw.test_4_2 | 2 | val | 02-14-2017 | 02-15-2017 + test_bgw.test_4 | test_bgw.test_4_3 | 2 | val | 02-15-2017 | 02-16-2017 (3 rows) DROP TABLE test_bgw.test_4 CASCADE; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5c493c66..76567373 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -473,7 +473,7 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ ERROR: 'parent_relid' should not be NULL 
SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ -WARNING: relation "pg_class" is not partitioned +WARNING: table "pg_class" is not partitioned check_range_available ----------------------- @@ -931,10 +931,10 @@ SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ (1 row) SELECT * FROM pathman_partition_list; /* OK */ - parent | partition | parttype | partattr | range_min | range_max --------------------+---------------------+----------+----------+-----------+----------- - calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 - calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 (2 rows) SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index a178a972..d5ae1c5c 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -233,18 +233,18 @@ $$ LANGUAGE plpgsql; SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS ORDER BY range_min::INT4; - parent | partition | parttype | partattr | range_min | range_max ----------------+------------------+----------+----------+-----------+----------- - callbacks.abc | callbacks.abc_1 | 2 | a | 1 | 11 - callbacks.abc | callbacks.abc_2 | 2 | a | 11 | 21 - callbacks.abc | callbacks.abc_3 | 2 | a | 21 | 31 - callbacks.abc | callbacks.abc_4 | 2 | a | 31 | 41 - callbacks.abc | callbacks.abc_5 | 2 | a | 41 | 51 - callbacks.abc | callbacks.abc_6 | 2 | a | 51 | 61 - callbacks.abc | callbacks.abc_7 | 2 | a | 61 | 71 - callbacks.abc | callbacks.abc_8 | 2 | a | 71 | 81 - callbacks.abc | callbacks.abc_9 | 2 | a | 81 | 91 - callbacks.abc | callbacks.abc_10 | 2 | a | 91 | 101 + 
parent | partition | parttype | expr | range_min | range_max +---------------+------------------+----------+------+-----------+----------- + callbacks.abc | callbacks.abc_1 | 2 | a | 1 | 11 + callbacks.abc | callbacks.abc_2 | 2 | a | 11 | 21 + callbacks.abc | callbacks.abc_3 | 2 | a | 21 | 31 + callbacks.abc | callbacks.abc_4 | 2 | a | 31 | 41 + callbacks.abc | callbacks.abc_5 | 2 | a | 41 | 51 + callbacks.abc | callbacks.abc_6 | 2 | a | 51 | 61 + callbacks.abc | callbacks.abc_7 | 2 | a | 61 | 71 + callbacks.abc | callbacks.abc_8 | 2 | a | 71 | 81 + callbacks.abc | callbacks.abc_9 | 2 | a | 81 | 91 + callbacks.abc | callbacks.abc_10 | 2 | a | 91 | 101 (10 rows) SELECT set_init_callback('callbacks.abc', @@ -405,12 +405,12 @@ NOTICE: dropping partition callbacks.abc_146 SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS ORDER BY range_min::INT4; - parent | partition | parttype | partattr | range_min | range_max ----------------+-------------------+----------+----------+-----------+----------- - callbacks.abc | callbacks.abc_147 | 2 | a | 1461 | 1471 - callbacks.abc | callbacks.abc_148 | 2 | a | 1471 | 1481 - callbacks.abc | callbacks.abc_149 | 2 | a | 1481 | 1491 - callbacks.abc | callbacks.abc_150 | 2 | a | 1491 | 1501 + parent | partition | parttype | expr | range_min | range_max +---------------+-------------------+----------+------+-----------+----------- + callbacks.abc | callbacks.abc_147 | 2 | a | 1461 | 1471 + callbacks.abc | callbacks.abc_148 | 2 | a | 1471 | 1481 + callbacks.abc | callbacks.abc_149 | 2 | a | 1481 | 1491 + callbacks.abc | callbacks.abc_150 | 2 | a | 1491 | 1501 (4 rows) DROP TABLE callbacks.abc CASCADE; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 66e916cb..6b0b605f 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -32,9 +32,9 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* change column's type (should flush 
caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* check that parsed expression was cleared */ -SELECT partrel, expression_p FROM pathman_config; - partrel | expression_p ------------------------+-------------- +SELECT partrel, cooked_expr FROM pathman_config; + partrel | cooked_expr +-----------------------+------------- test_column_type.test | (1 row) @@ -45,8 +45,8 @@ SELECT * FROM test_column_type.test; (0 rows) /* check that expression has been built */ -SELECT partrel, expression_p FROM pathman_config; - partrel | expression_p +SELECT partrel, cooked_expr FROM pathman_config; + partrel | cooked_expr -----------------------+------------------------------------------------------------------------------------------------------------------------- test_column_type.test | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 2b3170c5..188ae60e 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -69,22 +69,22 @@ WHERE val < 450; SELECT * FROM pathman_partition_list ORDER BY range_min::INT, range_max::INT; - parent | partition | parttype | partattr | range_min | range_max --------------------+----------------------+----------+----------+-----------+----------- - domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 - domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 - domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 - domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 - domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 - domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 - domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 - domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 - domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 - domains.dom_table | domains.dom_table_8 | 2 
| val | 701 | 801 - domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 - domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 - domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 - domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 + parent | partition | parttype | expr | range_min | range_max +-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 (14 rows) SELECT drop_partitions('domains.dom_table'); @@ -115,13 +115,13 @@ SELECT create_hash_partitions('domains.dom_table', 'val', 5); SELECT * FROM pathman_partition_list ORDER BY "partition"::TEXT; - parent | partition | parttype | partattr | range_min | range_max --------------------+---------------------+----------+----------+-----------+----------- - domains.dom_table | domains.dom_table_0 | 1 | val | | - domains.dom_table | domains.dom_table_1 | 1 | val | | - domains.dom_table | domains.dom_table_2 | 1 | val | | - domains.dom_table | domains.dom_table_3 | 1 | val | | - domains.dom_table | domains.dom_table_4 | 1 | val | | + parent | partition | 
parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | (5 rows) DROP SCHEMA domains CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index eec46463..44cc88a8 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -238,13 +238,11 @@ NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (1 row) INSERT INTO test_inserts.storage VALUES(121, 'query_3') -RETURNING (SELECT attname - FROM pathman_config - WHERE partrel = 'test_inserts.storage'::regclass); +RETURNING (SELECT get_partition_key('test_inserts.storage')); NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_3) - attname ---------- + get_partition_key +------------------- b (1 row) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index bea7f79c..681c4627 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -33,9 +33,9 @@ NOTICE: sequence "user1_table_seq" does not exist, skipping /* Should be able to see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | attname | parttype | range_interval | expression_p --------------------------+---------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval | cooked_expr +-------------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- + permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) SELECT * FROM pathman_config_params; diff --git a/hash.sql b/hash.sql index 4c21f9df..9416311b 100644 --- a/hash.sql +++ b/hash.sql @@ -72,7 +72,6 @@ RETURNS REGCLASS AS $$ DECLARE parent_relid REGCLASS; - part_attname TEXT; /* partitioned column */ old_constr_name TEXT; /* name of old_partition's constraint */ old_constr_def TEXT; /* definition of old_partition's constraint */ rel_persistence CHAR; @@ -111,9 +110,8 @@ BEGIN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; - /* Get partitioning expression */ - part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; - IF part_attname IS NULL THEN + /* Check that table is partitioned */ + IF 
@extschema@.get_partition_key(parent_relid) IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; diff --git a/init.sql b/init.sql index 27172a79..84582672 100644 --- a/init.sql +++ b/init.sql @@ -16,10 +16,10 @@ */ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, - expression TEXT, + expr TEXT, parttype INTEGER, range_interval TEXT, - expression_p TEXT) + cooked_expr TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; @@ -27,17 +27,17 @@ LANGUAGE C; /* * Main config. * partrel - regclass (relation type, stored as Oid) - * attname - partitioning expression (key) + * expr - partitioning expression (key) * parttype - partitioning type: (1 - HASH, 2 - RANGE) * range_interval - base interval for RANGE partitioning as string - * expression_p - cooked partitioning expression (parsed & rewritten) + * cooked_expr - cooked partitioning expression (parsed & rewritten) */ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, + expr TEXT NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT DEFAULT NULL, - expression_p TEXT DEFAULT NULL, + cooked_expr TEXT DEFAULT NULL, /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), @@ -45,10 +45,10 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( /* check for correct interval */ CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(partrel, - attname, + expr, parttype, range_interval, - expression_p)) + cooked_expr)) ); @@ -256,7 +256,7 @@ RETURNS TABLE ( parent REGCLASS, partition REGCLASS, parttype INT4, - partattr TEXT, + expr TEXT, range_min TEXT, range_max TEXT) AS 'pg_pathman', 'show_partition_list_internal' @@ -341,14 +341,13 @@ CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( AS $$ DECLARE - v_attr TEXT; + part_expr TEXT; v_limit_clause TEXT := ''; v_where_clause 
TEXT := ''; ctids TID[]; BEGIN - SELECT attname INTO v_attr - FROM @extschema@.pathman_config WHERE partrel = relation; + part_expr := @extschema@.get_partition_key(relation); p_total := 0; @@ -359,14 +358,14 @@ BEGIN /* Format WHERE clause if needed */ IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', v_attr); + v_where_clause := format('%1$s >= $1', part_expr); END IF; IF NOT p_max IS NULL THEN IF NOT p_min IS NULL THEN v_where_clause := v_where_clause || ' AND '; END IF; - v_where_clause := v_where_clause || format('%1$s < $2', v_attr); + v_where_clause := v_where_clause || format('%1$s < $2', part_expr); END IF; IF v_where_clause != '' THEN @@ -738,23 +737,42 @@ LANGUAGE C STRICT; /* - * Partitioning key + * Create DDL trigger to call pathman_ddl_trigger_func(). + */ +CREATE EVENT TRIGGER pathman_ddl_trigger +ON sql_drop +EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); + + +/* + * Partitioning key. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( relid REGCLASS) RETURNS TEXT AS $$ - SELECT attname FROM pathman_config WHERE partrel = relid; + SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; $$ LANGUAGE sql STRICT; +/* + * Partitioning key type. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' +LANGUAGE C STRICT; /* - * Create DDL trigger to call pathman_ddl_trigger_func(). + * Partitioning type. 
*/ -CREATE EVENT TRIGGER pathman_ddl_trigger -ON sql_drop -EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; /* @@ -781,14 +799,6 @@ CREATE OR REPLACE FUNCTION @extschema@.get_base_type( RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; -/* - * Return partition key type - */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( - relid REGCLASS) -RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' -LANGUAGE C STRICT; - /* * Return tablespace name for specified relation. */ diff --git a/range.sql b/range.sql index 89b19ff7..6b8e08d7 100644 --- a/range.sql +++ b/range.sql @@ -32,29 +32,29 @@ CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( RETURNS VOID AS $$ DECLARE - v_min start_value%TYPE; - v_max start_value%TYPE; - v_count BIGINT; + min_value start_value%TYPE; + max_value start_value%TYPE; + rows_count BIGINT; BEGIN /* Get min and max values */ EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', expression, parent_relid::TEXT) - INTO v_count, v_min, v_max; + INTO rows_count, min_value, max_value; /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + IF rows_count > 0 AND (min_value IS NULL OR max_value IS NULL) THEN RAISE EXCEPTION 'expression "%" returns NULL values', expression; END IF; /* Check lower boundary */ - IF start_value > v_min THEN + IF start_value > min_value THEN RAISE EXCEPTION 'start value is less than min value of "%"', expression; END IF; /* Check upper boundary */ - IF end_value <= v_max THEN + IF end_value <= max_value THEN RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; END IF; END @@ -96,10 +96,10 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ 
DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; + rows_count BIGINT; + value_type REGTYPE; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; @@ -115,21 +115,21 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) - INTO v_rows_count, v_max; + INTO rows_count, max_value; - IF v_rows_count = 0 THEN + IF rows_count = 0 THEN RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; - v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); /* * In case when user doesn't want to automatically create partitions @@ -144,11 +144,12 @@ BEGIN END LOOP; /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', - parent_relid, - start_value, - end_value, - v_atttype::TEXT) + EXECUTE + format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', + parent_relid, + start_value, + end_value, + value_type::TEXT) USING expression; END IF; @@ -196,9 +197,9 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; @@ -214,20 +215,20 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', 
expression, parent_relid) - INTO v_rows_count, v_max; + INTO rows_count, max_value; - IF v_rows_count = 0 THEN + IF rows_count = 0 THEN RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; - IF v_max IS NULL THEN + IF max_value IS NULL THEN RAISE EXCEPTION 'expression "%" can return NULL values', expression; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; @@ -262,7 +263,9 @@ BEGIN IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, - @extschema@.generate_range_bounds(start_value, p_interval, p_count), + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), NULL, NULL); END IF; @@ -292,7 +295,8 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE - part_count INTEGER; + part_count INTEGER := 0; + BEGIN IF array_ndims(bounds) > 1 THEN RAISE EXCEPTION 'Bounds array must be a one dimensional array'; @@ -468,38 +472,36 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( RETURNS ANYARRAY AS $$ DECLARE - v_parent REGCLASS; - v_attname TEXT; - v_atttype REGTYPE; - v_cond TEXT; - v_new_partition TEXT; - v_part_type INTEGER; - v_check_name TEXT; + parent_relid REGCLASS; + part_type INTEGER; + part_expr TEXT; + part_expr_type REGTYPE; + check_name TEXT; + check_cond TEXT; + new_partition TEXT; BEGIN - v_parent = @extschema@.get_parent_of_partition(partition_relid); + parent_relid = @extschema@.get_parent_of_partition(partition_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent); + PERFORM @extschema@.lock_partitioned_relation(parent_relid); /* Acquire data modification lock (prevent further modifications) */ PERFORM @extschema@.prevent_relation_modification(partition_relid); - v_atttype = @extschema@.get_partition_key_type(v_parent); 
+ part_expr_type = @extschema@.get_partition_key_type(parent_relid); + part_expr := @extschema@.get_partition_key(parent_relid); - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent - INTO v_attname, v_part_type; + part_type := @extschema@.get_partition_type(parent_relid); /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN + IF part_type != 2 THEN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Get partition values range */ EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(v_atttype)::TEXT) + @extschema@.get_base_type(part_expr_type)::TEXT) USING partition_relid INTO p_range; @@ -515,34 +517,34 @@ BEGIN END IF; /* Create new partition */ - v_new_partition := @extschema@.create_single_range_partition(v_parent, - split_value, - p_range[2], - partition_name, - tablespace); + new_partition := @extschema@.create_single_range_partition(parent_relid, + split_value, + p_range[2], + partition_name, + tablespace); /* Copy data */ - v_cond := @extschema@.build_range_condition(v_new_partition::regclass, - v_attname, split_value, p_range[2]); + check_cond := @extschema@.build_range_condition(new_partition::regclass, + part_expr, split_value, p_range[2]); EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) INSERT INTO %s SELECT * FROM part_data', partition_relid::TEXT, - v_cond, - v_new_partition); + check_cond, + new_partition); /* Alter original partition */ - v_cond := @extschema@.build_range_condition(partition_relid::regclass, - v_attname, p_range[1], split_value); - v_check_name := @extschema@.build_check_constraint_name(partition_relid); + check_cond := @extschema@.build_range_condition(partition_relid::regclass, + part_expr, p_range[1], split_value); + check_name := @extschema@.build_check_constraint_name(partition_relid); EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, - v_check_name); + 
check_name); EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, - v_check_name, - v_cond); + check_name, + check_cond); END $$ LANGUAGE plpgsql; @@ -570,9 +572,9 @@ CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( RETURNS TEXT AS $$ DECLARE - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -580,31 +582,31 @@ BEGIN /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); - v_atttype := @extschema@.get_partition_key_type(parent_relid); + part_expr_type := @extschema@.get_partition_key_type(parent_relid); - IF NOT @extschema@.is_date_type(v_atttype) AND - NOT @extschema@.is_operator_supported(v_atttype, '+') THEN - RAISE EXCEPTION 'type % does not support ''+'' operator', v_atttype::REGTYPE; + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; END IF; SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO v_interval; + INTO part_interval; EXECUTE format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) + @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, - v_atttype, - v_interval, + part_expr_type, + part_interval, partition_name, tablespace INTO - v_part_name; + part_name; - RETURN v_part_name; + RETURN part_name; END $$ LANGUAGE plpgsql; @@ -625,8 +627,8 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; - v_atttype REGTYPE; + part_expr_type REGTYPE; + part_name TEXT; v_args_format TEXT; BEGIN @@ -634,11 +636,11 @@ BEGIN RAISE EXCEPTION 'cannot append to empty partitions set'; END IF; - v_atttype := 
@extschema@.get_base_type(p_atttype); + part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', - v_atttype::TEXT) + part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -647,9 +649,9 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', v_atttype::TEXT); + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', v_atttype::TEXT); + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; EXECUTE @@ -661,9 +663,9 @@ BEGIN partition_name, tablespace INTO - v_part_name; + part_name; - RETURN v_part_name; + RETURN part_name; END $$ LANGUAGE plpgsql; @@ -678,9 +680,9 @@ CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( RETURNS TEXT AS $$ DECLARE - v_atttype REGTYPE; - v_part_name TEXT; - v_interval TEXT; + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -688,31 +690,31 @@ BEGIN /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); - v_atttype := @extschema@.get_partition_key_type(parent_relid); + part_expr_type := @extschema@.get_partition_key_type(parent_relid); - IF NOT @extschema@.is_date_type(v_atttype) AND - NOT @extschema@.is_operator_supported(v_atttype, '-') THEN - RAISE EXCEPTION 'type % does not support ''-'' operator', v_atttype::REGTYPE; + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; END IF; SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO v_interval; + INTO part_interval; EXECUTE format('SELECT 
@extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', - @extschema@.get_base_type(v_atttype)::TEXT) + @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, - v_atttype, - v_interval, + part_expr_type, + part_interval, partition_name, tablespace INTO - v_part_name; + part_name; - RETURN v_part_name; + RETURN part_name; END $$ LANGUAGE plpgsql; @@ -733,8 +735,8 @@ CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; - v_atttype REGTYPE; + part_expr_type REGTYPE; + part_name TEXT; v_args_format TEXT; BEGIN @@ -742,11 +744,11 @@ BEGIN RAISE EXCEPTION 'cannot prepend to empty partitions set'; END IF; - v_atttype := @extschema@.get_base_type(p_atttype); + part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', - v_atttype::TEXT) + part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -755,9 +757,9 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', v_atttype::TEXT); + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', v_atttype::TEXT); + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); END IF; EXECUTE @@ -769,9 +771,9 @@ BEGIN partition_name, tablespace INTO - v_part_name; + part_name; - RETURN v_part_name; + RETURN part_name; END $$ LANGUAGE plpgsql; @@ -788,7 +790,7 @@ CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; + part_name TEXT; BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -808,13 +810,13 @@ BEGIN END IF; /* Create new partition */ - v_part_name := @extschema@.create_single_range_partition(parent_relid, - start_value, - end_value, - partition_name, - tablespace); 
+ part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); - RETURN v_part_name; + RETURN part_name; END $$ LANGUAGE plpgsql; @@ -831,21 +833,18 @@ $$ DECLARE parent_relid REGCLASS; part_name TEXT; + part_type INTEGER; v_relkind CHAR; v_rows BIGINT; - v_part_type INTEGER; BEGIN parent_relid := @extschema@.get_parent_of_partition(partition_relid); part_name := partition_relid::TEXT; /* save the name to be returned */ - SELECT parttype - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_part_type; + part_type := @extschema@.get_partition_type(parent_relid); /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN + IF part_type != 2 THEN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; @@ -894,7 +893,7 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE - v_attname TEXT; + part_expr TEXT; rel_persistence CHAR; v_init_callback REGPROCEDURE; @@ -924,9 +923,9 @@ BEGIN /* Set inheritance */ EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + part_expr := @extschema@.get_partition_key(parent_relid); - IF v_attname IS NULL THEN + IF part_expr IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; @@ -935,7 +934,7 @@ BEGIN partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid), @extschema@.build_range_condition(partition_relid, - v_attname, + part_expr, start_value, end_value)); @@ -972,8 +971,8 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( RETURNS TEXT AS $$ DECLARE - v_attname TEXT; parent_relid REGCLASS; + part_expr TEXT; BEGIN parent_relid := @extschema@.get_parent_of_partition(partition_relid); @@ -981,11 +980,9 @@ BEGIN /* Acquire lock on parent */ PERFORM 
@extschema@.prevent_relation_modification(parent_relid); - v_attname := attname - FROM @extschema@.pathman_config - WHERE partrel = parent_relid; + part_expr := @extschema@.get_partition_key(parent_relid); - IF v_attname IS NULL THEN + IF part_expr IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 758d2f72..47d38cc5 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -21,13 +21,13 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* check that parsed expression was cleared */ -SELECT partrel, expression_p FROM pathman_config; +SELECT partrel, cooked_expr FROM pathman_config; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; /* check that expression has been built */ -SELECT partrel, expression_p FROM pathman_config; +SELECT partrel, cooked_expr FROM pathman_config; SELECT context, entries FROM pathman_cache_stats ORDER BY context; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 3737baa6..ff46c848 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -115,9 +115,7 @@ INSERT INTO test_inserts.storage VALUES(121, 'query_2') RETURNING (SELECT generate_series(1, 10) LIMIT 1); INSERT INTO test_inserts.storage VALUES(121, 'query_3') -RETURNING (SELECT attname - FROM pathman_config - WHERE partrel = 'test_inserts.storage'::regclass); +RETURNING (SELECT get_partition_key('test_inserts.storage')); INSERT INTO test_inserts.storage VALUES(121, 'query_4') RETURNING 1, 2, 3, 4; diff --git a/src/include/init.h b/src/include/init.h index 778da9bb..d5e877c0 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -24,6 +24,10 @@ /* Help user in case of emergency */ #define INIT_ERROR_HINT "pg_pathman will be disabled to allow you to resolve this issue" +/* Initial size of 'partitioned_rels' 
table */ +#define PART_RELS_SIZE 10 +#define CHILD_FACTOR 500 + /* * pg_pathman's initialization state structure. diff --git a/src/init.c b/src/init.c index 0333d263..7415e7de 100644 --- a/src/init.c +++ b/src/init.c @@ -39,11 +39,6 @@ #include "utils/typcache.h" -/* Initial size of 'partitioned_rels' table */ -#define PART_RELS_SIZE 10 -#define CHILD_FACTOR 500 - - /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; MemoryContext PathmanRelationCacheContext = NULL; diff --git a/src/partition_creation.c b/src/partition_creation.c index 63fd71e5..41fa6657 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -305,7 +305,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) } } else - elog(ERROR, "relation \"%s\" is not partitioned", + elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(relid)); /* Check that 'last_partition' is valid */ @@ -1250,7 +1250,7 @@ check_range_available(Oid parent_relid, /* If there's no prel, return TRUE (overlap is not possible) */ if (!prel) { - ereport(WARNING, (errmsg("relation \"%s\" is not partitioned", + ereport(WARNING, (errmsg("table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)))); return true; } diff --git a/src/partition_filter.c b/src/partition_filter.c index 52b13bb6..51e285c7 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -607,7 +607,7 @@ partition_filter_exec(CustomScanState *node) if (!prel) { if (!state->warning_triggered) - elog(WARNING, "Relation \"%s\" is not partitioned, " + elog(WARNING, "table \"%s\" is not partitioned, " "PartitionFilter will behave as a normal INSERT", get_rel_name_or_relid(state->partitioned_table)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fb5ac8c2..38f6a9c1 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -343,18 +343,16 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) Datum show_partition_list_internal(PG_FUNCTION_ARGS) { - show_partition_list_cxt *usercxt; - 
FuncCallContext *funccxt; - MemoryContext old_mcxt; - SPITupleTable *tuptable; + show_partition_list_cxt *usercxt; + FuncCallContext *funccxt; + MemoryContext old_mcxt; + SPITupleTable *tuptable; - /* - * Initialize tuple descriptor & function call context. - */ + /* Initialize tuple descriptor & function call context */ if (SRF_IS_FIRSTCALL()) { TupleDesc tupdesc; - MemoryContext tuptabcxt; + MemoryContext tuptab_mcxt; funccxt = SRF_FIRSTCALL_INIT(); @@ -381,7 +379,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, Anum_pathman_pl_parttype, "parttype", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_pl_partattr, - "partattr", TEXTOID, -1, 0); + "expr", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_pl_range_min, "range_min", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_pl_range_max, @@ -391,18 +389,18 @@ show_partition_list_internal(PG_FUNCTION_ARGS) funccxt->user_fctx = (void *) usercxt; /* initialize tuple table context */ - tuptabcxt = AllocSetContextCreate(CurrentMemoryContext, - "pg_pathman TupTable", - ALLOCSET_DEFAULT_SIZES); - MemoryContextSwitchTo(tuptabcxt); + tuptab_mcxt = AllocSetContextCreate(CurrentMemoryContext, + "tuptable for pathman_partition_list", + ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(tuptab_mcxt); - /* initialize tuple table for partitions list, we use it as buffer */ + /* Initialize tuple table for partitions list, we use it as buffer */ tuptable = (SPITupleTable *) palloc0(sizeof(SPITupleTable)); usercxt->tuptable = tuptable; - tuptable->tuptabcxt = tuptabcxt; + tuptable->tuptabcxt = tuptab_mcxt; - /* set up initial allocations */ - tuptable->alloced = tuptable->free = 128; + /* Set up initial allocations */ + tuptable->alloced = tuptable->free = PART_RELS_SIZE * CHILD_FACTOR; tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); MemoryContextSwitchTo(old_mcxt); @@ -520,10 +518,13 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Double the 
size of the pointer array */ tuptable->free = tuptable->alloced; tuptable->alloced += tuptable->free; - tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, - tuptable->alloced * sizeof(HeapTuple)); + + tuptable->vals = (HeapTuple *) + repalloc_huge(tuptable->vals, + tuptable->alloced * sizeof(HeapTuple)); } + /* Add tuple to table and decrement 'free' */ tuptable->vals[tuptable->alloced - tuptable->free] = htup; (tuptable->free)--; @@ -545,10 +546,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) funccxt->user_fctx; tuptable = usercxt->tuptable; + /* Iterate through used slots */ if (usercxt->child_number < (tuptable->alloced - tuptable->free)) { - HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number]; - usercxt->child_number++; + HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number++]; + SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(htup)); } From e58905b6f5eef657da3395f58fe5371c118fa96d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 10 May 2017 19:09:01 +0300 Subject: [PATCH 0484/1124] also rename columns in pathman.h --- src/include/pathman.h | 4 ++-- src/init.c | 14 +++++++------- src/partition_creation.c | 6 +++--- src/pl_funcs.c | 8 ++++---- src/relation_info.c | 8 ++++---- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index e7a983b6..3bd38467 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -46,10 +46,10 @@ #define PATHMAN_CONFIG "pathman_config" #define Natts_pathman_config 5 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ -#define Anum_pathman_config_expression 2 /* partition expression (original) */ +#define Anum_pathman_config_expr 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. 
(text) */ -#define Anum_pathman_config_expression_p 5 /* parsed partitioning expression (text) */ +#define Anum_pathman_config_cooked_expr 5 /* parsed partitioning expression (text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/init.c b/src/init.c index 7415e7de..84db692e 100644 --- a/src/init.c +++ b/src/init.c @@ -636,7 +636,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Perform checks for non-NULL columns */ Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_expression - 1]); + Assert(!isnull[Anum_pathman_config_expr - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); } @@ -686,8 +686,8 @@ pathman_config_invalidate_parsed_expression(Oid relid) HeapTuple new_htup; /* Reset parsed expression */ - values[Anum_pathman_config_expression_p - 1] = (Datum) 0; - nulls[Anum_pathman_config_expression_p - 1] = true; + values[Anum_pathman_config_cooked_expr - 1] = (Datum) 0; + nulls[Anum_pathman_config_cooked_expr - 1] = true; rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -714,13 +714,13 @@ pathman_config_refresh_parsed_expression(Oid relid, HeapTuple htup_new; /* get and parse expression */ - expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); expr_datum = cook_partitioning_expression(relid, expr_cstr, NULL); pfree(expr_cstr); /* prepare tuple values */ - values[Anum_pathman_config_expression_p - 1] = expr_datum; - isnull[Anum_pathman_config_expression_p - 1] = false; + values[Anum_pathman_config_cooked_expr - 1] = expr_datum; + isnull[Anum_pathman_config_cooked_expr - 1] = false; rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); @@ -816,7 +816,7 @@ read_pathman_config(void) /* These attributes are marked as NOT NULL, check anyway */ Assert(!isnull[Anum_pathman_config_partrel - 1]); 
Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_expression - 1]); + Assert(!isnull[Anum_pathman_config_expr - 1]); /* Extract values from Datums */ relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); diff --git a/src/partition_creation.c b/src/partition_creation.c index 41fa6657..00d458ea 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1716,7 +1716,7 @@ build_partitioning_expression(Oid parent_relid, elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); - expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL); pfree(expr_cstr); @@ -1726,9 +1726,9 @@ build_partitioning_expression(Oid parent_relid, char *expr_p_cstr; /* We can safely assume that this field will always remain not null */ - Assert(!isnull[Anum_pathman_config_expression_p - 1]); + Assert(!isnull[Anum_pathman_config_cooked_expr - 1]); expr_p_cstr = - TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); + TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); /* Finally return expression type */ *expr_type = exprType(stringToNode(expr_p_cstr)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 38f6a9c1..8855336d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -798,11 +798,11 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); isnull[Anum_pathman_config_parttype - 1] = false; - values[Anum_pathman_config_expression - 1] = CStringGetTextDatum(expression); - isnull[Anum_pathman_config_expression - 1] = false; + values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); + isnull[Anum_pathman_config_expr - 1] = false; - values[Anum_pathman_config_expression_p - 1] = expr_datum; - isnull[Anum_pathman_config_expression_p - 1] = false; + 
values[Anum_pathman_config_cooked_expr - 1] = expr_datum; + isnull[Anum_pathman_config_cooked_expr - 1] = false; /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); diff --git a/src/relation_info.c b/src/relation_info.c index 799cc74c..4d70ab4f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -167,13 +167,13 @@ refresh_pathman_relation_info(Oid relid, prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_expression_p - 1]); + expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); /* Expression and attname should be saved in cache context */ old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); /* Build partitioning expression tree */ - prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expression - 1]); + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); prel->expr = (Node *) stringToNode(expr); fix_opfuncids(prel->expr); @@ -360,7 +360,7 @@ get_pathman_relation_info(Oid relid) /* Check that PATHMAN_CONFIG table contains this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) { - bool upd_expr = isnull[Anum_pathman_config_expression_p - 1]; + bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; if (upd_expr) pathman_config_refresh_parsed_expression(relid, values, isnull, &iptr); @@ -1012,7 +1012,7 @@ try_perform_parent_refresh(Oid parent) if (pathman_config_contains_relation(parent, values, isnull, NULL, &iptr)) { - bool should_update_expr = isnull[Anum_pathman_config_expression_p - 1]; + bool should_update_expr = isnull[Anum_pathman_config_cooked_expr - 1]; if (should_update_expr) pathman_config_refresh_parsed_expression(parent, values, isnull, &iptr); From be140b9ac18a03dfd5c91261001fe8b5a7b783ab Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 
2017 11:51:52 +0300 Subject: [PATCH 0485/1124] Add NOP trigger for FDW updates --- hash.sql | 4 ++ init.sql | 24 ++++++++++++ range.sql | 6 ++- src/include/init.h | 1 + src/include/partition_creation.h | 5 +++ src/init.c | 11 ++++++ src/partition_creation.c | 65 +++++++++++++++++++++++++------ src/partition_update.c | 13 +++++++ src/pl_funcs.c | 65 +++++++++++++++++++++++++++++++ tests/python/partitioning_test.py | 5 +++ 10 files changed, 187 insertions(+), 12 deletions(-) diff --git a/hash.sql b/hash.sql index 4c21f9df..b0eff3b8 100644 --- a/hash.sql +++ b/hash.sql @@ -138,6 +138,10 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); + IF @extschema@.is_relation_foreign(partition_relid) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + END IF; + /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) SELECT init_callback diff --git a/init.sql b/init.sql index bb6da5bc..1ea4355b 100644 --- a/init.sql +++ b/init.sql @@ -737,6 +737,22 @@ CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' LANGUAGE C STRICT; +/* + * Function for NOP triggers. + * NOP trigger is a trigger that we use to turn off direct modify of FDW tables + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_nop_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_nop_trigger_func' +LANGUAGE C STRICT; + +/* + * Creates single NOP trigger. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_nop_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_nop_trigger' +LANGUAGE C STRICT; /* * Partitioning key @@ -930,3 +946,11 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' LANGUAGE C STRICT; + +/* + * Check if relation is foreign table + */ +CREATE OR REPLACE FUNCTION @extschema@.is_relation_foreign( + relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_relation_foreign' +LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 371a9f83..009f11f1 100644 --- a/range.sql +++ b/range.sql @@ -948,10 +948,14 @@ BEGIN INTO v_init_callback; /* If update trigger is enabled then create one for this partition */ - if @extschema@.has_update_trigger(parent_relid) THEN + IF @extschema@.has_update_trigger(parent_relid) THEN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; + IF @extschema@.is_relation_foreign(partition_relid) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + END IF; + /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, diff --git a/src/include/init.h b/src/include/init.h index 778da9bb..769bf119 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -199,6 +199,7 @@ char *build_sequence_name_internal(Oid relid); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); +char *build_nop_trigger_name_internal(Oid relid); bool pathman_config_contains_relation(Oid relid, Datum *values, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 42454ca9..106054c9 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,6 +86,11 @@ void create_single_update_trigger_internal(Oid 
partition_relid, bool has_update_trigger_internal(Oid parent); +/* NOP triggers */ +void create_single_nop_trigger_internal(Oid relid, + const char *trigname, + List *columns); + /* Partitioning callback type */ typedef enum { diff --git a/src/init.c b/src/init.c index 0333d263..25ce724c 100644 --- a/src/init.c +++ b/src/init.c @@ -583,6 +583,17 @@ build_update_trigger_name_internal(Oid relid) return psprintf("%s_upd_trig", get_rel_name(relid)); } +/* + * Generate name for NOP trigger. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_nop_trigger_name_internal(Oid relid) +{ + AssertArg(OidIsValid(relid)); + return psprintf("%s_nop_trig", get_rel_name(relid)); +} + /* * Generate name for update trigger's function. * NOTE: this function does not perform sanity checks at all. diff --git a/src/partition_creation.c b/src/partition_creation.c index 7707bec1..5f237575 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -84,6 +84,8 @@ static Node *build_partitioning_expression(Oid parent_relid, Oid *expr_type, List **columns); +static bool has_trigger_internal(Oid relid, const char *trigname); + /* * --------------------------------------- * Public interface (partition creation) @@ -215,7 +217,8 @@ create_single_partition_common(Oid parent_relid, init_callback_params *callback_params, List *trigger_columns) { - Relation child_relation; + Relation child_relation; + const char *trigger_name; /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); @@ -230,8 +233,6 @@ create_single_partition_common(Oid parent_relid, /* Create trigger if needed */ if (has_update_trigger_internal(parent_relid)) { - const char *trigger_name; - trigger_name = build_update_trigger_name_internal(parent_relid); create_single_update_trigger_internal(partition_relid, trigger_name, @@ -1782,26 +1783,22 @@ create_single_update_trigger_internal(Oid partition_relid, InvalidOid, 
InvalidOid, false); } -/* Check if relation has pg_pathman's update trigger */ -bool -has_update_trigger_internal(Oid parent_relid) +/* Check if relation has some trigger */ +static bool +has_trigger_internal(Oid relid, const char *trigname) { bool res = false; Relation tgrel; SysScanDesc scan; ScanKeyData key[1]; HeapTuple tuple; - const char *trigname; - - /* Build update trigger's name */ - trigname = build_update_trigger_name_internal(parent_relid); tgrel = heap_open(TriggerRelationId, RowExclusiveLock); ScanKeyInit(&key[0], Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parent_relid)); + ObjectIdGetDatum(relid)); scan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, NULL, lengthof(key), key); @@ -1822,3 +1819,49 @@ has_update_trigger_internal(Oid parent_relid) return res; } + +/* Check if relation has pg_pathman's update trigger */ +bool +has_update_trigger_internal(Oid parent_relid) +{ + const char *trigname; + + /* Build update trigger's name */ + trigname = build_update_trigger_name_internal(parent_relid); + return has_trigger_internal(parent_relid, trigname); +} + +/* Create trigger for partition that does nothing */ +void +create_single_nop_trigger_internal(Oid relid, + const char *trigname, + List *columns) +{ + CreateTrigStmt *stmt; + List *func; + + /* do nothing if relation has trigger already */ + if (has_trigger_internal(relid, trigname)) + return; + + func = list_make2(makeString(get_namespace_name(get_pathman_schema())), + makeString(CppAsString(pathman_nop_trigger_func))); + + stmt = makeNode(CreateTrigStmt); + stmt->trigname = (char *) trigname; + stmt->relation = makeRangeVarFromRelid(relid); + stmt->funcname = func; + stmt->args = NIL; + stmt->row = true; + stmt->timing = TRIGGER_TYPE_BEFORE; + stmt->events = TRIGGER_TYPE_UPDATE; + stmt->columns = columns; + stmt->whenClause = NULL; + stmt->isconstraint = false; + stmt->deferrable = false; + stmt->initdeferred = false; + stmt->constrrel = NULL; + + (void) 
CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, + InvalidOid, InvalidOid, false); +} diff --git a/src/partition_update.c b/src/partition_update.c index 74a05f84..fe6ddc64 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -60,6 +60,16 @@ init_partition_update_static_data(void) NULL); } +/* + * By default UPDATE queries will make ForeignUpdate nodes for foreign tables. + * This function modifies these nodes so they will work as SELECTs + */ +static void +modify_fdw_scan(ForeignScan *node) +{ + node->scan.plan.plan_node_id = CMD_SELECT; + node->operation = CMD_SELECT; +} Plan * make_partition_update(Plan *subplan, @@ -76,6 +86,9 @@ make_partition_update(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; + if (IsA(subplan, ForeignScan)) + modify_fdw_scan((ForeignScan *) subplan); + /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index e57832f7..f7c17f3d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -71,8 +71,12 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); +PG_FUNCTION_INFO_V1( pathman_nop_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); +PG_FUNCTION_INFO_V1( is_relation_foreign ); + +PG_FUNCTION_INFO_V1( create_single_nop_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); @@ -1213,6 +1217,24 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_POINTER(new_tuple); } +Datum +pathman_nop_trigger_func(PG_FUNCTION_ARGS) +{ + TriggerData *trigdata = (TriggerData *) fcinfo->context; + + /* Handle user calls */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "this function should not be called directly"); + + /* Handle wrong 
fire mode */ + if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) + elog(ERROR, "%s: must be fired for row", + trigdata->tg_trigger->tgname); + + /* Just return NEW tuple */ + PG_RETURN_POINTER(trigdata->tg_newtuple); +} + struct replace_vars_cxt { HeapTuple new_tuple; @@ -1456,6 +1478,49 @@ has_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); } +/* Check if relation is foreign table */ +Datum +is_relation_foreign(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Relation rel; + bool res; + + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); + + rel = heap_open(relid, NoLock); + res = (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE); + heap_close(rel, NoLock); + PG_RETURN_BOOL(res); +} + +/* Create a trigger for partition that does nothing */ +Datum +create_single_nop_trigger(PG_FUNCTION_ARGS) +{ + Oid parent = PG_GETARG_OID(0); + Oid child = PG_GETARG_OID(1); + const char *trigname; + const PartRelationInfo *prel; + List *columns; + + /* Check that table is partitioned */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_ANY); + + /* Acquire trigger and attribute names */ + trigname = build_nop_trigger_name_internal(parent); + + /* Generate list of columns used in expression */ + columns = PrelExpressionColumnNames(prel); + create_single_nop_trigger_internal(child, trigname, columns); + + PG_RETURN_VOID(); +} + /* * ------- diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e102c332..0a5bef57 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -538,6 +538,7 @@ def test_foreign_table(self): fserv.stop() master.stop() + @if_fdw_enabled def test_update_node_on_fdw_tables(self): ''' Test update node on foreign tables ''' @@ -578,6 +579,10 @@ def 
test_update_node_on_fdw_tables(self): result_relid = con.execute('select tableoid from abc where id=35')[0][0] self.assertEqual(result_relid, dest_relid) + self.set_trace(con, 'pg_debug') + import ipdb; ipdb.set_trace() + pass + def test_parallel_nodes(self): """Test parallel queries under partitions""" From b71356931e9f40b62e4460a16fd11ff820711869 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 11 May 2017 12:03:17 +0300 Subject: [PATCH 0486/1124] Fix if macro definition for pgpro v9.6.3 --- src/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 118b3ece..86f3a6a0 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -147,7 +147,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (saved_jointype == JOIN_UNIQUE_INNER) return; /* No way to do this with a parameterized inner path */ -#if PG_VERSION_NUM >= 90603 +#if defined PGPRO_VERSION && PG_VERSION_NUM >= 90603 initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ extra); @@ -159,7 +159,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); -#if PG_VERSION_NUM >= 90603 +#if defined PGPRO_VERSION && PG_VERSION_NUM >= 90603 nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, extra, outer, inner, extra->restrictlist, From c681907638cd9dba8c4ab25af2cf7fb129603d64 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 11 May 2017 12:56:47 +0300 Subject: [PATCH 0487/1124] Add temp fix to pathman_basic.out --- expected/pathman_basic.out | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index a5902c58..a88e74db 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1023,31 +1023,29 @@ SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id JOIN test.num_range_rel j3 on j3.id = j1.id WHERE j1.dt < 
'2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------- Sort Sort Key: j2.dt -> Merge Join - Merge Cond: (j3.id = j2.id) + Merge Cond: (j2.id = j3.id) + -> Merge Join + Merge Cond: (j1.id = j2.id) + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 -> Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Materialize - -> Merge Join - Merge Cond: (j2.id = j1.id) - -> Merge Append - Sort Key: j2.id - -> Index Scan using range_rel_2_pkey on range_rel_2 j2 - -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 - -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 - -> Materialize - -> Merge Append - Sort Key: j1.id - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(22 rows) +(20 rows) SET enable_hashjoin = ON; SET enable_mergejoin = OFF; From 55e0fed889231298927108d734113b4041e1598e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 13:58:43 +0300 Subject: [PATCH 0488/1124] Move junk cleaning code to PartitionUpdate --- hash.sql | 4 +- src/include/partition_filter.h | 6 +- src/partition_filter.c | 39 ++-------- src/partition_update.c | 115 +++++++++++++++++------------- tests/python/partitioning_test.py | 12 ++-- 5 files changed, 84 insertions(+), 92 deletions(-) diff 
--git a/hash.sql b/hash.sql index b0eff3b8..677239b6 100644 --- a/hash.sql +++ b/hash.sql @@ -138,8 +138,8 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); - IF @extschema@.is_relation_foreign(partition_relid) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); + IF @extschema@.is_relation_foreign(new_partition) THEN + PERFORM @extschema@.create_single_nop_trigger(parent_relid, new_partition); END IF; /* Fetch init_callback from 'params' table */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 68c57aef..1519a246 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -97,11 +97,11 @@ typedef struct bool warning_triggered; /* warning message counter */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ItemPointer ctid; /* ctid of scanned tuple - if there any, or NULL, - filled when command_type == CMD_UPDATE*/ CmdType command_type; + TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ + JunkFilter *src_junkFilter; /* junkfilter for subplan_slot */ + ExprContext *tup_convert_econtext; /* ExprContext for projections */ ExprState *expr_state; /* for partitioning expression */ } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 88c470a3..5def7e1f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -656,16 +656,18 @@ partition_filter_exec(CustomScanState *node) TupleTableSlot *slot; ResultRelInfo *saved_resultRelInfo; - /* clean ctid for old slot */ - state->ctid = NULL; - slot = ExecProcNode(child_ps); + state->subplan_slot = slot; + state->src_junkFilter = NULL; /* Save original ResultRelInfo */ saved_resultRelInfo = estate->es_result_relation_info; if (!state->result_parts.saved_rel_info) state->result_parts.saved_rel_info = saved_resultRelInfo; + if (state->tup_convert_slot) + ExecClearTuple(state->tup_convert_slot); + if (!TupIsNull(slot)) 
{ MemoryContext old_mcxt; @@ -717,33 +719,8 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - if (state->command_type == CMD_UPDATE) - { - JunkFilter *junkfilter; - Datum datum; - char relkind; - - /* - * extract `ctid` junk attribute and save it in state, - * we need this step because if there will be conversion - * then junk attributes will be removed from slot - */ - junkfilter = rri_holder->updates_junkFilter; - Assert(junkfilter != NULL); - - relkind = saved_resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - bool isNull; - - datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - state->ctid = (ItemPointer) DatumGetPointer(datum); - } - } + /* pass junkfilter to upper node */ + state->src_junkFilter = rri_holder->updates_junkFilter; /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) @@ -765,8 +742,6 @@ partition_filter_exec(CustomScanState *node) /* Now replace the original slot */ slot = state->tup_convert_slot; } - else if (state->command_type == CMD_UPDATE) - slot = ExecFilterJunk(rri_holder->updates_junkFilter, slot); return slot; } diff --git a/src/partition_update.c b/src/partition_update.c index fe6ddc64..e925ac6d 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -151,72 +151,89 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { - Datum datum; - bool isNull; - char relkind; - ResultRelInfo *resultRelInfo; - ItemPointer tupleid; - ItemPointerData tuple_ctid; - EPQState epqstate; - HeapTupleData oldtupdata; - HeapTuple oldtuple; - - PartitionFilterState *child_state = (PartitionFilterState *) child_ps; + Datum datum; + bool isNull; + char relkind; + ResultRelInfo *resultRelInfo, + *sourceRelInfo; + ItemPointer tupleid = NULL; + ItemPointerData 
tuple_ctid; + EPQState epqstate; + HeapTupleData oldtupdata; + HeapTuple oldtuple = NULL; + PartitionFilterState *child_state; + JunkFilter *junkfilter; + + child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); - EvalPlanQualSetSlot(&epqstate, slot); + EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); + sourceRelInfo = child_state->result_parts.saved_rel_info; resultRelInfo = estate->es_result_relation_info; - oldtuple = NULL; - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + junkfilter = child_state->src_junkFilter; - if (child_state->ctid != NULL) + if (junkfilter != NULL) { - tupleid = child_state->ctid; - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; - } - else if (relkind == RELKIND_FOREIGN_TABLE) - { - JunkFilter *junkfilter = resultRelInfo->ri_junkFilter; - - if (junkfilter != NULL && AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) { - datum = ExecGetJunkAttribute(slot, - junkfilter->jf_junkAttNo, - &isNull); + bool isNull; + + datum = ExecGetJunkAttribute(child_state->subplan_slot, + junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ if (isNull) - elog(ERROR, "wholerow is NULL"); + elog(ERROR, "ctid is NULL"); - oldtupdata.t_data = DatumGetHeapTupleHeader(datum); - oldtupdata.t_len = - HeapTupleHeaderGetDatumLength(oldtupdata.t_data); - ItemPointerSetInvalid(&(oldtupdata.t_self)); - - /* Historically, view triggers see invalid t_tableOid. */ - oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); - oldtuple = &oldtupdata; + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! 
*/ + tupleid = &tuple_ctid; } - - tupleid = NULL; + else if (relkind == RELKIND_FOREIGN_TABLE) + { + if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) + { + datum = ExecGetJunkAttribute(child_state->subplan_slot, + junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "wholerow is NULL"); + + oldtupdata.t_data = DatumGetHeapTupleHeader(datum); + oldtupdata.t_len = + HeapTupleHeaderGetDatumLength(oldtupdata.t_data); + ItemPointerSetInvalid(&(oldtupdata.t_self)); + + /* Historically, view triggers see invalid t_tableOid. */ + oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + oldtuple = &oldtupdata; + } + } + else + elog(ERROR, "got unexpected type of relation"); + + /* + * Clean from junk attributes before INSERT, + * but only if slot wasn't converted in PartitionFilter + */ + if (TupIsNull(child_state->tup_convert_slot)) + slot = ExecFilterJunk(junkfilter, slot); } - else - elog(ERROR, "updates supported only on basic relations and foreign tables"); - - /* delete old tuple */ - estate->es_result_relation_info = child_state->result_parts.saved_rel_info; /* - * We have two cases here: - * normal relations - tupleid points to actual tuple - * foreign tables - tupleid is invalid, slot is required + * Delete old tuple. 
We have two cases here: + * 1) local tables - tupleid points to actual tuple + * 2) foreign tables - tupleid is invalid, slot is required */ - ExecDeleteInternal(tupleid, oldtuple, slot, &epqstate, estate); - estate->es_result_relation_info = resultRelInfo; + estate->es_result_relation_info = sourceRelInfo; + ExecDeleteInternal(tupleid, oldtuple, child_state->subplan_slot, + &epqstate, estate); /* we've got the slot that can be inserted to child partition */ + estate->es_result_relation_info = resultRelInfo; return slot; } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0a5bef57..9bda06d9 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -557,12 +557,12 @@ def test_update_node_on_fdw_tables(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - master.safe_psql('postgres', - 'set pg_pathman.enable_partitionupdate=on') + #master.safe_psql('postgres', + # 'set pg_pathman.enable_partitionupdate=on') with master.connect() as con: con.begin() - con.execute('set pg_pathman.enable_partitionupdate=on') + #con.execute('set pg_pathman.enable_partitionupdate=on') con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() @@ -575,9 +575,9 @@ def test_update_node_on_fdw_tables(self): # - update from foreign to local # - update from foreign to foreign - con.execute('update abc set id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=35')[0][0] - self.assertEqual(result_relid, dest_relid) + #con.execute('update abc set id=36 where id=9') + #result_relid = con.execute('select tableoid from abc where id=35')[0][0] + #self.assertEqual(result_relid, dest_relid) self.set_trace(con, 'pg_debug') import ipdb; ipdb.set_trace() From 0ae222ad073cb331df3e9945859c90ab5b2a2586 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 14:03:52 +0300 Subject: [PATCH 0489/1124] refactoring (Pl/PgSQL), remove function 
common_relation_checks() --- expected/pathman_foreign_keys.out | 4 +- hash.sql | 14 ++----- init.sql | 47 +++++++++++++---------- range.sql | 64 +++++++++++++++---------------- 4 files changed, 62 insertions(+), 67 deletions(-) diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index df83efc3..7a9db3e0 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -71,8 +71,8 @@ CREATE TABLE fkeys.replies( INSERT INTO fkeys.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; INSERT INTO fkeys.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* not ok */ -WARNING: foreign key "replies_message_id_fkey" references relation "fkeys.messages" -ERROR: relation "fkeys.messages" is referenced from other relations +WARNING: foreign key "replies_message_id_fkey" references table "fkeys.messages" +ERROR: table "fkeys.messages" is referenced from other tables ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ NOTICE: sequence "messages_seq" does not exist, skipping diff --git a/hash.sql b/hash.sql index 9416311b..b8412184 100644 --- a/hash.sql +++ b/hash.sql @@ -21,18 +21,10 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( RETURNS INTEGER AS $$ BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - expression := lower(expression); - PERFORM @extschema@.common_relation_checks(parent_relid, expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Insert new entry to pathman config */ EXECUTE format('ANALYZE %s', 
parent_relid); diff --git a/init.sql b/init.sql index 84582672..000747a9 100644 --- a/init.sql +++ b/init.sql @@ -438,51 +438,58 @@ $$ LANGUAGE plpgsql STRICT; /* - * Aggregates several common relation checks before partitioning. - * Suitable for every partitioning type. + * Check a few things and take locks before partitioning. */ -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( - relation REGCLASS, - expression TEXT) -RETURNS BOOLEAN AS +CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + expression TEXT, + partition_data BOOLEAN) +RETURNS VOID AS $$ DECLARE - v_rec RECORD; + constr_name TEXT; is_referenced BOOLEAN; rel_persistence CHAR; BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = relation INTO rel_persistence; + WHERE oid = parent_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be partitioned', - relation::TEXT; + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', parent_relid; END IF; IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = relation) THEN - RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint - WHERE confrelid = relation::REGCLASS::OID) + FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint + WHERE confrelid = parent_relid::REGCLASS::OID) LOOP is_referenced := TRUE; - RAISE WARNING 
'foreign key "%" references relation "%"', - v_rec.conname, relation; + RAISE WARNING 'foreign key "%" references table "%"', constr_name, parent_relid; END LOOP; IF is_referenced THEN - RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + RAISE EXCEPTION 'table "%" is referenced from other tables', parent_relid; END IF; - RETURN FALSE; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; + /* * Returns relname without quotes or something. diff --git a/range.sql b/range.sql index 6b8e08d7..d5c37ef6 100644 --- a/range.sql +++ b/range.sql @@ -60,29 +60,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( - parent_relid REGCLASS, - expression TEXT, - partition_data BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - expression := lower(expression); - PERFORM @extschema@.common_relation_checks(parent_relid, expression); -END -$$ LANGUAGE plpgsql; - /* * Creates RANGE partitions for specified relation based on datetime attribute */ @@ -106,7 +83,9 @@ DECLARE BEGIN expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; @@ -206,7 +185,9 @@ DECLARE BEGIN expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; @@ -307,7 +288,9 @@ BEGIN END IF; 
expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -358,7 +341,9 @@ DECLARE BEGIN expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -415,8 +400,9 @@ DECLARE BEGIN expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, - partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -483,6 +469,9 @@ DECLARE BEGIN parent_relid = @extschema@.get_parent_of_partition(partition_relid); + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + /* Acquire lock on parent */ PERFORM @extschema@.lock_partitioned_relation(parent_relid); @@ -839,8 +828,11 @@ DECLARE BEGIN parent_relid := @extschema@.get_parent_of_partition(partition_relid); - part_name := partition_relid::TEXT; /* save the name to be returned */ + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ part_type := @extschema@.get_partition_type(parent_relid); /* Check if this is a RANGE partition */ @@ -972,18 +964,22 @@ RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; - part_expr TEXT; + part_type INTEGER; BEGIN parent_relid := @extschema@.get_parent_of_partition(partition_relid); + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM 
@extschema@.validate_relname(partition_relid); + /* Acquire lock on parent */ PERFORM @extschema@.prevent_relation_modification(parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); - IF part_expr IS NULL THEN - RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Remove inheritance */ From bf251027e9037b8370d174d3deabbba835d6a05b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 15:04:29 +0300 Subject: [PATCH 0490/1124] refactoring, add test for drop_triggers() --- expected/pathman_calamity.out | 46 +++++++++++++++++++++++++++++++++++ init.sql | 22 +++++++++-------- sql/pathman_calamity.sql | 16 ++++++++++++ 3 files changed, 74 insertions(+), 10 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 76567373..13206f76 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -777,6 +777,52 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 4 other objects +/* check function drop_triggers() */ +CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); +SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_update_triggers('calamity.trig_test_tbl'); + create_update_triggers +------------------------ + +(1 row) + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; + count +------- + 1 +(1 row) + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; + count +------- + 1 +(1 row) + +SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ + drop_triggers 
+--------------- + +(1 row) + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; + count +------- + 0 +(1 row) + +DROP TABLE calamity.trig_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 20 other objects DROP EXTENSION pg_pathman; diff --git a/init.sql b/init.sql index 000747a9..3226aca6 100644 --- a/init.sql +++ b/init.sql @@ -447,7 +447,7 @@ CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( RETURNS VOID AS $$ DECLARE - constr_name TEXT; + constr_name TEXT; is_referenced BOOLEAN; rel_persistence CHAR; @@ -516,9 +516,10 @@ CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() RETURNS event_trigger AS $$ DECLARE - obj record; - pg_class_oid oid; - relids regclass[]; + obj RECORD; + pg_class_oid OID; + relids REGCLASS[]; + BEGIN pg_class_oid = 'pg_catalog.pg_class'::regclass; @@ -545,20 +546,21 @@ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( RETURNS VOID AS $$ DECLARE - triggername TEXT; - rec RECORD; + triggername TEXT; + relation OID; BEGIN triggername := @extschema@.build_update_trigger_name(parent_relid); /* Drop trigger for each partition if exists */ - FOR rec IN (SELECT pg_catalog.pg_inherits.* FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) + FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid + FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) LOOP EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', triggername, - rec.inhrelid::REGCLASS::TEXT); + relation::REGCLASS); END LOOP; /* Drop trigger on parent */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 586da042..b7e4ec8c 100644 --- a/sql/pathman_calamity.sql 
+++ b/sql/pathman_calamity.sql @@ -333,6 +333,22 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +/* check function drop_triggers() */ +CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); +SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); +SELECT create_update_triggers('calamity.trig_test_tbl'); + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; + +SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; + +DROP TABLE calamity.trig_test_tbl CASCADE; + + DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; From 3d8966aca87a2f903431f08ed81222b464ff06f4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 17:04:29 +0300 Subject: [PATCH 0491/1124] store table row types for wholerow references in AppendRelInfo --- src/pg_pathman.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 5ced8862..3b409eed 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -334,6 +334,10 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, appinfo->child_relid = childRTindex; appinfo->parent_reloid = parent_rte->relid; + /* Store table row types for wholerow references */ + appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; + appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; + make_inh_translation_list(parent_relation, child_relation, childRTindex, &appinfo->translated_vars); From 65786abff97329554f897c61612489cbc2649f3a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 18:36:08 +0300 Subject: [PATCH 0492/1124] take lock in add_to_pathman_config() --- src/pl_funcs.c 
| 11 +++++++---- src/relation_info.c | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 8855336d..5b578ceb 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -725,6 +725,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); + /* Lock relation */ + xact_lock_rel_exclusive(relid, true); + /* Check that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -792,11 +795,11 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* * Initialize columns (partrel, attname, parttype, range_interval). */ - values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); - isnull[Anum_pathman_config_partrel - 1] = false; + values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); + isnull[Anum_pathman_config_partrel - 1] = false; - values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); - isnull[Anum_pathman_config_parttype - 1] = false; + values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); + isnull[Anum_pathman_config_parttype - 1] = false; values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expr - 1] = false; diff --git a/src/relation_info.c b/src/relation_info.c index 4d70ab4f..d71abfeb 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -590,7 +590,7 @@ parse_partitioning_expression(const Oid relid, return ((ResTarget *) linitial(select_stmt->targetList))->val; } -/* Parse partitioning expression and return its type and nodeToString() */ +/* Parse partitioning expression and return its type and nodeToString() as TEXT */ Datum cook_partitioning_expression(const Oid relid, const char *expr_cstr, From 9c167fef34157824d6fe70e0ef8e9becbd28d0a8 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 11 May 2017 18:41:52 +0300 Subject: [PATCH 0493/1124] 
rewrite drop_triggers() in C and adapted it to multilevel partitioning --- init.sql | 34 +-------- src/include/partition_creation.h | 3 + src/include/utils.h | 11 +-- src/partition_creation.c | 15 +++- src/partition_filter.c | 3 +- src/pl_funcs.c | 127 +++++++++++++++++++++++++++---- src/pl_range_funcs.c | 14 ---- src/utils.c | 13 ++++ 8 files changed, 156 insertions(+), 64 deletions(-) diff --git a/init.sql b/init.sql index e27b533e..59a563d5 100644 --- a/init.sql +++ b/init.sql @@ -534,35 +534,8 @@ LANGUAGE plpgsql; */ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( parent_relid REGCLASS) -RETURNS VOID AS -$$ -DECLARE - triggername TEXT; - rec RECORD; - -BEGIN - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Drop trigger for each partition if exists */ - FOR rec IN (SELECT pg_catalog.pg_inherits.* FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) - LOOP - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - rec.inhrelid::REGCLASS::TEXT); - END LOOP; - - /* Drop trigger on parent */ - IF EXISTS (SELECT * FROM pg_catalog.pg_trigger - WHERE tgname = triggername AND tgrelid = parent_relid) - THEN - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - parent_relid::TEXT); - END IF; -END -$$ LANGUAGE plpgsql STRICT; +RETURNS VOID AS 'pg_pathman', 'drop_update_triggers' +LANGUAGE C STRICT; /* * Drop partitions. If delete_data set to TRUE, partitions @@ -767,7 +740,8 @@ LANGUAGE C STRICT; * Get parent of pg_pathman's partition. 
*/ CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( - partition_relid REGCLASS) + partition_relid REGCLASS, + raise_error BOOL DEFAULT TRUE) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index a194c165..df48fee7 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,6 +86,9 @@ void create_single_update_trigger_internal(Oid partition_relid, bool has_update_trigger_internal(Oid parent); +void drop_single_update_trigger_internal(Oid relid, + const char *trigname); + /* Partitioning callback type */ typedef enum { diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..bcff887c 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -30,13 +30,14 @@ bool match_expr_to_operand(Node *expr, Node *operand); * Misc. */ Oid get_pathman_schema(void); -List * list_reverse(List *l); +List *list_reverse(List *l); /* * Useful functions for relations. */ Oid get_rel_owner(Oid relid); -char * get_rel_name_or_relid(Oid relid); +char *get_rel_name_or_relid(Oid relid); +char *get_qualified_rel_name(Oid relid); RangeVar *makeRangeVarFromRelid(Oid relid); /* @@ -52,13 +53,13 @@ void extract_op_func_and_ret_type(char *opname, /* * Print values and cast types. 
*/ -char * datum_to_cstring(Datum datum, Oid typid); +char *datum_to_cstring(Datum datum, Oid typid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); -char ** deconstruct_text_array(Datum array, int *array_size); -RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +char **deconstruct_text_array(Datum array, int *array_size); +RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); #endif /* PATHMAN_UTILS_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 4bb54b2e..991f852e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1741,7 +1741,7 @@ build_partitioning_expression(Oid parent_relid, /* * ------------------------- - * Update trigger creation + * Update triggers management * ------------------------- */ @@ -1774,6 +1774,8 @@ create_single_update_trigger_internal(Oid partition_relid, (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false); + + CommandCounterIncrement(); } /* Check if relation has pg_pathman's update trigger */ @@ -1816,3 +1818,14 @@ has_update_trigger_internal(Oid parent_relid) return res; } + +void +drop_single_update_trigger_internal(Oid relid, + const char *trigname) +{ + Oid trigoid; + + trigoid = get_trigger_oid(relid, trigname, true); + if (OidIsValid(trigoid)) + RemoveTriggerById(trigoid); +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 63c59282..b2978eea 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -478,7 +478,8 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Assert(rri_holder->expr_state != NULL); /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(econtext, rri_holder->expr_state, + rri_holder = select_partition_for_insert(econtext, + rri_holder->expr_state, subprel, 
parts_storage, estate); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 982292e5..80858fbb 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -70,6 +70,7 @@ PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); +PG_FUNCTION_INFO_V1( drop_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); @@ -113,6 +114,9 @@ static void pathman_update_trigger_func_move_tuple(Relation source_rel, HeapTuple old_tuple, HeapTuple new_tuple); +static void create_update_triggers_internal(Oid relid); +static void drop_update_triggers_internal(Oid relid); + static void collect_update_trigger_columns(Oid relid, List **columns); static Oid find_target_partition(Relation source_rel, HeapTuple tuple); static Oid find_topmost_parent(Oid partition); @@ -150,6 +154,7 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) Oid partition = PG_GETARG_OID(0); PartParentSearch parent_search; Oid parent; + bool emit_error = PG_GETARG_BOOL(1); /* Fetch parent & write down search status */ parent = get_parent_of_partition(partition, &parent_search); @@ -160,14 +165,13 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) /* It must be parent known by pg_pathman */ if (parent_search == PPS_ENTRY_PART_PARENT) PG_RETURN_OID(parent); - else - { + + if (emit_error) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is not a partition", get_rel_name_or_relid(partition)))); - PG_RETURN_NULL(); - } + PG_RETURN_NULL(); } /* @@ -1480,11 +1484,41 @@ pathman_update_trigger_func_move_tuple(Relation source_rel, FreeTupleDesc(target_tupdesc); } -/* Create UPDATE triggers for all partitions */ +/* + * Create UPDATE triggers for all partitions and subpartitions + */ Datum create_update_triggers(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0), + parent; + 
PartParentSearch parent_search; + + /* + * If table has parent then we should check that parent has update trigger. + * In the ideal world this error should never be thrown since we create and + * drop update triggers for the whole partitions tree and not its parts + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(!has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must have an update trigger"), + errhint("Try to perform SELECT %s.create_update_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + + /* Recursively add triggers */ + create_update_triggers_internal(relid); + PG_RETURN_VOID(); +} + +/* + * Create UPDATE triggers recursively + */ +static void +create_update_triggers_internal(Oid relid) +{ Oid *children; const char *trigname; const PartRelationInfo *prel; @@ -1492,24 +1526,30 @@ create_update_triggers(PG_FUNCTION_ARGS) List *columns = NIL; /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); + prel = get_pathman_relation_info(relid); + /* TODO: check this only for topmost relid? 
*/ + // shout_if_prel_is_invalid(relid, prel, PT_ANY); + if (!prel) + return; - /* Acquire trigger and attribute names */ - trigname = build_update_trigger_name_internal(parent); + /* Acquire trigger name */ + trigname = build_update_trigger_name_internal(relid); /* Create trigger for parent */ - collect_update_trigger_columns(parent, &columns); - create_single_update_trigger_internal(parent, trigname, columns); + collect_update_trigger_columns(relid, &columns); + create_single_update_trigger_internal(relid, trigname, columns); /* Fetch children array */ children = PrelGetChildrenArray(prel); /* Create triggers for each partition */ for (i = 0; i < PrelChildrenCount(prel); i++) + { create_single_update_trigger_internal(children[i], trigname, columns); - PG_RETURN_VOID(); + /* Perform the same procedure on subpartitions */ + create_update_triggers_internal(children[i]); + } } static void @@ -1558,6 +1598,67 @@ create_single_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Drop UPDATE triggers for all partitions and subpartitions + */ +Datum +drop_update_triggers(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0), + parent; + PartParentSearch parent_search; + + /* + * We can drop triggers only if relid is the topmost parent table (or if + * its parent doesn't have update triggers (which should never happen in + * the ideal world) + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must not have an update trigger"), + errhint("Try to perform SELECT %s.drop_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + + /* Recursively drop triggers */ + drop_update_triggers_internal(relid); + PG_RETURN_VOID(); +} + +static void +drop_update_triggers_internal(Oid relid) +{ + Oid *children; + const char *trigname; + const PartRelationInfo *prel; + uint32 i; + + prel = 
get_pathman_relation_info(relid); + if (!prel) + return; + + /* Acquire trigger name */ + trigname = build_update_trigger_name_internal(relid); + + /* Fetch children array */ + children = PrelGetChildrenArray(prel); + + /* Drop triggers on partitions */ + for (i = 0; i < PrelChildrenCount(prel); i++) + { + drop_single_update_trigger_internal(children[i], trigname); + + /* Recursively drop triggers on subpartitions */ + drop_update_triggers_internal(children[i]); + + } + + /* Drop trigger on parent */ + drop_single_update_trigger_internal(relid, trigname); +} + /* Check if relation has pg_pathman's update trigger */ Datum has_update_trigger(PG_FUNCTION_ARGS) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index fff7c76d..a4570b80 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -64,7 +64,6 @@ static void modify_range_constraint(Oid partition_relid, Oid expression_type, const Bound *lower, const Bound *upper); -static char *get_qualified_rel_name(Oid relid); static void drop_table_by_oid(Oid relid); static bool interval_is_trivial(Oid atttype, Datum interval, @@ -1156,19 +1155,6 @@ check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) } } -/* - * Return palloced fully qualified relation name as a cstring - */ -static char * -get_qualified_rel_name(Oid relid) -{ - Oid nspid = get_rel_namespace(relid); - - return psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(get_rel_name(relid))); -} - /* * Drop table using it's Oid */ diff --git a/src/utils.c b/src/utils.c index 5f070e30..25cb987b 100644 --- a/src/utils.c +++ b/src/utils.c @@ -193,6 +193,19 @@ get_rel_name_or_relid(Oid relid) return relname; } +/* + * Return palloced fully qualified relation name as a cstring + */ +char * +get_qualified_rel_name(Oid relid) +{ + Oid nspid = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(get_rel_name(relid))); +} + RangeVar * 
makeRangeVarFromRelid(Oid relid) { From 5d43c5721d3e4ee216a5c8681c6aa91db9babea4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 18:48:27 +0300 Subject: [PATCH 0494/1124] canonicalize user's partitioning expression in add_to_pathman_config() --- src/pl_funcs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 5b578ceb..84b49dc0 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -32,6 +32,7 @@ #include "nodes/nodeFuncs.h" #include "utils/builtins.h" #include "utils/inval.h" +#include "utils/ruleutils.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -781,6 +782,11 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Parse and check expression */ expr_datum = cook_partitioning_expression(relid, expression, &expr_type); + /* Canonicalize user's expression (trim whitespaces etc) */ + expression = deparse_expression(stringToNode(TextDatumGetCString(expr_datum)), + deparse_context_for(get_rel_name(relid), relid), + false, false); + /* Check hash function for HASH partitioning */ if (parttype == PT_HASH) { From 24ca95de424fac6c15a6368c77581feef66885b3 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 19:27:27 +0300 Subject: [PATCH 0495/1124] Fix updates on foreign tables --- expected/pathman_update_node.out | 4 +- src/hooks.c | 26 ++++++++++--- src/partition_filter.c | 22 +---------- src/partition_update.c | 14 ------- src/planner_tree_modification.c | 61 ++++++++++++++++++++++++++----- tests/python/partitioning_test.py | 16 +++----- 6 files changed, 82 insertions(+), 61 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index f4312b2c..2976e767 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -18,7 +18,7 @@ NOTICE: sequence "test_range_seq" does not exist, skipping EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; QUERY PLAN 
------------------------------------------------------------------- - Insert on test_range_2 + Update on test_range_2 -> Custom Scan (PrepareInsert) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 @@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; QUERY PLAN ------------------------------------------------------------------- - Insert on test_range_2 + Update on test_range_2 -> Custom Scan (PrepareInsert) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 diff --git a/src/hooks.c b/src/hooks.c index efa7609e..7bda2f63 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,6 +36,7 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" +static PlannerInfo* pathman_planner_info = NULL; /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ @@ -279,6 +280,9 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; + /* save root, we will use in plan modify stage */ + pathman_planner_info = root; + /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. 
@@ -524,18 +528,21 @@ pg_pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, proc) \ +#define ExecuteForPlanTree(planned_stmt, context, proc) \ do { \ ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ + proc((context), (planned_stmt)->planTree); \ foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ + proc((context), (Plan *) lfirst(lc)); \ } while (0) PlannedStmt *result; uint32 query_id = parse->queryId; bool pathman_ready = IsPathmanReady(); /* in case it changes */ + /* rel_pathlist_hook will set this variable */ + pathman_planner_info = NULL; + PG_TRY(); { if (pathman_ready && pathman_hooks_enabled) @@ -555,14 +562,17 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready && pathman_hooks_enabled) { + List *update_nodes_context; + /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); + ExecuteForPlanTree(result, result->rtable, postprocess_lock_rows); /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + ExecuteForPlanTree(result, result->rtable, add_partition_filters); /* Add PartitionUpdate node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_update_nodes); + update_nodes_context = list_make2(result->rtable, pathman_planner_info); + ExecuteForPlanTree(result, update_nodes_context, add_partition_update_nodes); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); @@ -587,6 +597,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Finally return the Plan */ return result; +#undef ExecuteForPlanTree } /* @@ -867,6 +878,9 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, */ cstate->saved_junkFilter = 
cstate->resultRelInfo->ri_junkFilter; cstate->resultRelInfo->ri_junkFilter = NULL; + + /* hack, change UPDATE operation to INSERT */ + mt_state->operation = CMD_INSERT; } } } diff --git a/src/partition_filter.c b/src/partition_filter.c index 5def7e1f..81cdcbc4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -320,30 +320,12 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (parts_storage->command_type == CMD_UPDATE) { - char relkind; JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - /* we don't need junk work in UPDATE */ + /* we don't need junk cleaning in ExecModifyTable */ child_result_rel_info->ri_junkFilter = NULL; - relkind = base_rel->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "ctid"); - if (!AttributeNumberIsValid(junkfilter->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } - else if (relkind == RELKIND_FOREIGN_TABLE) - { - /* - * When there is an AFTER trigger, there should be a - * wholerow attribute. - */ - junkfilter->jf_junkAttNo = ExecFindJunkAttribute(junkfilter, "wholerow"); - } - else - elog(ERROR, "wrong type of relation"); - + /* instead we do junk filtering ourselves */ rri_holder->updates_junkFilter = junkfilter; } diff --git a/src/partition_update.c b/src/partition_update.c index e925ac6d..7b50c81c 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -60,17 +60,6 @@ init_partition_update_static_data(void) NULL); } -/* - * By default UPDATE queries will make ForeignUpdate nodes for foreign tables. 
- * This function modifies these nodes so they will work as SELECTs - */ -static void -modify_fdw_scan(ForeignScan *node) -{ - node->scan.plan.plan_node_id = CMD_SELECT; - node->operation = CMD_SELECT; -} - Plan * make_partition_update(Plan *subplan, Oid parent_relid, @@ -86,9 +75,6 @@ make_partition_update(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; - if (IsA(subplan, ForeignScan)) - modify_fdw_scan((ForeignScan *) subplan); - /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d9af9e00..bffc880c 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -19,6 +19,7 @@ #include "rewrite/rewriteManip.h" #include "access/htup_details.h" +#include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" #include "storage/lmgr.h" @@ -375,10 +376,10 @@ add_partition_filters(List *rtable, Plan *plan) /* Add PartitionUpdate nodes to the plan tree */ void -add_partition_update_nodes(List *rtable, Plan *plan) +add_partition_update_nodes(List *context, Plan *plan) { if (pg_pathman_enable_partition_update) - plan_tree_walker(plan, partition_update_visitor, rtable); + plan_tree_walker(plan, partition_update_visitor, context); } /* @@ -430,6 +431,45 @@ partition_filter_visitor(Plan *plan, void *context) } +static List * +recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) +{ + ListCell *lc; + int i = 0; + List *fdw_private_list = NIL; + + /* we need DELETE queries for FDW */ + node->operation = CMD_DELETE; + + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + FdwRoutine *fdwroutine; + List *fdw_private; + + RangeTblEntry *rte = rt_fetch(rti, rtable); + Assert(rte->rtekind == RTE_RELATION); + if (rte->relkind != RELKIND_FOREIGN_TABLE) 
+ continue; + + fdwroutine = GetFdwRoutineByRelId(rte->relid); + + if (fdwroutine != NULL && + fdwroutine->PlanForeignModify != NULL) + fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i); + else + fdw_private = NIL; + + fdw_private_list = lappend(fdw_private_list, fdw_private); + i++; + } + + /* restore operation */ + node->operation = CMD_UPDATE; + return fdw_private_list; +} + + /* * Add partition update to ModifyTable node's children. * @@ -438,11 +478,12 @@ partition_filter_visitor(Plan *plan, void *context) static void partition_update_visitor(Plan *plan, void *context) { - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2, - *lc3; + List *rtable = (List *) linitial((List *) context); + PlannerInfo *root = (PlannerInfo *) lsecond((List *) context); + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) @@ -476,8 +517,6 @@ partition_update_visitor(Plan *plan, void *context) { List *returning_list = NIL; - modify_table->operation = CMD_INSERT; - /* Extract returning list if possible */ if (lc3) { @@ -488,6 +527,10 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, returning_list); + + /* change fdw queries to DELETE */ + modify_table->fdwPrivLists = + recreate_fdw_private_list(root, rtable, modify_table); } } } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 9bda06d9..cc67eaf0 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -557,12 +557,12 @@ def test_update_node_on_fdw_tables(self): 'postgres', 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - #master.safe_psql('postgres', - # 'set pg_pathman.enable_partitionupdate=on') + master.safe_psql('postgres', + 'set 
pg_pathman.enable_partitionupdate=on') with master.connect() as con: con.begin() - #con.execute('set pg_pathman.enable_partitionupdate=on') + con.execute('set pg_pathman.enable_partitionupdate=on') con.execute('insert into abc select i from generate_series(1, 19) i') con.commit() @@ -575,13 +575,9 @@ def test_update_node_on_fdw_tables(self): # - update from foreign to local # - update from foreign to foreign - #con.execute('update abc set id=36 where id=9') - #result_relid = con.execute('select tableoid from abc where id=35')[0][0] - #self.assertEqual(result_relid, dest_relid) - - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - pass + con.execute('update abc set id=36 where id=9') + result_relid = con.execute('select tableoid from abc where id=35')[0][0] + self.assertEqual(result_relid, dest_relid) def test_parallel_nodes(self): """Test parallel queries under partitions""" From 66655d3f39c07a43432ac99610421e2b6de3cb22 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 11 May 2017 19:35:10 +0300 Subject: [PATCH 0496/1124] Add more tests for update node --- tests/python/partitioning_test.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cc67eaf0..b7091213 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -563,7 +563,7 @@ def test_update_node_on_fdw_tables(self): with master.connect() as con: con.begin() con.execute('set pg_pathman.enable_partitionupdate=on') - con.execute('insert into abc select i from generate_series(1, 19) i') + con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") con.commit() source_relid = con.execute('select tableoid from abc where id=9')[0][0] @@ -572,13 +572,21 @@ def test_update_node_on_fdw_tables(self): # cases # - update from local to foreign - # - update from foreign to local # - update from foreign to foreign + # - update from foreign to local 
con.execute('update abc set id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=35')[0][0] + result_relid = con.execute('select tableoid from abc where id=36')[0][0] self.assertEqual(result_relid, dest_relid) + con.execute('update abc set id=38 where id=36') + result_relid = con.execute('select tableoid from abc where id=38')[0][0] + self.assertEqual(result_relid, dest_relid) + + con.execute('update abc set id=9 where id=35') + result_relid = con.execute('select tableoid from abc where id=9')[0][0] + self.assertEqual(result_relid, source_relid) + def test_parallel_nodes(self): """Test parallel queries under partitions""" From 89dcaef45ac634b20e52099071fe822f9ae96561 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 19:38:58 +0300 Subject: [PATCH 0497/1124] use find_inheritance_children_array() in add_to_pathman_config() --- src/init.c | 2 -- src/pl_funcs.c | 9 ++++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/init.c b/src/init.c index 84db692e..4b0d6d19 100644 --- a/src/init.c +++ b/src/init.c @@ -418,8 +418,6 @@ find_inheritance_children_array(Oid parentrelId, uint32 i; - Assert(lockmode != NoLock); - /* Init safe return values */ *children_size = 0; *children = NULL; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 84b49dc0..74b5f4c6 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -708,6 +708,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) char *expression; PartType parttype; + Oid *children; + uint32 children_count; + Relation pathman_config; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; @@ -823,8 +826,12 @@ add_to_pathman_config(PG_FUNCTION_ARGS) heap_close(pathman_config, RowExclusiveLock); /* Update caches only if this relation has children */ - if (has_subclass(relid)) + if (FCS_FOUND == find_inheritance_children_array(relid, NoLock, true, + &children_count, + &children)) { + pfree(children); + /* Now try to create a PartRelationInfo */ PG_TRY(); { From 
171696f89b4aee712d5b09307824703c2cbfd1f2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 11 May 2017 19:44:19 +0300 Subject: [PATCH 0498/1124] don't use analyze before add_to_pathman_config() --- expected/pathman_permissions.out | 1 - hash.sql | 1 - range.sql | 5 ----- 3 files changed, 7 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 681c4627..ea176668 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -19,7 +19,6 @@ GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -WARNING: skipping "user1_table" --- only table or database owner can analyze it ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" /* Should be ok */ SET ROLE user1; diff --git a/hash.sql b/hash.sql index b8412184..06056245 100644 --- a/hash.sql +++ b/hash.sql @@ -27,7 +27,6 @@ BEGIN partition_data); /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression); /* Create partitions */ diff --git a/range.sql b/range.sql index d5c37ef6..83f9e894 100644 --- a/range.sql +++ b/range.sql @@ -134,7 +134,6 @@ BEGIN END IF; /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); @@ -234,7 +233,6 @@ BEGIN END IF; /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); @@ -299,7 +297,6 @@ BEGIN bounds[array_length(bounds, 1) - 1]); /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); /* Create sequence for child 
partitions names */ @@ -352,7 +349,6 @@ BEGIN end_value); /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); @@ -411,7 +407,6 @@ BEGIN end_value); /* Insert new entry to pathman config */ - EXECUTE format('ANALYZE %s', parent_relid); PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); From be854e78e8092b9e6f3a8696899c324b7f477b8b Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 12 May 2017 13:46:25 +0300 Subject: [PATCH 0499/1124] auto trigger creation on create_hash_partitions() and create_range_partitions() if upper parent has trigger --- hash.sql | 13 +++++++++++++ init.sql | 5 +++-- range.sql | 22 ++++++++++++++++++++++ src/partition_creation.c | 19 ++++++++++++++----- src/pl_funcs.c | 30 +++++++++++++++++------------- 5 files changed, 69 insertions(+), 20 deletions(-) diff --git a/hash.sql b/hash.sql index c942e8c6..1349c98f 100644 --- a/hash.sql +++ b/hash.sql @@ -20,6 +20,9 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ +DECLARE + v_upper_parent REGCLASS; + BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -45,6 +48,16 @@ BEGIN partition_names, tablespaces); + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Copy data */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); diff --git a/init.sql b/init.sql index 59a563d5..82da5ca9 100644 --- a/init.sql +++ b/init.sql @@ -533,7 +533,8 @@ LANGUAGE plpgsql; * Drop triggers */ CREATE OR REPLACE FUNCTION 
@extschema@.drop_triggers( - parent_relid REGCLASS) + parent_relid REGCLASS, + force BOOL DEFAULT FALSE) RETURNS VOID AS 'pg_pathman', 'drop_update_triggers' LANGUAGE C STRICT; @@ -560,7 +561,7 @@ BEGIN PERFORM @extschema@.prevent_relation_modification(parent_relid); /* First, drop all triggers */ - PERFORM @extschema@.drop_triggers(parent_relid); + PERFORM @extschema@.drop_triggers(parent_relid, TRUE); SELECT count(*) FROM @extschema@.pathman_config WHERE partrel = parent_relid INTO conf_num; diff --git a/range.sql b/range.sql index 40894c7e..8e952256 100644 --- a/range.sql +++ b/range.sql @@ -103,6 +103,7 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; + v_upper_parent REGCLASS; BEGIN expression := lower(expression); @@ -171,6 +172,16 @@ BEGIN NULL); END IF; + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -202,6 +213,7 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; + v_upper_parent REGCLASS; BEGIN expression := lower(expression); @@ -267,6 +279,16 @@ BEGIN NULL); END IF; + /* + * If there is an upper level parent partitioned by pg_pathman and it has + * update triggers then create them too + */ + v_upper_parent = @extschema@.get_parent_of_partition(parent_relid, false); + IF NOT v_upper_parent IS NULL AND @extschema@.has_update_trigger(v_upper_parent) + THEN + PERFORM @extschema@.create_update_triggers(parent_relid); + END IF; + /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); diff --git 
a/src/partition_creation.c b/src/partition_creation.c index 991f852e..b445cb8a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -25,6 +25,7 @@ #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" +#include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" @@ -1823,9 +1824,17 @@ void drop_single_update_trigger_internal(Oid relid, const char *trigname) { - Oid trigoid; - - trigoid = get_trigger_oid(relid, trigname, true); - if (OidIsValid(trigoid)) - RemoveTriggerById(trigoid); + DropStmt *n = makeNode(DropStmt); + const char *relname = get_qualified_rel_name(relid); + List *namelist = stringToQualifiedNameList(relname); + + namelist = lappend(namelist, makeString((char *) trigname)); + n->removeType = OBJECT_TRIGGER; + n->missing_ok = true; + n->objects = list_make1(namelist); + n->arguments = NIL; + n->behavior = DROP_RESTRICT; /* default behavior */ + n->concurrent = false; + + RemoveObjects(n); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 80858fbb..222bc056 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1607,20 +1607,24 @@ drop_update_triggers(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0), parent; PartParentSearch parent_search; + bool force = PG_GETARG_BOOL(1); - /* - * We can drop triggers only if relid is the topmost parent table (or if - * its parent doesn't have update triggers (which should never happen in - * the ideal world) - */ - parent = get_parent_of_partition(relid, &parent_search); - if (parent_search == PPS_ENTRY_PART_PARENT) - if(has_update_trigger_internal(parent)) - ereport(ERROR, - (errmsg("Parent table must not have an update trigger"), - errhint("Try to perform SELECT %s.drop_triggers('%s');", - get_namespace_name(get_pathman_schema()), - get_qualified_rel_name(parent)))); + if (!force) + { + /* + * We can drop triggers only if relid is the topmost parent table (or if + * its parent doesn't have 
update triggers (which should never happen in + * the ideal world) + */ + parent = get_parent_of_partition(relid, &parent_search); + if (parent_search == PPS_ENTRY_PART_PARENT) + if(has_update_trigger_internal(parent)) + ereport(ERROR, + (errmsg("Parent table must not have an update trigger"), + errhint("Try to perform SELECT %s.drop_triggers('%s');", + get_namespace_name(get_pathman_schema()), + get_qualified_rel_name(parent)))); + } /* Recursively drop triggers */ drop_update_triggers_internal(relid); From 3a4f17ce0684a4af5997b6cb6a74ef939b218e08 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 12 May 2017 16:14:20 +0300 Subject: [PATCH 0500/1124] Add different versions of mergejoin test case --- Makefile | 3 +- expected/pathman_basic.out | 31 ----------- expected/pathman_mergejoin.out | 88 ++++++++++++++++++++++++++++++++ expected/pathman_mergejoin_0.out | 86 +++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 8 --- sql/pathman_mergejoin.sql | 42 +++++++++++++++ 6 files changed, 218 insertions(+), 40 deletions(-) create mode 100644 expected/pathman_mergejoin.out create mode 100644 expected/pathman_mergejoin_0.out create mode 100644 sql/pathman_mergejoin.sql diff --git a/Makefile b/Makefile index b908802b..d7637caa 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,8 @@ REGRESS = pathman_basic \ pathman_runtime_nodes \ pathman_utility_stmt_hooking \ pathman_calamity \ - pathman_join_clause + pathman_join_clause \ + pathman_mergejoin EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index a88e74db..e00cc3d1 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1015,38 +1015,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
/* * Join */ -SET enable_hashjoin = OFF; set enable_nestloop = OFF; -SET enable_mergejoin = ON; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Merge Join - Merge Cond: (j2.id = j3.id) - -> Merge Join - Merge Cond: (j1.id = j2.id) - -> Merge Append - Sort Key: j1.id - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 - -> Merge Append - Sort Key: j2.id - -> Index Scan using range_rel_2_pkey on range_rel_2 j2 - -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 - -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 -(20 rows) - SET enable_hashjoin = ON; SET enable_mergejoin = OFF; EXPLAIN (COSTS OFF) diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out new file mode 100644 index 00000000..3a7dfafd --- /dev/null +++ b/expected/pathman_mergejoin.out @@ -0,0 +1,88 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: partitioning key "dt" must be NOT NULL 
+ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); +NOTICE: sequence "range_rel_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +NOTICE: sequence "num_range_rel_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Materialize + -> Merge Join + Merge Cond: (j2.id = j1.id) + -> 
Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 + -> Materialize + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(22 rows) + diff --git a/expected/pathman_mergejoin_0.out b/expected/pathman_mergejoin_0.out new file mode 100644 index 00000000..ae19c0ff --- /dev/null +++ b/expected/pathman_mergejoin_0.out @@ -0,0 +1,86 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: partitioning key "dt" must be NOT NULL +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); +NOTICE: sequence "range_rel_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +NOTICE: sequence "num_range_rel_seq" does not exist, skipping + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY 
test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Join + Merge Cond: (j1.id = j2.id) + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(20 rows) + diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6dc25deb..329d6dc2 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -247,15 +247,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
/* * Join */ -SET enable_hashjoin = OFF; set enable_nestloop = OFF; -SET enable_mergejoin = ON; - -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_mergejoin = OFF; EXPLAIN (COSTS OFF) diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql new file mode 100644 index 00000000..46df040d --- /dev/null +++ b/sql/pathman_mergejoin.sql @@ -0,0 +1,42 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +SELECT COUNT(*) FROM test.num_range_rel; +SELECT COUNT(*) FROM ONLY test.num_range_rel; +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM 
test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; From 075f472dd342750b33007616892eb406a6831f09 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 12 May 2017 17:29:36 +0300 Subject: [PATCH 0501/1124] refactoring in walk_expr_tree()'s machinery --- expected/pathman_basic.out | 14 ++ sql/pathman_basic.sql | 2 + src/hooks.c | 4 +- src/include/pathman.h | 6 +- src/nodes_common.c | 2 +- src/partition_filter.c | 2 +- src/pg_pathman.c | 227 ++++++++++++-------------------- src/planner_tree_modification.c | 2 +- src/rangeset.c | 3 + 9 files changed, 110 insertions(+), 152 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fa053a30..bc61ad37 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -450,6 +450,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; QUERY PLAN -------------------------- @@ -609,6 +616,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; QUERY PLAN -------------------------- diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index ebad5bf2..22216c03 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -165,6 +165,7 @@ SET enable_bitmapscan = OFF; SET enable_seqscan = ON; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 
NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; @@ -189,6 +190,7 @@ SET enable_bitmapscan = OFF; SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; diff --git a/src/hooks.c b/src/hooks.c index fe40b6be..4f5fec03 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -137,7 +137,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, { WrapperNode *wrap; - InitWalkerContext(&context, part_expr, inner_prel, NULL, false); + InitWalkerContext(&context, part_expr, inner_prel, NULL); wrap = walk_expr_tree((Expr *) lfirst(lc), &context); paramsel *= wrap->paramsel; } @@ -345,7 +345,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ranges = list_make1_irange_full(prel, IR_COMPLETE); /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, part_expr, prel, NULL, false); + InitWalkerContext(&context, part_expr, prel, NULL); wrappers = NIL; foreach(lc, rel->baserestrictinfo) { diff --git a/src/include/pathman.h b/src/include/pathman.h index 3bd38467..63d3fc7b 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -146,16 +146,14 @@ typedef struct Node *prel_expr; /* expression from PartRelationInfo */ const PartRelationInfo *prel; /* main partitioning structure */ ExprContext *econtext; /* for ExecEvalExpr() */ - bool for_insert; /* are we in PartitionFilter now? 
*/ } WalkerContext; /* Usual initialization procedure for WalkerContext */ -#define InitWalkerContext(context, expr, prel_info, ecxt, for_ins) \ +#define InitWalkerContext(context, expr, prel_info, ecxt) \ do { \ (context)->prel_expr = (expr); \ (context)->prel = (prel_info); \ (context)->econtext = (ecxt); \ - (context)->for_insert = (for_ins); \ } while (0) /* Check that WalkerContext contains ExprContext (plan execution stage) */ @@ -166,11 +164,11 @@ WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); void select_range_partitions(const Datum value, + const Oid collid, FmgrInfo *cmp_func, const RangeEntry *ranges, const int nranges, const int strategy, - const Oid collid, WrapperNode *result); diff --git a/src/nodes_common.c b/src/nodes_common.c index e67bc0cb..984ff908 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -723,7 +723,7 @@ rescan_append_common(CustomScanState *node) /* First we select all available partitions... */ ranges = list_make1_irange_full(prel, IR_COMPLETE); - InitWalkerContext(&wcxt, prel_expr, prel, econtext, false); + InitWalkerContext(&wcxt, prel_expr, prel, econtext); foreach (lc, scan_state->canon_custom_exprs) { WrapperNode *wn; diff --git a/src/partition_filter.c b/src/partition_filter.c index 51e285c7..74053a46 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -402,7 +402,7 @@ find_partitions_for_value(Datum value, Oid value_type, CopyToTempConst(constbyval, ev_byval); /* We use 0 since varno doesn't matter for Const */ - InitWalkerContext(&wcxt, 0, prel, NULL, true); + InitWalkerContext(&wcxt, 0, prel, NULL); ranges = walk_expr_tree((Expr *) &temp_const, &wcxt)->rangeset; return get_partition_oids(ranges, nparts, prel, false); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7c2c3ade..c7fccb8e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -21,6 +21,7 @@ #include "postgres.h" #include "access/sysattr.h" +#include "catalog/pg_type.h" #include "foreign/fdwapi.h" #include 
"miscadmin.h" #include "optimizer/clauses.h" @@ -51,17 +52,11 @@ void _PG_init(void); /* Expression tree handlers */ static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); -static WrapperNode *handle_const(const Const *c, WalkerContext *context); +static WrapperNode *handle_const(const Const *c, int strategy, WalkerContext *context); static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); -static void handle_binary_opexpr(const Const *c, WalkerContext *context, - WrapperNode *result); - -static void handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result); - static bool is_key_op_param(const OpExpr *expr, const WalkerContext *context, Node **param_ptr); @@ -405,11 +400,11 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Given 'value' and 'ranges', return selected partitions list */ void select_range_partitions(const Datum value, + const Oid collid, FmgrInfo *cmp_func, const RangeEntry *ranges, const int nranges, const int strategy, - const Oid collid, WrapperNode *result) /* returned partitions */ { bool lossy = false, @@ -604,7 +599,7 @@ walk_expr_tree(Expr *expr, WalkerContext *context) { /* Useful for INSERT optimization */ case T_Const: - return handle_const((Const *) expr, context); + return handle_const((Const *) expr, BTEqualStrategyNumber, context); /* AND, OR, NOT expressions */ case T_BoolExpr: @@ -614,7 +609,7 @@ walk_expr_tree(Expr *expr, WalkerContext *context) case T_OpExpr: return handle_opexpr((OpExpr *) expr, context); - /* IN expression */ + /* ANY, ALL, IN expressions */ case T_ScalarArrayOpExpr: return handle_arrexpr((ScalarArrayOpExpr *) expr, context); @@ -634,13 +629,11 @@ walk_expr_tree(Expr *expr, WalkerContext *context) static Node * wrapper_make_expression(WrapperNode 
*wrap, int index, bool *alwaysTrue) { - bool lossy, found; + bool lossy, found; *alwaysTrue = false; - /* - * TODO: use faster algorithm using knowledge - * that we enumerate indexes sequntially. - */ + + /* TODO: possible optimization (we enumerate indexes sequntially). */ found = irange_list_find(wrap->rangeset, index, &lossy); /* Return NULL for always true and always false. */ @@ -694,35 +687,33 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) return (Node *) linitial(args); result = makeNode(BoolExpr); - result->xpr.type = T_BoolExpr; result->args = args; result->boolop = expr->boolop; result->location = expr->location; return (Node *) result; } - else - return copyObject(wrap->orig); + else return copyObject(wrap->orig); } - else - return copyObject(wrap->orig); + else return copyObject(wrap->orig); } /* Const handler */ static WrapperNode * -handle_const(const Const *c, WalkerContext *context) +handle_const(const Const *c, int strategy, WalkerContext *context) { WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - int strategy = BTEqualStrategyNumber; const PartRelationInfo *prel = context->prel; - result->orig = (const Node *) c; + /* Deal with missing strategy */ + if (strategy == 0) + goto handle_const_return; /* * Had to add this check for queries like: * select * from test.hash_rel where txt = NULL; */ - if (!context->for_insert || c->constisnull) + if (c->constisnull) { result->rangeset = NIL; result->paramsel = 0.0; @@ -730,6 +721,28 @@ handle_const(const Const *c, WalkerContext *context) return result; } + /* + * Had to add this check for queries like: + * select * from test.hash_rel where true = false; + * select * from test.hash_rel where false; + * select * from test.hash_rel where $1; + */ + if (c->consttype == BOOLOID) + { + if (c->constvalue == BoolGetDatum(false)) + { + result->rangeset = NIL; + result->paramsel = 0.0; + } + else + { + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + 
result->paramsel = 1.0; + } + + return result; + } + switch (prel->parttype) { case PT_HASH: @@ -739,6 +752,10 @@ handle_const(const Const *c, WalkerContext *context) uint32 idx; /* index of partition */ bool cast_success; + /* Cannot do much about non-equal strategies */ + if (strategy != BTEqualStrategyNumber) + goto handle_const_return; + /* Peform type cast if types mismatch */ if (prel->ev_type != c->consttype) { @@ -759,35 +776,48 @@ handle_const(const Const *c, WalkerContext *context) idx = hash_to_part_index(DatumGetInt32(hash), PrelChildrenCount(prel)); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); + + return result; } - break; case PT_RANGE: { FmgrInfo cmp_finfo; + /* Cannot do much about non-equal strategies + diff. collations */ + if (strategy != BTEqualStrategyNumber && + c->constcollid != prel->ev_collid) + { + goto handle_const_return; + } + fill_type_cmp_fmgr_info(&cmp_finfo, getBaseType(c->consttype), getBaseType(prel->ev_type)); select_range_partitions(c->constvalue, + c->constcollid, &cmp_finfo, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, - prel->ev_collid, result); /* output */ result->paramsel = estimate_paramsel_using_prel(prel, strategy); + + return result; } - break; default: WrongPartType(prel->parttype); } +handle_const_return: + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); + return result; } @@ -855,7 +885,7 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) static WrapperNode * handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) { - WrapperNode *result = (WrapperNode *) palloc(sizeof(WrapperNode)); + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); Node *exprnode = (Node *) linitial(expr->args); Node *arraynode = 
(Node *) lsecond(expr->args); const PartRelationInfo *prel = context->prel; @@ -879,7 +909,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) int num_elems; Datum *elem_values; bool *elem_nulls; - int strategy = BTEqualStrategyNumber; + int strategy = BTEqualStrategyNumber; /* FIXME: wtf! */ /* Extract values from array */ arrayval = DatumGetArrayTypeP(((Const *) arraynode)->constvalue); @@ -940,9 +970,6 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) (const void *) context, sizeof(WalkerContext)); - /* Overload variable to allow search by Const */ - nested_wcxt->for_insert = true; - /* Construct OIDs list */ for (i = 0; i < num_elems; i++) { @@ -992,140 +1019,54 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) static WrapperNode * handle_opexpr(const OpExpr *expr, WalkerContext *context) { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + WrapperNode *result; Node *param; const PartRelationInfo *prel = context->prel; - result->orig = (const Node *) expr; - result->args = NIL; - if (list_length(expr->args) == 2) { /* Is it KEY OP PARAM or PARAM OP KEY? 
*/ if (is_key_op_param(expr, context, ¶m)) { + TypeCacheEntry *tce; + int strategy; + + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + if (IsConstValue(param, context)) { - handle_binary_opexpr(ExtractConst(param, context), context, result); + result = handle_const(ExtractConst(param, context), + strategy, context); + + /* Save expression */ + result->orig = (const Node *) expr; + return result; } /* TODO: estimate selectivity for param if it's Var */ else if (IsA(param, Param) || IsA(param, Var)) { - handle_binary_opexpr_param(prel, result); - return result; - } - } - } - - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = 1.0; - return result; -} - -/* Binary operator handler */ -static void -handle_binary_opexpr(const Const *c, - WalkerContext *context, - WrapperNode *result) -{ - int strategy; - TypeCacheEntry *tce; - const OpExpr *expr = (const OpExpr *) result->orig; - const PartRelationInfo *prel = context->prel; - - /* Exit if Constant is NULL */ - if (c->constisnull) - { - result->rangeset = NIL; - result->paramsel = 1.0; - return; - } - - tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - - /* There's no strategy for this operator, go to end */ - if (strategy == 0) - goto binary_opexpr_return; - - switch (prel->parttype) - { - case PT_HASH: - /* If strategy is "=", select one partiton */ - if (strategy == BTEqualStrategyNumber) - { - Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); - uint32 idx = hash_to_part_index(DatumGetInt32(value), - PrelChildrenCount(prel)); - + result = (WrapperNode *) palloc0(sizeof(WrapperNode)); + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); - return; /* exit on 
equal */ - } - /* Else go to end */ - else goto binary_opexpr_return; + /* Save expression */ + result->orig = (const Node *) expr; - case PT_RANGE: - { - FmgrInfo cmp_func; - Oid collid; - - /* - * We cannot guarantee that we'll return correct partitions set - * if operator collation is different from default attribute collation. - * In this case we just return all of them. - */ - if (expr->opcollid != prel->ev_collid && - strategy != BTEqualStrategyNumber) - goto binary_opexpr_return; - - collid = OidIsValid(expr->opcollid) ? - expr->opcollid : - prel->ev_collid; - - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(c->consttype), - getBaseType(prel->ev_type)); - - select_range_partitions(c->constvalue, - &cmp_func, - PrelGetRangesArray(context->prel), - PrelChildrenCount(context->prel), - strategy, - collid, - result); /* output */ - - result->paramsel = estimate_paramsel_using_prel(prel, strategy); - - return; /* done, now exit */ + return result; } - - default: - WrongPartType(prel->parttype); + } } -binary_opexpr_return: + result = (WrapperNode *) palloc0(sizeof(WrapperNode)); result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = 1.0; -} + result->paramsel = 1.0; /* can't give any estimates */ -/* Estimate selectivity of parametrized quals */ -static void -handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result) -{ - const OpExpr *expr = (const OpExpr *) result->orig; - TypeCacheEntry *tce; - int strategy; - - /* Determine operator type */ - tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + /* Save expression */ + result->orig = (const Node *) expr; - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + return result; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4a804101..26ac2de0 100644 --- 
a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -280,7 +280,7 @@ handle_modification_query(Query *parse) prel_expr = PrelExpressionForRelid(prel, result_rel); /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL, false); + InitWalkerContext(&context, prel_expr, prel, NULL); wrap = walk_expr_tree(expr, &context); ranges = irange_list_intersection(ranges, wrap->rangeset); diff --git a/src/rangeset.c b/src/rangeset.c index 15599f74..01bec5ee 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -400,12 +400,15 @@ irange_list_find(List *rangeset, int index, bool *lossy) foreach (lc, rangeset) { IndexRange irange = lfirst_irange(lc); + if (index >= irange_lower(irange) && index <= irange_upper(irange)) { if (lossy) *lossy = is_irange_lossy(irange); + return true; } } + return false; } From 6abb30c076812a9fa8b1d6736c9790b21d3cf97b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 12 May 2017 17:43:21 +0300 Subject: [PATCH 0502/1124] make clang happy again --- src/partition_creation.c | 1 + src/relation_info.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 00d458ea..346aaeac 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1589,6 +1589,7 @@ invoke_init_callback_internal(init_callback_params *cb_params) default: WrongPartType(cb_params->parttype); + result = NULL; /* keep compiler happy */ } /* Fetch function call data */ diff --git a/src/relation_info.c b/src/relation_info.c index d71abfeb..537b8137 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1294,6 +1294,7 @@ shout_if_prel_is_invalid(const Oid parent_oid, default: WrongPartType(expected_part_type); + expected_str = NULL; /* keep compiler happy */ } elog(ERROR, "relation \"%s\" is not partitioned by %s", From 4ebe6fea23c5737faa21094e00b99b3ff8715d05 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 12 May 2017 18:24:34 +0300 
Subject: [PATCH 0503/1124] fix drop_single_update_trigger_internal() to avoid warnings; fix replace_hash_partition() --- expected/pathman_basic.out | 2 ++ hash.sql | 6 ++++++ range.sql | 2 +- src/partition_creation.c | 25 ++++++++++++++++++++++--- src/pl_funcs.c | 2 -- 5 files changed, 31 insertions(+), 6 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 2c96c7bc..c4531395 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1303,6 +1303,7 @@ SELECT * FROM test.hash_rel WHERE id = 123; /* Test replacing hash partition */ CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); +NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_0" does not exist, skipping replace_hash_partition ------------------------ test.hash_rel_extern @@ -1338,6 +1339,7 @@ CREATE TABLE test.hash_rel_wrong( id INTEGER NOT NULL, value INTEGER); SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_1" does not exist, skipping ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; QUERY PLAN diff --git a/hash.sql b/hash.sql index 1349c98f..0c76f74b 100644 --- a/hash.sql +++ b/hash.sql @@ -143,6 +143,9 @@ BEGIN EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', old_partition, old_constr_name); + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + @extschema@.build_update_trigger_name(parent_relid), + old_partition); /* Attach the new one */ EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); @@ -150,6 +153,9 @@ BEGIN new_partition, @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); + IF @extschema@.has_update_trigger(parent_relid) THEN + PERFORM @extschema@.create_single_update_trigger(parent_relid, new_partition); + END IF; /* Fetch 
init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) diff --git a/range.sql b/range.sql index 8e952256..94dac368 100644 --- a/range.sql +++ b/range.sql @@ -970,7 +970,7 @@ BEGIN INTO v_init_callback; /* If update trigger is enabled then create one for this partition */ - if @extschema@.has_update_trigger(parent_relid) THEN + IF @extschema@.has_update_trigger(parent_relid) THEN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; diff --git a/src/partition_creation.c b/src/partition_creation.c index b445cb8a..4b8211f7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1827,14 +1827,33 @@ drop_single_update_trigger_internal(Oid relid, DropStmt *n = makeNode(DropStmt); const char *relname = get_qualified_rel_name(relid); List *namelist = stringToQualifiedNameList(relname); + Relation relation = NULL; + ObjectAddress address; namelist = lappend(namelist, makeString((char *) trigname)); + + /* + * To avoid warning message about missing trigger we check it beforehand. + * and quit if it doesn't + */ + address = get_object_address(OBJECT_TRIGGER, + namelist, NIL, + &relation, + AccessExclusiveLock, + true); + if (!OidIsValid(address.objectId)) + return; + + /* Actually remove trigger */ n->removeType = OBJECT_TRIGGER; - n->missing_ok = true; n->objects = list_make1(namelist); n->arguments = NIL; n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; - + n->missing_ok = true; + n->concurrent = false; RemoveObjects(n); + + /* Release any relcache reference count, but keep lock until commit. */ + if (relation) + heap_close(relation, NoLock); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 222bc056..f2e7fb06 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1527,8 +1527,6 @@ create_update_triggers_internal(Oid relid) /* Check that table is partitioned */ prel = get_pathman_relation_info(relid); - /* TODO: check this only for topmost relid? 
*/ - // shout_if_prel_is_invalid(relid, prel, PT_ANY); if (!prel) return; From 84739e55e18a8c474052a67ac118a08f221a7afb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 12 May 2017 20:09:29 +0300 Subject: [PATCH 0504/1124] refactoring, simplified function handle_arrexpr() --- src/include/pathman.h | 2 +- src/pg_pathman.c | 278 +++++++++++++++++++----------------------- 2 files changed, 127 insertions(+), 153 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index 63d3fc7b..7ef6ced5 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -160,7 +160,7 @@ typedef struct #define WcxtHasExprContext(wcxt) ( (wcxt)->econtext ) /* Examine expression in order to select partitions */ -WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); +WrapperNode *walk_expr_tree(Expr *expr, const WalkerContext *context); void select_range_partitions(const Datum value, diff --git a/src/pg_pathman.c b/src/pg_pathman.c index c7fccb8e..861681a1 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -52,16 +52,30 @@ void _PG_init(void); /* Expression tree handlers */ static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); -static WrapperNode *handle_const(const Const *c, int strategy, WalkerContext *context); -static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); -static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); -static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); + +static void handle_const(const Const *c, + const int strategy, + const WalkerContext *context, + WrapperNode *result); + +static void handle_boolexpr(const BoolExpr *expr, + const WalkerContext *context, + WrapperNode *result); + +static void handle_arrexpr(const ScalarArrayOpExpr *expr, + const WalkerContext *context, + WrapperNode *result); + +static void handle_opexpr(const OpExpr *expr, + const WalkerContext *context, + WrapperNode *result); static 
bool is_key_op_param(const OpExpr *expr, const WalkerContext *context, Node **param_ptr); -static Const *extract_const(Param *param, WalkerContext *wcxt); +static Const *extract_const(Param *param, + const WalkerContext *context); /* Copied from PostgreSQL (allpaths.c) */ @@ -591,35 +605,38 @@ select_range_partitions(const Datum value, /* Examine expression in order to select partitions */ WrapperNode * -walk_expr_tree(Expr *expr, WalkerContext *context) +walk_expr_tree(Expr *expr, const WalkerContext *context) { - WrapperNode *result; + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); switch (nodeTag(expr)) { /* Useful for INSERT optimization */ case T_Const: - return handle_const((Const *) expr, BTEqualStrategyNumber, context); + handle_const((Const *) expr, BTEqualStrategyNumber, context, result); + return result; /* AND, OR, NOT expressions */ case T_BoolExpr: - return handle_boolexpr((BoolExpr *) expr, context); + handle_boolexpr((BoolExpr *) expr, context, result); + return result; /* =, !=, <, > etc. 
*/ case T_OpExpr: - return handle_opexpr((OpExpr *) expr, context); + handle_opexpr((OpExpr *) expr, context, result); + return result; /* ANY, ALL, IN expressions */ case T_ScalarArrayOpExpr: - return handle_arrexpr((ScalarArrayOpExpr *) expr, context); + handle_arrexpr((ScalarArrayOpExpr *) expr, context, result); + return result; default: - result = (WrapperNode *) palloc(sizeof(WrapperNode)); result->orig = (const Node *) expr; result->args = NIL; - result->paramsel = 1.0; result->rangeset = list_make1_irange_full(context->prel, IR_LOSSY); + result->paramsel = 1.0; return result; } @@ -699,10 +716,12 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) /* Const handler */ -static WrapperNode * -handle_const(const Const *c, int strategy, WalkerContext *context) +static void +handle_const(const Const *c, + const int strategy, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); const PartRelationInfo *prel = context->prel; /* Deal with missing strategy */ @@ -718,7 +737,7 @@ handle_const(const Const *c, int strategy, WalkerContext *context) result->rangeset = NIL; result->paramsel = 0.0; - return result; + return; /* done, exit */ } /* @@ -740,7 +759,7 @@ handle_const(const Const *c, int strategy, WalkerContext *context) result->paramsel = 1.0; } - return result; + return; /* done, exit */ } switch (prel->parttype) @@ -779,7 +798,7 @@ handle_const(const Const *c, int strategy, WalkerContext *context) result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - return result; + return; /* done, exit */ } case PT_RANGE: @@ -807,7 +826,7 @@ handle_const(const Const *c, int strategy, WalkerContext *context) result->paramsel = estimate_paramsel_using_prel(prel, strategy); - return result; + return; /* done, exit */ } default: @@ -817,17 +836,16 @@ handle_const(const Const *c, int 
strategy, WalkerContext *context) handle_const_return: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - - return result; } /* Boolean expression handler */ -static WrapperNode * -handle_boolexpr(const BoolExpr *expr, WalkerContext *context) +static void +handle_boolexpr(const BoolExpr *expr, + const WalkerContext *context, + WrapperNode *result) { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - ListCell *lc; - const PartRelationInfo *prel = context->prel; + ListCell *lc; + const PartRelationInfo *prel = context->prel; result->orig = (const Node *) expr; result->args = NIL; @@ -840,22 +858,22 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) foreach (lc, expr->args) { - WrapperNode *arg; + WrapperNode *arg_result; - arg = walk_expr_tree((Expr *) lfirst(lc), context); - result->args = lappend(result->args, arg); + arg_result = walk_expr_tree((Expr *) lfirst(lc), context); + result->args = lappend(result->args, arg_result); switch (expr->boolop) { case OR_EXPR: result->rangeset = irange_list_union(result->rangeset, - arg->rangeset); + arg_result->rangeset); break; case AND_EXPR: result->rangeset = irange_list_intersection(result->rangeset, - arg->rangeset); - result->paramsel *= arg->paramsel; + arg_result->rangeset); + result->paramsel *= arg_result->paramsel; break; default: @@ -877,151 +895,110 @@ handle_boolexpr(const BoolExpr *expr, WalkerContext *context) } result->paramsel = 1.0 - result->paramsel; } - - return result; } /* Scalar array expression handler */ -static WrapperNode * -handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) +static void +handle_arrexpr(const ScalarArrayOpExpr *expr, + const WalkerContext *context, + WrapperNode *result) { - WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - Node *exprnode = (Node *) linitial(expr->args); - Node *arraynode = (Node *) lsecond(expr->args); - const 
PartRelationInfo *prel = context->prel; + Node *exprnode = (Node *) linitial(expr->args); + Node *arraynode = (Node *) lsecond(expr->args); + const PartRelationInfo *prel = context->prel; + TypeCacheEntry *tce; + int strategy; result->orig = (const Node *) expr; - result->args = NIL; - result->paramsel = 0.0; - Assert(exprnode != NULL); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); if (!match_expr_to_operand(context->prel_expr, exprnode)) goto handle_arrexpr_return; - if (arraynode && IsA(arraynode, Const) && - !((Const *) arraynode)->constisnull) + /* Handle non-null Const arrays */ + if (arraynode && IsA(arraynode, Const) && !((Const *) arraynode)->constisnull) { - ArrayType *arrayval; - int16 elemlen; - bool elembyval; - char elemalign; - int num_elems; - Datum *elem_values; - bool *elem_nulls; - int strategy = BTEqualStrategyNumber; /* FIXME: wtf! */ + ArrayType *arrayval; + + int16 elemlen; + bool elembyval; + char elemalign; + + int num_elems; + + Datum *elem_values; + bool *elem_isnull; + + WalkerContext nested_wcxt; + List *ranges = NIL; + int i; /* Extract values from array */ arrayval = DatumGetArrayTypeP(((Const *) arraynode)->constvalue); + get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), &elemlen, &elembyval, &elemalign); + deconstruct_array(arrayval, ARR_ELEMTYPE(arrayval), elemlen, elembyval, elemalign, - &elem_values, &elem_nulls, &num_elems); + &elem_values, &elem_isnull, &num_elems); - result->rangeset = NIL; + /* Copy WalkerContext */ + memcpy((void *) &nested_wcxt, + (const void *) context, + sizeof(WalkerContext)); - switch (prel->parttype) + /* Select partitions using values */ + for (i = 0; i < num_elems; i++) { - case PT_HASH: - { - List *ranges = NIL; - int i; - - /* Construct OIDs list */ - for (i = 0; i < num_elems; i++) - { - Datum value; - uint32 idx; - List *irange; - double cur_paramsel; - - if (!elem_nulls[i]) - { - /* Invoke base hash function for 
value type */ - value = OidFunctionCall1(prel->hash_proc, elem_values[i]); - idx = hash_to_part_index(DatumGetUInt32(value), - PrelChildrenCount(prel)); - - irange = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); - } - /* No children if Const is NULL */ - else irange = NIL; - - ranges = irange_list_union(ranges, irange); - - cur_paramsel = estimate_paramsel_using_prel(prel, strategy); - result->paramsel = Max(result->paramsel, cur_paramsel); - } - - result->rangeset = ranges; - } - break; - - case PT_RANGE: - { - WalkerContext *nested_wcxt; - List *ranges = NIL; - int i; - - nested_wcxt = palloc(sizeof(WalkerContext)); - memcpy((void *) nested_wcxt, - (const void *) context, - sizeof(WalkerContext)); - - /* Construct OIDs list */ - for (i = 0; i < num_elems; i++) - { - WrapperNode *wrap; - Const *c = makeConst(ARR_ELEMTYPE(arrayval), - -1, InvalidOid, - datumGetSize(elem_values[i], - elembyval, - elemlen), - elem_values[i], - elem_nulls[i], - elembyval); - - wrap = walk_expr_tree((Expr *) c, nested_wcxt); - ranges = irange_list_union(ranges, wrap->rangeset); - - pfree(c); - - result->paramsel = Max(result->paramsel, wrap->paramsel); - } - - result->rangeset = ranges; - } - break; - - default: - WrongPartType(prel->parttype); + WrapperNode sub_result; + Const c; + + NodeSetTag(&c, T_Const); + c.consttype = ARR_ELEMTYPE(arrayval); + c.consttypmod = -1; + c.constcollid = InvalidOid; + c.constlen = datumGetSize(elem_values[i], + elembyval, + elemlen); + c.constvalue = elem_values[i]; + c.constisnull = elem_isnull[i]; + c.constbyval = elembyval; + c.location = -1; + + handle_const(&c, strategy, &nested_wcxt, &sub_result); + ranges = irange_list_union(ranges, sub_result.rangeset); + + result->paramsel = Max(result->paramsel, sub_result.paramsel); } + result->rangeset = ranges; + if (num_elems == 0) + result->paramsel = 0.0; + /* Free resources */ pfree(elem_values); - pfree(elem_nulls); + pfree(elem_isnull); - return result; + return; /* done, exit */ } - if 
(arraynode && IsA(arraynode, Param)) - result->paramsel = DEFAULT_INEQ_SEL; - handle_arrexpr_return: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = 1.0; - return result; + result->paramsel = estimate_paramsel_using_prel(prel, strategy); } /* Operator expression handler */ -static WrapperNode * -handle_opexpr(const OpExpr *expr, WalkerContext *context) +static void +handle_opexpr(const OpExpr *expr, + const WalkerContext *context, + WrapperNode *result) { - WrapperNode *result; - Node *param; - const PartRelationInfo *prel = context->prel; + Node *param; + const PartRelationInfo *prel = context->prel; if (list_length(expr->args) == 2) { @@ -1036,37 +1013,33 @@ handle_opexpr(const OpExpr *expr, WalkerContext *context) if (IsConstValue(param, context)) { - result = handle_const(ExtractConst(param, context), - strategy, context); + handle_const(ExtractConst(param, context), + strategy, context, result); /* Save expression */ result->orig = (const Node *) expr; - return result; + return; /* done, exit */ } /* TODO: estimate selectivity for param if it's Var */ else if (IsA(param, Param) || IsA(param, Var)) { - result = (WrapperNode *) palloc0(sizeof(WrapperNode)); result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); /* Save expression */ result->orig = (const Node *) expr; - return result; + return; /* done, exit */ } } } - result = (WrapperNode *) palloc0(sizeof(WrapperNode)); result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = 1.0; /* can't give any estimates */ /* Save expression */ result->orig = (const Node *) expr; - - return result; } @@ -1101,11 +1074,12 @@ is_key_op_param(const OpExpr *expr, /* Extract (evaluate) Const from Param node */ static Const * -extract_const(Param *param, WalkerContext *wcxt) +extract_const(Param *param, + const WalkerContext *context) { ExprState *estate = ExecInitExpr((Expr *) param, NULL); bool isnull; - 
Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull, NULL); + Datum value = ExecEvalExpr(estate, context->econtext, &isnull, NULL); return makeConst(param->paramtype, param->paramtypmod, param->paramcollid, get_typlen(param->paramtype), From 231046df69c130ac6e07acbdf2a41ff69903634e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 13 May 2017 14:30:45 +0300 Subject: [PATCH 0505/1124] fix ANY/ALL in handle_arrexpr() --- expected/pathman_basic.out | 158 +++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 23 ++++++ src/pg_pathman.c | 10 ++- 3 files changed, 189 insertions(+), 2 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index bc61ad37..d9a7fee8 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -564,6 +564,80 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NU One-Time Filter: false (2 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[1500, 2200]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id > ANY ('{1500,2200}'::integer[])) + -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[100, 1500]); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on num_range_rel_1 + Filter: (id > ANY ('{100,1500}'::integer[])) + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[1500, 2200]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > ALL ('{1500,2200}'::integer[])) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[100, 1500]); + QUERY 
PLAN +------------------------------------------------------ + Append + -> Seq Scan on num_range_rel_2 + Filter: (id > ALL ('{100,1500}'::integer[])) + -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[1500, 2200]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id = ANY ('{1500,2200}'::integer[])) + -> Seq Scan on num_range_rel_3 + Filter: (id = ANY ('{1500,2200}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[100, 1500]); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on num_range_rel_1 + Filter: (id = ANY ('{100,1500}'::integer[])) + -> Seq Scan on num_range_rel_2 + Filter: (id = ANY ('{100,1500}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[1500, 2200]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[100, 1500]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN -------------------------------------------------------------------------------- @@ -701,6 +775,90 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, One-Time Filter: false (2 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: (value > ANY ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_1 + Filter: (value > ANY ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value > ANY ('{1,2}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE value > ANY (ARRAY[1, 2, 3, 4, 5]); + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_1 + Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: (value > ALL ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_1 + Filter: (value > ALL ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value > ALL ('{1,2}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2, 3, 4, 5]); + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_1 + Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{1,2}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value = ANY ('{1,2}'::integer[])) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2, 3, 4, 5]); + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_1 + Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) + -> Seq Scan on hash_rel_2 + Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 
ALL (ARRAY[1, 2]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2, 3, 4, 5]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 22216c03..32165522 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -174,11 +174,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (2500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (500, 1500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-500, 500, 1500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NULL); + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[1500, 2200]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[100, 1500]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[1500, 2200]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[100, 1500]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[1500, 2200]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[100, 1500]); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[1500, 2200]); +EXPLAIN (COSTS OFF) SELECT * 
FROM test.num_range_rel WHERE id = ALL (ARRAY[100, 1500]); + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; @@ -194,18 +205,30 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2, 1); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2, -1); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (0, 0, 0); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, NULL); + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2, 3, 4, 5]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2, 3, 4, 5]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2, 3, 4, 5]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2]); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2, 3, 4, 5]); + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id 
< 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 861681a1..1e073ba3 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -932,7 +932,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, bool *elem_isnull; WalkerContext nested_wcxt; - List *ranges = NIL; + List *ranges; int i; /* Extract values from array */ @@ -951,6 +951,9 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, (const void *) context, sizeof(WalkerContext)); + /* Set default ranges for OR | AND */ + ranges = expr->useOr ? NIL : list_make1_irange_full(prel, IR_COMPLETE); + /* Select partitions using values */ for (i = 0; i < num_elems; i++) { @@ -970,7 +973,10 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, c.location = -1; handle_const(&c, strategy, &nested_wcxt, &sub_result); - ranges = irange_list_union(ranges, sub_result.rangeset); + + ranges = expr->useOr ? 
+ irange_list_union(ranges, sub_result.rangeset) : + irange_list_intersection(ranges, sub_result.rangeset); result->paramsel = Max(result->paramsel, sub_result.paramsel); } From 5ff467bca322f1f228657771865c61dd0043d932 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 13 May 2017 16:32:10 +0300 Subject: [PATCH 0506/1124] code cleansing + refactoring, record AUTO dependency on naming sequence --- expected/pathman_basic.out | 11 +- expected/pathman_bgw.out | 5 - expected/pathman_calamity.out | 11 +- expected/pathman_callbacks.out | 3 +- expected/pathman_column_type.out | 2 - expected/pathman_cte.out | 4 +- expected/pathman_domains.out | 3 +- expected/pathman_expressions.out | 3 +- expected/pathman_foreign_keys.out | 4 +- expected/pathman_inserts.out | 4 +- expected/pathman_interval.out | 2 - expected/pathman_only.out | 3 +- expected/pathman_permissions.out | 2 - expected/pathman_runtime_nodes.out | 3 +- expected/pathman_update_trigger.out | 3 +- expected/pathman_updates.out | 3 +- expected/pathman_utility_stmt.out | 4 +- hash.sql | 9 +- init.sql | 165 ++++++++++++++-------------- range.sql | 118 +++++++------------- src/pl_funcs.c | 26 +++++ 21 files changed, 167 insertions(+), 221 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index d9a7fee8..4f11219f 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -135,7 +135,6 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DAT ERROR: partitioning key "dt" must be marked NOT NULL ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); -NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -157,7 +156,6 @@ CREATE TABLE test.num_range_rel ( id SERIAL PRIMARY KEY, txt TEXT); SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); -NOTICE: sequence 
"num_range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -193,7 +191,6 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); -NOTICE: sequence "improved_dummy_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -336,7 +333,6 @@ NOTICE: drop cascades to 2 other objects CREATE TABLE test.insert_into_select(val int NOT NULL); INSERT INTO test.insert_into_select SELECT generate_series(1, 100); SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); -NOTICE: sequence "insert_into_select_seq" does not exist, skipping create_range_partitions ------------------------- 5 @@ -394,7 +390,6 @@ NOTICE: drop cascades to 5 other objects CREATE TABLE test.insert_date_test(val DATE NOT NULL); SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', date '20161001', date '20170101', interval '1 month'); -NOTICE: sequence "insert_date_test_seq" does not exist, skipping create_partitions_from_range ------------------------------ 4 @@ -1425,7 +1420,6 @@ CREATE TABLE test.zero( value INT NOT NULL); INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); -NOTICE: sequence "zero_seq" does not exist, skipping create_range_partitions ------------------------- 0 @@ -1754,7 +1748,6 @@ CREATE TABLE test."RangeRel" ( INSERT INTO test."RangeRel" (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); -NOTICE: sequence "RangeRel_seq" does not exist, skipping create_range_partitions ------------------------- 3 @@ 
-2061,7 +2054,6 @@ NOTICE: drop cascades to 3 other objects CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); -NOTICE: sequence "special_case_1_ind_o_s_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -2119,7 +2111,6 @@ CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); CREATE INDEX ON test.index_on_childs(c2); INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); -NOTICE: sequence "index_on_childs_seq" does not exist, skipping create_range_partitions ------------------------- 0 @@ -2202,6 +2193,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 47 other objects +NOTICE: drop cascades to 38 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index d78c4885..d513dcf1 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -8,7 +8,6 @@ CREATE SCHEMA test_bgw; /* int4, size of Datum == 4 */ CREATE TABLE test_bgw.test_1(val INT4 NOT NULL); SELECT create_range_partitions('test_bgw.test_1', 'val', 1, 5, 2); -NOTICE: sequence "test_1_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -34,7 +33,6 @@ NOTICE: drop cascades to 3 other objects /* int8, size of Datum == 8 */ CREATE TABLE test_bgw.test_2(val INT8 NOT NULL); SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); -NOTICE: sequence "test_2_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -60,7 +58,6 @@ NOTICE: drop cascades to 3 other objects /* numeric, size of Datum == 
var */ CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); -NOTICE: sequence "test_3_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -86,7 +83,6 @@ NOTICE: drop cascades to 3 other objects /* date, size of Datum == var */ CREATE TABLE test_bgw.test_4(val DATE NOT NULL); SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); -NOTICE: sequence "test_4_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -110,5 +106,4 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP TABLE test_bgw.test_4 CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA test_bgw CASCADE; -NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 13206f76..50d30232 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -21,7 +21,6 @@ CREATE TABLE calamity.part_test(val serial); /* test pg_pathman's cache */ INSERT INTO calamity.part_test SELECT generate_series(1, 30); SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); -NOTICE: sequence "part_test_seq" does not exist, skipping create_range_partitions ------------------------- 3 @@ -704,7 +703,6 @@ SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disab /* check function get_part_range_by_idx() */ CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); -NOTICE: sequence "test_range_idx_seq" does not exist, skipping create_range_partitions ------------------------- 1 @@ -731,7 +729,6 @@ NOTICE: drop cascades to table calamity.test_range_idx_1 /* check function get_part_range_by_oid() */ CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); SELECT 
create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); -NOTICE: sequence "test_range_oid_seq" does not exist, skipping create_range_partitions ------------------------- 1 @@ -759,14 +756,12 @@ ERROR: cannot merge partitions CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); -NOTICE: sequence "merge_test_a_seq" does not exist, skipping create_range_partitions ------------------------- 2 (1 row) SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); -NOTICE: sequence "merge_test_b_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -824,7 +819,7 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGC DROP TABLE calamity.trig_test_tbl CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 20 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; /* * ------------------------------------- @@ -838,7 +833,6 @@ SET pg_pathman.enable_bounds_cache = false; /* check view pathman_cache_stats */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); -NOTICE: sequence "test_pathman_cache_stats_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -925,7 +919,6 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats; DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to sequence calamity.test_pathman_cache_stats_seq DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -937,7 +930,6 @@ CREATE EXTENSION pg_pathman; /* check function pathman_cache_search_relid() */ CREATE TABLE calamity.survivor(val INT NOT NULL); SELECT 
create_range_partitions('calamity.survivor', 'val', 1, 10, 2); -NOTICE: sequence "survivor_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -1000,5 +992,4 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to sequence calamity.survivor_seq DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index d5ae1c5c..e2d98eb6 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -16,7 +16,6 @@ END $$ language plpgsql; CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); -NOTICE: sequence "abc_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -416,5 +415,5 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 4 other objects DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 6b0b605f..4382db1f 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -8,7 +8,6 @@ CREATE SCHEMA test_column_type; /* create new table (val int) */ CREATE TABLE test_column_type.test(val INT4 NOT NULL); SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); -NOTICE: sequence "test_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -167,5 +166,4 @@ NOTICE: 0 rows copied from test_column_type.test_4 DROP TABLE test_column_type.test CASCADE; DROP SCHEMA test_column_type CASCADE; -NOTICE: drop cascades to sequence test_column_type.test_seq DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index 3e028c54..b1fedb09 
100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -13,7 +13,6 @@ INSERT INTO test_cte.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); -NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -80,7 +79,6 @@ INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* create 2 partitions */ SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', '2016-01-01'::date, '50 days'::interval); -NOTICE: sequence "cte_del_xacts_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -270,5 +268,5 @@ SELECT * FROM test; (4 rows) DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 188ae60e..f78a73dc 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -5,7 +5,6 @@ CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); INSERT INTO domains.dom_table SELECT generate_series(1, 999); SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); -NOTICE: sequence "dom_table_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -125,5 +124,5 @@ ORDER BY "partition"::TEXT; (5 rows) DROP SCHEMA domains CASCADE; -NOTICE: drop cascades to 8 other objects +NOTICE: drop cascades to 7 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index a55ef371..344f0df8 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -78,7 +78,6 @@ SELECT 
create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::I ERROR: start value is less than min value of "random()" SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -152,5 +151,5 @@ SELECT COUNT(*) FROM test_exprs.range_rel_2; (1 row) DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 7a9db3e0..9f673c86 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -10,7 +10,6 @@ CREATE TABLE fkeys.test_fkey( FOREIGN KEY (comment) REFERENCES fkeys.test_ref(comment)); INSERT INTO fkeys.test_fkey SELECT generate_series(1, 1000), 'test'; SELECT create_range_partitions('fkeys.test_fkey', 'id', 1, 100); -NOTICE: sequence "test_fkey_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -75,7 +74,6 @@ WARNING: foreign key "replies_message_id_fkey" references table "fkeys.messages ERROR: table "fkeys.messages" is referenced from other tables ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ -NOTICE: sequence "messages_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -92,5 +90,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; DROP TABLE fkeys.messages, fkeys.replies CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA fkeys CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 44cc88a8..55dc57f1 100644 --- 
a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -7,7 +7,6 @@ CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; CREATE UNIQUE INDEX ON test_inserts.storage(a); SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); -NOTICE: sequence "storage_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -858,7 +857,6 @@ NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: CREATE TABLE test_inserts.test_gap(val INT NOT NULL); INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); -NOTICE: sequence "test_gap_seq" does not exist, skipping create_range_partitions ------------------------- 3 @@ -870,5 +868,5 @@ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 20 other objects +NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index ed86c3a5..aa58c0eb 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -4,7 +4,6 @@ CREATE SCHEMA test_interval; /* Range partitions for INT2 type */ CREATE TABLE test_interval.abc (id INT2 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); -NOTICE: sequence "abc_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -272,5 +271,4 @@ ERROR: table "test_interval.abc" is not partitioned by RANGE DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA test_interval CASCADE; -NOTICE: drop cascades to sequence test_interval.abc_seq DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only.out 
b/expected/pathman_only.out index 43ff6bb9..c6b372cb 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -11,7 +11,6 @@ CREATE SCHEMA test_only; CREATE TABLE test_only.from_only_test(val INT NOT NULL); INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); -NOTICE: sequence "from_only_test_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -239,5 +238,5 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test (27 rows) DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 11 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index ea176668..324e64bd 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -23,7 +23,6 @@ ERROR: only the owner or superuser can change partitioning configuration of tab /* Should be ok */ SET ROLE user1; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -NOTICE: sequence "user1_table_seq" does not exist, skipping create_range_partitions ------------------------- 2 @@ -169,7 +168,6 @@ CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); -NOTICE: sequence "dropped_column_seq" does not exist, skipping create_range_partitions ------------------------- 3 diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 4db58bdb..7e9d8bcb 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -263,7 +263,6 @@ create index on test.runtime_test_3_0 (id); create table test.runtime_test_4(val text, id int not null); 
insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); -NOTICE: sequence "runtime_test_4_seq" does not exist, skipping create_range_partitions ------------------------- 5 @@ -409,6 +408,6 @@ where id = any (select generate_series(-10, -1)); /* should be empty */ set enable_hashjoin = on; set enable_mergejoin = on; DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 37 other objects +NOTICE: drop cascades to 36 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out index ae60c733..fc28e96f 100644 --- a/expected/pathman_update_trigger.out +++ b/expected/pathman_update_trigger.out @@ -6,7 +6,6 @@ CREATE SCHEMA test_update_trigger; CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); -NOTICE: sequence "test_range_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -286,5 +285,5 @@ SELECT count(*) FROM test_update_trigger.test_hash; (1 row) DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 17 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_updates.out b/expected/pathman_updates.out index 4fb1ee58..5901ccfe 100644 --- a/expected/pathman_updates.out +++ b/expected/pathman_updates.out @@ -9,7 +9,6 @@ CREATE SCHEMA test_updates; CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; SELECT create_range_partitions('test_updates.test', 'val', 1, 10); -NOTICE: sequence "test_seq" does not exist, skipping create_range_partitions 
------------------------- 10 @@ -60,5 +59,5 @@ UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLA (1 row) DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 12 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 8bed134e..ae90d8e2 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -13,7 +13,6 @@ INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; CREATE INDEX ON copy_stmt_hooking.test(val); /* test for RANGE partitioning */ SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); -NOTICE: sequence "test_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -268,7 +267,6 @@ SELECT create_range_partitions('copy_stmt_hooking.test2', '2017-01-01 00:00:00'::timestamp, interval '1 hour', 5, false ); -NOTICE: sequence "test2_seq" does not exist, skipping create_range_partitions ------------------------- 5 @@ -282,7 +280,7 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; (1 row) DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 798 other objects +NOTICE: drop cascades to 796 other objects /* * Test auto check constraint renaming */ diff --git a/hash.sql b/hash.sql index 06056245..b510cfb9 100644 --- a/hash.sql +++ b/hash.sql @@ -18,8 +18,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ BEGIN expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, @@ -59,8 +58,7 @@ CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( old_partition REGCLASS, new_partition REGCLASS, lock_parent BOOL DEFAULT TRUE) -RETURNS REGCLASS AS -$$ +RETURNS REGCLASS AS $$ DECLARE parent_relid REGCLASS; 
old_constr_name TEXT; /* name of old_partition's constraint */ @@ -142,8 +140,7 @@ BEGIN RETURN new_partition; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Just create HASH partitions, called by create_hash_partitions(). diff --git a/init.sql b/init.sql index 3226aca6..6cf9aa0b 100644 --- a/init.sql +++ b/init.sql @@ -138,16 +138,14 @@ CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( relation REGCLASS, param TEXT, value ANYELEMENT) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN EXECUTE format('INSERT INTO @extschema@.pathman_config_params (partrel, %1$s) VALUES ($1, $2) ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) USING relation, value; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Include\exclude parent relation in query plan. @@ -155,13 +153,11 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( relation REGCLASS, value BOOLEAN) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * Enable\disable automatic partition creation. 
@@ -169,13 +165,11 @@ LANGUAGE plpgsql STRICT; CREATE OR REPLACE FUNCTION @extschema@.set_auto( relation REGCLASS, value BOOLEAN) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.pathman_set_param(relation, 'auto', value); END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * Set partition creation callback @@ -183,8 +177,7 @@ LANGUAGE plpgsql STRICT; CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( relation REGCLASS, callback REGPROCEDURE DEFAULT 0) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE regproc_text TEXT := NULL; @@ -205,8 +198,7 @@ BEGIN PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * Set 'spawn using BGW' option @@ -214,13 +206,11 @@ LANGUAGE plpgsql STRICT; CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( relation REGCLASS, value BOOLEAN) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * Set (or reset) default interval for auto created partitions @@ -228,8 +218,7 @@ LANGUAGE plpgsql STRICT; CREATE OR REPLACE FUNCTION @extschema@.set_interval( relation REGCLASS, value ANYELEMENT) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE affected INTEGER; BEGIN @@ -244,8 +233,7 @@ BEGIN RAISE EXCEPTION 'table "%" is not partitioned by RANGE', relation; END IF; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* @@ -338,8 +326,7 @@ CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( p_max ANYELEMENT DEFAULT NULL::text, p_limit INT DEFAULT NULL, OUT p_total BIGINT) -AS -$$ +AS $$ DECLARE part_expr TEXT; v_limit_clause TEXT := ''; @@ -389,8 +376,7 @@ BEGIN GET DIAGNOSTICS p_total = ROW_COUNT; RETURN; END -$$ -LANGUAGE plpgsql +$$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ /* @@ -399,8 +385,7 @@ SET 
pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O CREATE OR REPLACE FUNCTION @extschema@.partition_data( parent_relid REGCLASS, OUT p_total BIGINT) -AS -$$ +AS $$ BEGIN p_total := 0; @@ -413,8 +398,7 @@ BEGIN GET DIAGNOSTICS p_total = ROW_COUNT; RETURN; END -$$ -LANGUAGE plpgsql STRICT +$$ LANGUAGE plpgsql STRICT SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ /* @@ -422,8 +406,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O */ CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( parent_relid REGCLASS) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -434,8 +417,7 @@ BEGIN /* Drop triggers on update */ PERFORM @extschema@.drop_triggers(parent_relid); END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * Check a few things and take locks before partitioning. @@ -444,8 +426,7 @@ CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, expression TEXT, partition_data BOOLEAN) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE constr_name TEXT; is_referenced BOOLEAN; @@ -498,23 +479,20 @@ CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( cls REGCLASS, OUT schema TEXT, OUT relname TEXT) -AS -$$ +AS $$ BEGIN SELECT pg_catalog.pg_class.relnamespace::regnamespace, pg_catalog.pg_class.relname FROM pg_catalog.pg_class WHERE oid = cls::oid INTO schema, relname; END -$$ -LANGUAGE plpgsql STRICT; +$$ LANGUAGE plpgsql STRICT; /* * DDL trigger that removes entry from pathman_config table. 
*/ CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() -RETURNS event_trigger AS -$$ +RETURNS event_trigger AS $$ DECLARE obj RECORD; pg_class_oid OID; @@ -535,16 +513,46 @@ BEGIN /* Cleanup params table too */ DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ /* * Drop triggers */ CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( parent_relid REGCLASS) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE triggername TEXT; relation OID; @@ -581,14 +589,12 @@ $$ LANGUAGE plpgsql STRICT; CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( parent_relid REGCLASS, delete_data BOOLEAN DEFAULT FALSE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE - v_rec RECORD; - v_rows BIGINT; - v_part_count INTEGER := 0; - conf_num INTEGER; - v_relkind CHAR; + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; BEGIN PERFORM @extschema@.validate_relname(parent_relid); @@ -596,17 +602,18 @@ BEGIN /* Acquire data modification lock */ PERFORM @extschema@.prevent_relation_modification(parent_relid); + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) 
THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + /* First, drop all triggers */ PERFORM @extschema@.drop_triggers(parent_relid); - SELECT count(*) FROM @extschema@.pathman_config - WHERE partrel = parent_relid INTO conf_num; + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); - IF conf_num = 0 THEN - RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; - END IF; - - FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl + FOR child IN (SELECT inhrelid::REGCLASS FROM pg_catalog.pg_inherits WHERE inhparent::regclass = parent_relid ORDER BY inhrelid ASC) @@ -614,36 +621,36 @@ BEGIN IF NOT delete_data THEN EXECUTE format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, - v_rec.tbl::TEXT); - GET DIAGNOSTICS v_rows = ROW_COUNT; + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl::TEXT; + RAISE NOTICE '% rows copied from %', rows_count, child; END IF; SELECT relkind FROM pg_catalog.pg_class - WHERE oid = v_rec.tbl - INTO v_relkind; + WHERE oid = child + INTO rel_kind; /* * Determine the kind of child relation. It can be either a regular * table (r) or a foreign table (f). Depending on relkind we use * DROP TABLE or DROP FOREIGN TABLE. 
*/ - IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); ELSE - EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); + EXECUTE format('DROP TABLE %s', child); END IF; - v_part_count := v_part_count + 1; + part_count := part_count + 1; END LOOP; /* Finally delete both config entries */ DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - RETURN v_part_count; + RETURN part_count; END $$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ @@ -655,21 +662,20 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, partition_relid REGCLASS) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE - rec RECORD; + conid OID; BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint - WHERE conrelid = parent_relid AND contype = 'f') + FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') LOOP EXECUTE format('ALTER TABLE %s ADD %s', partition_relid::TEXT, - pg_catalog.pg_get_constraintdef(rec.conid)); + pg_catalog.pg_get_constraintdef(conid)); END LOOP; END $$ LANGUAGE plpgsql STRICT; @@ -683,8 +689,7 @@ CREATE OR REPLACE FUNCTION @extschema@.alter_partition( new_name TEXT, new_schema REGNAMESPACE, new_tablespace TEXT) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE orig_name TEXT; orig_schema OID; diff --git a/range.sql b/range.sql index 83f9e894..e9bfefe9 100644 --- a/range.sql +++ b/range.sql @@ -8,19 +8,6 @@ * ------------------------------------------------------------------------ */ -CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - 
parent_relid REGCLASS, - OUT seq_name TEXT) -AS $$ -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); -END -$$ -LANGUAGE plpgsql; - /* * Check RANGE partition boundaries. */ @@ -29,8 +16,7 @@ CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE min_value start_value%TYPE; max_value start_value%TYPE; @@ -70,8 +56,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE rows_count BIGINT; value_type REGTYPE; @@ -133,13 +118,13 @@ BEGIN expression; END IF; + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid); - IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, @@ -172,8 +157,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE rows_count BIGINT; max_value start_value%TYPE; @@ -232,13 +216,13 @@ BEGIN end_value); END IF; + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid); - 
IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, @@ -271,8 +255,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; @@ -296,12 +279,12 @@ BEGIN bounds[0], bounds[array_length(bounds, 1) - 1]); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid); - /* Create partitions */ part_count := @extschema@.create_range_partitions_internal(parent_relid, bounds, @@ -331,8 +314,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( end_value ANYELEMENT, p_interval ANYELEMENT, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; @@ -348,13 +330,13 @@ BEGIN start_value, end_value); + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid); - WHILE start_value <= end_value LOOP PERFORM @extschema@.create_single_range_partition( @@ -389,8 +371,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( end_value ANYELEMENT, p_interval INTERVAL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; @@ -406,13 +387,13 @@ BEGIN start_value, end_value); + /* Create sequence for child partitions names */ + 
PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, p_interval::TEXT); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid); - WHILE start_value <= end_value LOOP EXECUTE @@ -450,8 +431,7 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL, OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ +RETURNS ANYARRAY AS $$ DECLARE parent_relid REGCLASS; part_type INTEGER; @@ -530,8 +510,7 @@ BEGIN check_name, check_cond); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * The special case of merging two partitions @@ -539,8 +518,7 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( partition1 REGCLASS, partition2 REGCLASS) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); END @@ -553,8 +531,7 @@ CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_expr_type REGTYPE; part_name TEXT; @@ -592,8 +569,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Spawn logic for append_partition(). We have to @@ -608,8 +584,7 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( p_range ANYARRAY DEFAULT NULL, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_expr_type REGTYPE; part_name TEXT; @@ -651,8 +626,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Prepend new partition. 
@@ -661,8 +635,7 @@ CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_expr_type REGTYPE; part_name TEXT; @@ -700,8 +673,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Spawn logic for prepend_partition(). We have to @@ -716,8 +688,7 @@ CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( p_range ANYARRAY DEFAULT NULL, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_expr_type REGTYPE; part_name TEXT; @@ -759,8 +730,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Add new partition @@ -771,8 +741,7 @@ CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_name TEXT; @@ -802,8 +771,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* @@ -812,8 +780,7 @@ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; part_name TEXT; @@ -865,8 +832,7 @@ BEGIN RETURN part_name; END -$$ -LANGUAGE plpgsql +$$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ /* @@ -877,8 +843,7 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( partition_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE part_expr TEXT; rel_persistence CHAR; @@ -947,16 +912,14 @@ BEGIN RETURN partition_relid; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Detach range partition */ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( 
partition_relid REGCLASS) -RETURNS TEXT AS -$$ +RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; part_type INTEGER; @@ -994,8 +957,7 @@ BEGIN RETURN partition_relid; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 74b5f4c6..03538fd9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -20,7 +20,9 @@ #include "access/tupconvert.h" #include "access/htup_details.h" +#include "catalog/dependency.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" #include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" @@ -862,6 +864,30 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PG_END_TRY(); } + /* Check if naming sequence exists */ + if (parttype == PT_RANGE) + { + RangeVar *naming_seq_rv; + Oid naming_seq; + + naming_seq_rv = makeRangeVar(get_namespace_name(get_rel_namespace(relid)), + build_sequence_name_internal(relid), + -1); + + naming_seq = RangeVarGetRelid(naming_seq_rv, AccessShareLock, true); + if (OidIsValid(naming_seq)) + { + ObjectAddress parent, + sequence; + + ObjectAddressSet(parent, RelationRelationId, relid); + ObjectAddressSet(sequence, RelationRelationId, naming_seq); + + /* Now this naming sequence is a "part" of partitioned relation */ + recordDependencyOn(&sequence, &parent, DEPENDENCY_AUTO); + } + } + PG_RETURN_BOOL(true); } From 82507b669336c977d860072a06fe54b1560b566f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 13 May 2017 17:28:26 +0300 Subject: [PATCH 0507/1124] use DEPENDENCY_NORMAL instead of DEPENDENCY_AUTO (requires DROP CASCADE, but fixes pg_dump) --- expected/pathman_basic.out | 20 ++++++++++---------- expected/pathman_bgw.out | 8 ++++---- expected/pathman_calamity.out | 8 ++++---- expected/pathman_callbacks.out | 6 +++--- expected/pathman_cte.out | 4 ++-- expected/pathman_expressions.out | 2 +- expected/pathman_foreign_keys.out | 2 +- expected/pathman_inserts.out | 4 ++-- expected/pathman_interval.out | 14 +++++++------- 
expected/pathman_only.out | 2 +- expected/pathman_permissions.out | 2 +- expected/pathman_runtime_nodes.out | 2 +- expected/pathman_update_trigger.out | 2 +- expected/pathman_updates.out | 2 +- expected/pathman_utility_stmt.out | 4 ++-- src/pl_funcs.c | 2 +- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4f11219f..cee11bf7 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -257,7 +257,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A (5 rows) DROP TABLE test.improved_dummy CASCADE; -NOTICE: drop cascades to 11 other objects +NOTICE: drop cascades to 12 other objects /* since rel_1_4_beta: check create_range_partitions(bounds array) */ CREATE TABLE test.improved_dummy (val INT NOT NULL); SELECT pathman.create_range_partitions('test.improved_dummy', 'val', @@ -328,7 +328,7 @@ ORDER BY partition; (2 rows) DROP TABLE test.improved_dummy CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* Test pathman_rel_pathlist_hook() with INSERT query */ CREATE TABLE test.insert_into_select(val int NOT NULL); INSERT INTO test.insert_into_select SELECT generate_series(1, 100); @@ -385,7 +385,7 @@ SELECT count(*) FROM test.insert_into_select_copy; (1 row) DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 6 other objects /* Test INSERT hooking with DATE type */ CREATE TABLE test.insert_date_test(val DATE NOT NULL); SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', @@ -429,7 +429,7 @@ SELECT max(val) FROM test.insert_date_test; /* check last date */ (1 row) DROP TABLE test.insert_date_test CASCADE; -NOTICE: drop cascades to 8 other objects +NOTICE: drop cascades to 9 other objects SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; @@ -1454,7 
+1454,7 @@ SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); (1 row) DROP TABLE test.zero CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects /* * Check that altering table columns doesn't break trigger */ @@ -1562,7 +1562,7 @@ NOTICE: 1000 rows copied from test.num_range_rel_3 DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; -NOTICE: drop cascades to 9 other objects +NOTICE: drop cascades to 10 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -1633,7 +1633,7 @@ SELECT * FROM pathman.pathman_config; (1 row) DROP TABLE test.range_rel CASCADE; -NOTICE: drop cascades to 20 other objects +NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; partrel | expr | parttype | range_interval | cooked_expr ---------+------+----------+----------------+------------- @@ -1795,7 +1795,7 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 (1 row) DROP TABLE test."RangeRel" CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; partrel | expr | parttype | range_interval | cooked_expr --------------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------ @@ -1828,7 +1828,7 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100 (1 row) DROP TABLE test."RangeRel" CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman; /* Test that everything works fine without schemas */ CREATE EXTENSION pg_pathman; @@ -2193,6 +2193,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 38 other objects +NOTICE: drop 
cascades to 42 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index d513dcf1..3c955c05 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -29,7 +29,7 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par (3 rows) DROP TABLE test_bgw.test_1 CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* int8, size of Datum == 8 */ CREATE TABLE test_bgw.test_2(val INT8 NOT NULL); SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); @@ -54,7 +54,7 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par (3 rows) DROP TABLE test_bgw.test_2 CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* numeric, size of Datum == var */ CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); @@ -79,7 +79,7 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par (3 rows) DROP TABLE test_bgw.test_3 CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* date, size of Datum == var */ CREATE TABLE test_bgw.test_4(val DATE NOT NULL); SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); @@ -104,6 +104,6 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par (3 rows) DROP TABLE test_bgw.test_4 CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 50d30232..13c3c401 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -725,7 +725,7 @@ SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ (1 row) DROP TABLE 
calamity.test_range_idx CASCADE; -NOTICE: drop cascades to table calamity.test_range_idx_1 +NOTICE: drop cascades to 2 other objects /* check function get_part_range_by_oid() */ CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); @@ -747,7 +747,7 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ (1 row) DROP TABLE calamity.test_range_oid CASCADE; -NOTICE: drop cascades to table calamity.test_range_oid_1 +NOTICE: drop cascades to 2 other objects /* check function merge_range_partitions() */ SELECT merge_range_partitions('{pg_class}'); /* not ok */ ERROR: cannot merge partitions @@ -771,7 +771,7 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, calamity.merge_test_b_1}'); /* not ok */ ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 6 other objects /* check function drop_triggers() */ CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); @@ -990,6 +990,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ (3 rows) DROP TABLE calamity.survivor CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index e2d98eb6..2f8e0166 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -66,7 +66,7 @@ WHERE partrel = 'callbacks.abc'::REGCLASS; (1 row) DROP TABLE callbacks.abc CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* set callback to be called on RANGE partitions */ CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); @@ -196,7 +196,7 @@ $$ language plpgsql; INSERT 
INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP TABLE callbacks.abc CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects /* more complex test using rotation of tables */ CREATE TABLE callbacks.abc(a INT4 NOT NULL); INSERT INTO callbacks.abc @@ -413,7 +413,7 @@ ORDER BY range_min::INT4; (4 rows) DROP TABLE callbacks.abc CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects DROP SCHEMA callbacks CASCADE; NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index b1fedb09..c7edd5a4 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -33,7 +33,7 @@ SELECT * FROM ttt; (6 rows) DROP TABLE test_cte.range_rel CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects CREATE TABLE test_cte.hash_rel ( id INT4, value INTEGER NOT NULL); @@ -225,7 +225,7 @@ SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; -NOTICE: drop cascades to table test_cte.cte_del_xacts_1 +NOTICE: drop cascades to 2 other objects /* Test recursive CTE */ CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 344f0df8..85b50bdd 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -151,5 +151,5 @@ SELECT COUNT(*) FROM test_exprs.range_rel_2; (1 row) DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 16 other objects +NOTICE: 
drop cascades to 17 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 9f673c86..00462c3d 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -88,7 +88,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; (3 rows) DROP TABLE fkeys.messages, fkeys.replies CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DROP SCHEMA fkeys CASCADE; NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 55dc57f1..9e04ae26 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -866,7 +866,7 @@ DROP TABLE test_inserts.test_gap_2; /* make a gap */ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index aa58c0eb..1bcd8216 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -43,7 +43,7 @@ SELECT partrel, range_interval FROM pathman_config; (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* Range partitions for INT4 type */ CREATE TABLE test_interval.abc (id INT4 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); @@ -86,7 +86,7 @@ SELECT partrel, range_interval FROM pathman_config; (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* Range partitions for INT8 type */ CREATE TABLE test_interval.abc (id 
INT8 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); @@ -129,7 +129,7 @@ SELECT partrel, range_interval FROM pathman_config; (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects /* Range partitions for DATE type */ CREATE TABLE test_interval.abc (dt DATE NOT NULL); SELECT create_range_partitions('test_interval.abc', 'dt', @@ -162,7 +162,7 @@ SELECT partrel, range_interval FROM pathman_config; (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* Range partitions for FLOAT4 type */ CREATE TABLE test_interval.abc (x FLOAT4 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); @@ -194,7 +194,7 @@ SELECT set_interval('test_interval.abc', 100); (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* Range partitions for FLOAT8 type */ CREATE TABLE test_interval.abc (x FLOAT8 NOT NULL); SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); @@ -226,7 +226,7 @@ SELECT set_interval('test_interval.abc', 100); (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* Range partitions for NUMERIC type */ CREATE TABLE test_interval.abc (x NUMERIC NOT NULL); SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); @@ -255,7 +255,7 @@ SELECT set_interval('test_interval.abc', 100); (1 row) DROP TABLE test_interval.abc CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects /* Hash partitioned table shouldn't accept any interval value */ CREATE TABLE test_interval.abc (id SERIAL); SELECT create_hash_partitions('test_interval.abc', 'id', 3); diff --git a/expected/pathman_only.out b/expected/pathman_only.out index c6b372cb..f90dc56e 100644 --- 
a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -238,5 +238,5 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test (27 rows) DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 11 other objects +NOTICE: drop cascades to 12 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 324e64bd..4700f8bf 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -227,7 +227,7 @@ ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) * (5 rows) DROP TABLE permissions.dropped_column CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; DROP OWNED BY user1; diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 7e9d8bcb..87617029 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -408,6 +408,6 @@ where id = any (select generate_series(-10, -1)); /* should be empty */ set enable_hashjoin = on; set enable_mergejoin = on; DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 36 other objects +NOTICE: drop cascades to 37 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out index fc28e96f..fdc5438a 100644 --- a/expected/pathman_update_trigger.out +++ b/expected/pathman_update_trigger.out @@ -285,5 +285,5 @@ SELECT count(*) FROM test_update_trigger.test_hash; (1 row) DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 18 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_updates.out b/expected/pathman_updates.out index 5901ccfe..d06f7c5b 100644 --- a/expected/pathman_updates.out +++ b/expected/pathman_updates.out @@ -59,5 +59,5 @@ UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, 
tableoid::REGCLA (1 row) DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 13 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index ae90d8e2..913c130d 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -224,7 +224,7 @@ SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; /* drop modified table */ DROP TABLE copy_stmt_hooking.test CASCADE; -NOTICE: drop cascades to 6 other objects +NOTICE: drop cascades to 7 other objects /* create table again */ CREATE TABLE copy_stmt_hooking.test( val int not null, @@ -280,7 +280,7 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; (1 row) DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 796 other objects +NOTICE: drop cascades to 797 other objects /* * Test auto check constraint renaming */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 03538fd9..276b25bd 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -884,7 +884,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) ObjectAddressSet(sequence, RelationRelationId, naming_seq); /* Now this naming sequence is a "part" of partitioned relation */ - recordDependencyOn(&sequence, &parent, DEPENDENCY_AUTO); + recordDependencyOn(&sequence, &parent, DEPENDENCY_NORMAL); } } From 9d89e064409a4894cc9695524b432ff41448891e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 11:18:53 +0300 Subject: [PATCH 0508/1124] Turn off update node for FDW tables --- range.sql | 3 +- src/hooks.c | 15 ++++----- src/include/partition_filter.h | 2 +- src/partition_filter.c | 6 ++-- src/planner_tree_modification.c | 52 +++++++++++-------------------- tests/python/partitioning_test.py | 51 +++++++++--------------------- 6 files changed, 45 insertions(+), 84 deletions(-) diff --git a/range.sql b/range.sql index 009f11f1..3c5776d0 100644 --- a/range.sql +++ b/range.sql @@ -952,9 +952,10 @@ BEGIN PERFORM 
@extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; + /* IF @extschema@.is_relation_foreign(partition_relid) THEN PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); - END IF; + END IF; */ /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, diff --git a/src/hooks.c b/src/hooks.c index 7bda2f63..92b65b92 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -528,12 +528,12 @@ pg_pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, context, proc) \ +#define ExecuteForPlanTree(planned_stmt, proc) \ do { \ ListCell *lc; \ - proc((context), (planned_stmt)->planTree); \ + proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ foreach (lc, (planned_stmt)->subplans) \ - proc((context), (Plan *) lfirst(lc)); \ + proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ } while (0) PlannedStmt *result; @@ -562,17 +562,14 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready && pathman_hooks_enabled) { - List *update_nodes_context; - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, result->rtable, postprocess_lock_rows); + ExecuteForPlanTree(result, postprocess_lock_rows); /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, result->rtable, add_partition_filters); + ExecuteForPlanTree(result, add_partition_filters); /* Add PartitionUpdate node for UPDATE queries */ - update_nodes_context = list_make2(result->rtable, pathman_planner_info); - ExecuteForPlanTree(result, update_nodes_context, add_partition_update_nodes); + ExecuteForPlanTree(result, add_partition_update_nodes); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 
1519a246..ef3b8741 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,7 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - JunkFilter *updates_junkFilter; /* we keep junkfilter from scanned + JunkFilter *src_junkFilter; /* we keep junkfilter from scanned ResultRelInfo here */ } ResultRelInfoHolder; diff --git a/src/partition_filter.c b/src/partition_filter.c index 81cdcbc4..8da0f68d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -316,7 +316,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - rri_holder->updates_junkFilter = NULL; + rri_holder->src_junkFilter = NULL; if (parts_storage->command_type == CMD_UPDATE) { @@ -326,7 +326,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_result_rel_info->ri_junkFilter = NULL; /* instead we do junk filtering ourselves */ - rri_holder->updates_junkFilter = junkfilter; + rri_holder->src_junkFilter = junkfilter; } /* Generate tuple transformation map and some other stuff */ @@ -702,7 +702,7 @@ partition_filter_exec(CustomScanState *node) estate->es_result_relation_info = resultRelInfo; /* pass junkfilter to upper node */ - state->src_junkFilter = rri_holder->updates_junkFilter; + state->src_junkFilter = rri_holder->src_junkFilter; /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index bffc880c..5ca1272f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -40,6 +40,7 @@ static void partition_update_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); 
+static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); /* * HACK: We have to mark each Query with a unique @@ -431,42 +432,21 @@ partition_filter_visitor(Plan *plan, void *context) } -static List * -recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) { ListCell *lc; - int i = 0; - List *fdw_private_list = NIL; - - /* we need DELETE queries for FDW */ - node->operation = CMD_DELETE; foreach(lc, node->resultRelations) { - Index rti = lfirst_int(lc); - FdwRoutine *fdwroutine; - List *fdw_private; - - RangeTblEntry *rte = rt_fetch(rti, rtable); - Assert(rte->rtekind == RTE_RELATION); - if (rte->relkind != RELKIND_FOREIGN_TABLE) - continue; - - fdwroutine = GetFdwRoutineByRelId(rte->relid); - - if (fdwroutine != NULL && - fdwroutine->PlanForeignModify != NULL) - fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i); - else - fdw_private = NIL; + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); - fdw_private_list = lappend(fdw_private_list, fdw_private); - i++; + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; } - /* restore operation */ - node->operation = CMD_UPDATE; - return fdw_private_list; + return false; } @@ -478,8 +458,7 @@ recreate_fdw_private_list(PlannerInfo *root, List *rtable, ModifyTable *node) static void partition_update_visitor(Plan *plan, void *context) { - List *rtable = (List *) linitial((List *) context); - PlannerInfo *root = (PlannerInfo *) lsecond((List *) context); + List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; ListCell *lc1, *lc2, @@ -491,6 +470,15 @@ partition_update_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); + if (modifytable_contains_fdw(rtable, modify_table)) + { + ereport(NOTICE, + (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), + errmsg("discovered mix of local and foreign tables," + " pg_pathman's update node will not be 
used"))); + return; + } + lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { @@ -527,10 +515,6 @@ partition_update_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, returning_list); - - /* change fdw queries to DELETE */ - modify_table->fdwPrivLists = - recreate_fdw_private_list(root, rtable, modify_table); } } } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index b7091213..0e992be5 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -470,7 +470,7 @@ def make_basic_fdw_setup(self): ) master.safe_psql( 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') + 'select attach_range_partition(\'abc\', \'ftable\', 20, 100)') return (master, fserv) @@ -538,54 +538,33 @@ def test_foreign_table(self): fserv.stop() master.stop() - @if_fdw_enabled - def test_update_node_on_fdw_tables(self): + def test_update_triggers_on_fdw_tables(self): ''' Test update node on foreign tables ''' master, fserv = self.make_basic_fdw_setup() - # create second foreign table - fserv.safe_psql('postgres', 'create table ftable2(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable2 values (35, \'foreign\')') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable2) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable2\', 30, 40)') - - master.safe_psql('postgres', - 'set pg_pathman.enable_partitionupdate=on') - with master.connect() as con: con.begin() - con.execute('set pg_pathman.enable_partitionupdate=on') - con.execute("insert into abc select i, 'local' from generate_series(1, 19) i") + con.execute("select create_update_triggers('abc')") + con.execute("insert into abc select i, i from generate_series(1, 30) i") con.commit() source_relid = con.execute('select 
tableoid from abc where id=9')[0][0] - dest_relid = con.execute('select tableoid from abc where id=35')[0][0] + dest_relid = con.execute('select tableoid from abc where id=25')[0][0] self.assertNotEqual(source_relid, dest_relid) - # cases - # - update from local to foreign - # - update from foreign to foreign - # - update from foreign to local + self.set_trace(con, 'pg_debug') + import ipdb; ipdb.set_trace() + count1 = con.execute("select count(*) from abc")[0][0] + con.execute('update abc set id=id + 10') + count2 = con.execute("select count(*) from abc")[0][0] + self.assertEqual(count1, count2) - con.execute('update abc set id=36 where id=9') - result_relid = con.execute('select tableoid from abc where id=36')[0][0] - self.assertEqual(result_relid, dest_relid) - - con.execute('update abc set id=38 where id=36') - result_relid = con.execute('select tableoid from abc where id=38')[0][0] - self.assertEqual(result_relid, dest_relid) + fserv.cleanup() + master.cleanup() - con.execute('update abc set id=9 where id=35') - result_relid = con.execute('select tableoid from abc where id=9')[0][0] - self.assertEqual(result_relid, source_relid) + fserv.stop() + master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" From 856c71f185e5f75bd58f76eb6671ede045333788 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 14:49:24 +0300 Subject: [PATCH 0509/1124] Comment tests for update triggers for FDW tables (until bug fixed in core), fix tests for update node --- tests/python/partitioning_test.py | 54 +++++++++++++++---------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0e992be5..684369cc 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -538,33 +538,31 @@ def test_foreign_table(self): fserv.stop() master.stop() - def test_update_triggers_on_fdw_tables(self): - ''' Test update node on foreign tables 
''' - - master, fserv = self.make_basic_fdw_setup() - - with master.connect() as con: - con.begin() - con.execute("select create_update_triggers('abc')") - con.execute("insert into abc select i, i from generate_series(1, 30) i") - con.commit() - - source_relid = con.execute('select tableoid from abc where id=9')[0][0] - dest_relid = con.execute('select tableoid from abc where id=25')[0][0] - self.assertNotEqual(source_relid, dest_relid) - - self.set_trace(con, 'pg_debug') - import ipdb; ipdb.set_trace() - count1 = con.execute("select count(*) from abc")[0][0] - con.execute('update abc set id=id + 10') - count2 = con.execute("select count(*) from abc")[0][0] - self.assertEqual(count1, count2) - - fserv.cleanup() - master.cleanup() - - fserv.stop() - master.stop() +# def test_update_triggers_on_fdw_tables(self): +# ''' Test update node on foreign tables ''' +# +# master, fserv = self.make_basic_fdw_setup() +# +# with master.connect() as con: +# con.begin() +# con.execute("select create_update_triggers('abc')") +# con.execute("insert into abc select i, i from generate_series(1, 30) i") +# con.commit() +# +# source_relid = con.execute('select tableoid from abc where id=9')[0][0] +# dest_relid = con.execute('select tableoid from abc where id=25')[0][0] +# self.assertNotEqual(source_relid, dest_relid) +# +# count1 = con.execute("select count(*) from abc")[0][0] +# con.execute('update abc set id=id + 10') +# count2 = con.execute("select count(*) from abc")[0][0] +# self.assertEqual(count1, count2) +# +# fserv.cleanup() +# master.cleanup() +# +# fserv.stop() +# master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" @@ -1129,7 +1127,7 @@ def test_update_node_plan1(self): plan = plan[0]["Plan"] self.assertEqual(plan["Node Type"], "ModifyTable") - self.assertEqual(plan["Operation"], "Insert") + self.assertEqual(plan["Operation"], "Update") self.assertEqual(plan["Relation Name"], "test_range") self.assertEqual(len(plan["Target Tables"]), 11) 
From 32dff1646901eed048bcc370b63319b98e21bc80 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 15:08:32 +0300 Subject: [PATCH 0510/1124] Fix python tests --- tests/python/partitioning_test.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 684369cc..120b4865 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -535,9 +535,6 @@ def test_foreign_table(self): fserv.cleanup() master.cleanup() - fserv.stop() - master.stop() - # def test_update_triggers_on_fdw_tables(self): # ''' Test update node on foreign tables ''' # @@ -560,9 +557,6 @@ def test_foreign_table(self): # # fserv.cleanup() # master.cleanup() -# -# fserv.stop() -# master.stop() def test_parallel_nodes(self): """Test parallel queries under partitions""" From 338f057c039243e7c084a8d54070464eaf0b5481 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 15:49:55 +0300 Subject: [PATCH 0511/1124] Add more tests for update node --- expected/pathman_update_node.out | 77 +++++++++++++++++++++++++++++++- sql/pathman_update_node.sql | 20 +++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 2976e767..45ca80e1 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -291,6 +291,81 @@ SELECT count(*) FROM test_update_node.test_range; 90 (1 row) +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY 
tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 + test_update_node.test_range_11 | 101 +(10 rows) + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + /* 
Partition table by HASH (INT4) */ CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; @@ -345,5 +420,5 @@ SELECT count(*) FROM test_update_node.test_hash; (1 row) DROP SCHEMA test_update_node CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 17 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 754dffc2..f451010e 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -144,7 +144,27 @@ WHERE val = 115; UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; SELECT count(*) FROM test_update_node.test_range; +DROP TABLE test_update_node.test_range CASCADE; +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; /* Partition table by HASH (INT4) */ CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); From 52b2086714fb9582f60cc2fa7b3d70c51e65e3e9 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:07:13 +0300 
Subject: [PATCH 0512/1124] Remove FDW tables support from update node functions --- hash.sql | 4 -- init.sql | 25 ------------- range.sql | 5 --- src/include/partition_update.h | 1 - src/init.c | 11 ------ src/partition_creation.c | 35 ------------------ src/partition_update.c | 67 ++++++---------------------------- src/pl_funcs.c | 66 --------------------------------- 8 files changed, 11 insertions(+), 203 deletions(-) diff --git a/hash.sql b/hash.sql index 677239b6..4c21f9df 100644 --- a/hash.sql +++ b/hash.sql @@ -138,10 +138,6 @@ BEGIN @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); - IF @extschema@.is_relation_foreign(new_partition) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, new_partition); - END IF; - /* Fetch init_callback from 'params' table */ WITH stub_callback(stub) as (values (0)) SELECT init_callback diff --git a/init.sql b/init.sql index 1ea4355b..b106f318 100644 --- a/init.sql +++ b/init.sql @@ -737,23 +737,6 @@ CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' LANGUAGE C STRICT; -/* - * Function for NOP triggers. - * NOP trigger is a trigger that we use to turn off direct modify of FDW tables - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_nop_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'pathman_nop_trigger_func' -LANGUAGE C STRICT; - -/* - * Creates single NOP trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_nop_trigger( - parent_relid REGCLASS, - partition_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_single_nop_trigger' -LANGUAGE C STRICT; - /* * Partitioning key */ @@ -946,11 +929,3 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' LANGUAGE C STRICT; - -/* - * Check if relation is foreign table - */ -CREATE OR REPLACE FUNCTION @extschema@.is_relation_foreign( - relid REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'is_relation_foreign' -LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 3c5776d0..099a04bc 100644 --- a/range.sql +++ b/range.sql @@ -952,11 +952,6 @@ BEGIN PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); END IF; - /* - IF @extschema@.is_relation_foreign(partition_relid) THEN - PERFORM @extschema@.create_single_nop_trigger(parent_relid, partition_relid); - END IF; */ - /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 84668587..b82ec61a 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,7 +29,6 @@ typedef struct PartitionUpdateState CustomScanState css; Oid partitioned_table; - List *returning_list; ResultRelInfo *resultRelInfo; JunkFilter *saved_junkFilter; Plan *subplan; /* proxy variable to store subplan */ diff --git a/src/init.c b/src/init.c index 25ce724c..0333d263 100644 --- a/src/init.c +++ b/src/init.c @@ -583,17 +583,6 @@ build_update_trigger_name_internal(Oid relid) return psprintf("%s_upd_trig", get_rel_name(relid)); } -/* - * Generate name for NOP trigger. - * NOTE: this function does not perform sanity checks at all. 
- */ -char * -build_nop_trigger_name_internal(Oid relid) -{ - AssertArg(OidIsValid(relid)); - return psprintf("%s_nop_trig", get_rel_name(relid)); -} - /* * Generate name for update trigger's function. * NOTE: this function does not perform sanity checks at all. diff --git a/src/partition_creation.c b/src/partition_creation.c index 5f237575..c06bfaa6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1830,38 +1830,3 @@ has_update_trigger_internal(Oid parent_relid) trigname = build_update_trigger_name_internal(parent_relid); return has_trigger_internal(parent_relid, trigname); } - -/* Create trigger for partition that does nothing */ -void -create_single_nop_trigger_internal(Oid relid, - const char *trigname, - List *columns) -{ - CreateTrigStmt *stmt; - List *func; - - /* do nothing if relation has trigger already */ - if (has_trigger_internal(relid, trigname)) - return; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString(CppAsString(pathman_nop_trigger_func))); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = columns; - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, - InvalidOid, InvalidOid, false); -} diff --git a/src/partition_update.c b/src/partition_update.c index 7b50c81c..99a520f5 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -28,7 +28,6 @@ CustomScanMethods partition_update_plan_methods; CustomExecMethods partition_update_exec_methods; static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate); @@ -138,15 
+137,12 @@ partition_update_exec(CustomScanState *node) if (!TupIsNull(slot)) { Datum datum; - bool isNull; char relkind; ResultRelInfo *resultRelInfo, *sourceRelInfo; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; EPQState epqstate; - HeapTupleData oldtupdata; - HeapTuple oldtuple = NULL; PartitionFilterState *child_state; JunkFilter *junkfilter; @@ -178,28 +174,9 @@ partition_update_exec(CustomScanState *node) tupleid = &tuple_ctid; } else if (relkind == RELKIND_FOREIGN_TABLE) - { - if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) - { - datum = ExecGetJunkAttribute(child_state->subplan_slot, - junkfilter->jf_junkAttNo, - &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "wholerow is NULL"); - - oldtupdata.t_data = DatumGetHeapTupleHeader(datum); - oldtupdata.t_len = - HeapTupleHeaderGetDatumLength(oldtupdata.t_data); - ItemPointerSetInvalid(&(oldtupdata.t_self)); - - /* Historically, view triggers see invalid t_tableOid. */ - oldtupdata.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); - oldtuple = &oldtupdata; - } - } + elog(ERROR, "update node is not supported for foreign tables"); else - elog(ERROR, "got unexpected type of relation"); + elog(ERROR, "got unexpected type of relation for update"); /* * Clean from junk attributes before INSERT, @@ -209,14 +186,9 @@ partition_update_exec(CustomScanState *node) slot = ExecFilterJunk(junkfilter, slot); } - /* - * Delete old tuple. 
We have two cases here: - * 1) local tables - tupleid points to actual tuple - * 2) foreign tables - tupleid is invalid, slot is required - */ + /* Delete old tuple */ estate->es_result_relation_info = sourceRelInfo; - ExecDeleteInternal(tupleid, oldtuple, child_state->subplan_slot, - &epqstate, estate); + ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); /* we've got the slot that can be inserted to child partition */ estate->es_result_relation_info = resultRelInfo; @@ -254,15 +226,14 @@ partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - HeapTuple oldtuple, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; - HTSU_Result result; - HeapUpdateFailureData hufd; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + HeapUpdateFailureData hufd; /* * get information on the (current) result relation @@ -277,29 +248,13 @@ ExecDeleteInternal(ItemPointer tupleid, bool dodelete; dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tupleid, oldtuple); + tupleid, NULL); if (!dodelete) elog(ERROR, "the old row always should be deleted from child table"); } - if (resultRelInfo->ri_FdwRoutine) - { - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(resultRelationDesc)); - - /* - * delete from foreign table: let the FDW do it - */ - ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); - resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, - resultRelInfo, - slot, - planSlot); - - /* we don't need slot anymore */ - ExecDropSingleTupleTableSlot(slot); - } - else if (tupleid != NULL) + if (tupleid != NULL) { /* delete the tuple */ ldelete:; @@ -358,7 +313,7 @@ ldelete:; elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE Triggers */ - ExecARDeleteTriggers(estate, resultRelInfo, tupleid, 
oldtuple); + ExecARDeleteTriggers(estate, resultRelInfo, tupleid, NULL); return NULL; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f7c17f3d..246f11d8 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -71,12 +71,8 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( create_update_triggers ); PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); -PG_FUNCTION_INFO_V1( pathman_nop_trigger_func ); PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); -PG_FUNCTION_INFO_V1( is_relation_foreign ); - -PG_FUNCTION_INFO_V1( create_single_nop_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( get_pathman_lib_version ); @@ -1217,24 +1213,6 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) PG_RETURN_POINTER(new_tuple); } -Datum -pathman_nop_trigger_func(PG_FUNCTION_ARGS) -{ - TriggerData *trigdata = (TriggerData *) fcinfo->context; - - /* Handle user calls */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "this function should not be called directly"); - - /* Handle wrong fire mode */ - if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) - elog(ERROR, "%s: must be fired for row", - trigdata->tg_trigger->tgname); - - /* Just return NEW tuple */ - PG_RETURN_POINTER(trigdata->tg_newtuple); -} - struct replace_vars_cxt { HeapTuple new_tuple; @@ -1478,50 +1456,6 @@ has_update_trigger(PG_FUNCTION_ARGS) PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); } -/* Check if relation is foreign table */ -Datum -is_relation_foreign(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - Relation rel; - bool res; - - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%u\" does not exist", relid))); - - rel = heap_open(relid, NoLock); - res = (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE); - heap_close(rel, NoLock); - PG_RETURN_BOOL(res); -} - -/* Create a trigger for partition that 
does nothing */ -Datum -create_single_nop_trigger(PG_FUNCTION_ARGS) -{ - Oid parent = PG_GETARG_OID(0); - Oid child = PG_GETARG_OID(1); - const char *trigname; - const PartRelationInfo *prel; - List *columns; - - /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); - - /* Acquire trigger and attribute names */ - trigname = build_nop_trigger_name_internal(parent); - - /* Generate list of columns used in expression */ - columns = PrelExpressionColumnNames(prel); - create_single_nop_trigger_internal(child, trigname, columns); - - PG_RETURN_VOID(); -} - - /* * ------- * DEBUG From 05515ecd8391798fe069678c85107e97ee2a1881 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:10:34 +0300 Subject: [PATCH 0513/1124] Clean code from NOP definitions --- src/include/init.h | 2 -- src/include/partition_creation.h | 5 ----- 2 files changed, 7 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 769bf119..bea36d67 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -199,8 +199,6 @@ char *build_sequence_name_internal(Oid relid); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); -char *build_nop_trigger_name_internal(Oid relid); - bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 106054c9..42454ca9 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -86,11 +86,6 @@ void create_single_update_trigger_internal(Oid partition_relid, bool has_update_trigger_internal(Oid parent); -/* NOP triggers */ -void create_single_nop_trigger_internal(Oid relid, - const char *trigname, - List *columns); - /* Partitioning callback type */ typedef enum { From ea1a91d91b3c61a3edff29e3323d65417d14064d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:13:26 +0300 
Subject: [PATCH 0514/1124] Cleanup code --- src/hooks.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 92b65b92..5d64b073 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,8 +36,6 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" -static PlannerInfo* pathman_planner_info = NULL; - /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) @@ -280,9 +278,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; - /* save root, we will use in plan modify stage */ - pathman_planner_info = root; - /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. @@ -540,9 +535,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) uint32 query_id = parse->queryId; bool pathman_ready = IsPathmanReady(); /* in case it changes */ - /* rel_pathlist_hook will set this variable */ - pathman_planner_info = NULL; - PG_TRY(); { if (pathman_ready && pathman_hooks_enabled) From 7293034e9999359c498f822720b088d1d2c95d9e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 15 May 2017 16:19:18 +0300 Subject: [PATCH 0515/1124] Fix tests --- expected/pathman_update_node.out | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 45ca80e1..a6214a52 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -8,7 +8,6 @@ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); CREATE INDEX val_idx ON test_update_node.test_range (val); INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); -NOTICE: sequence "test_range_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -292,7 +291,7 @@ 
SELECT count(*) FROM test_update_node.test_range; (1 row) DROP TABLE test_update_node.test_range CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 13 other objects /* recreate table and mass move */ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; From d6eeacd48ba8f28148cf0a5439b003aaddc37233 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 15 May 2017 17:51:05 +0300 Subject: [PATCH 0516/1124] small fixes & new TODOs --- src/hooks.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 86f3a6a0..1c9ed725 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -147,7 +147,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (saved_jointype == JOIN_UNIQUE_INNER) return; /* No way to do this with a parameterized inner path */ -#if defined PGPRO_VERSION && PG_VERSION_NUM >= 90603 +/* TODO: create macro initial_cost_nestloop_compat() */ +#if defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603 initial_cost_nestloop(root, &workspace, jointype, outer, inner, /* built paths */ extra); @@ -159,20 +160,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); -#if defined PGPRO_VERSION && PG_VERSION_NUM >= 90603 - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, - extra, outer, inner, - extra->restrictlist, - pathkeys, - calc_nestloop_required_outer(outer, inner)); -#else - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, - extra->sjinfo, &extra->semifactors, - outer, inner, extra->restrictlist, - pathkeys, - calc_nestloop_required_outer(outer, inner)); -#endif - /* Discard all clauses that are to be evaluated by 'inner' */ foreach (rinfo_lc, extra->restrictlist) { @@ -183,6 +170,24 @@ pathman_join_pathlist_hook(PlannerInfo *root, filtered_joinclauses = 
lappend(filtered_joinclauses, rinfo); } +/* TODO: create macro create_nestloop_path_compat() */ +#if defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603 + nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, + extra, + outer, inner, + filtered_joinclauses, + pathkeys, + calc_nestloop_required_outer(outer, inner)); +#else + nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, + extra->sjinfo, + &extra->semifactors, + outer, inner, + filtered_joinclauses, + pathkeys, + calc_nestloop_required_outer(outer, inner)); +#endif + /* * Override 'rows' value produced by standard estimator. * Currently we use get_parameterized_joinrel_size() since From ef1454c3e7386027c31b5025b2238143d635fe66 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 15 May 2017 18:14:33 +0300 Subject: [PATCH 0517/1124] improve mergejoin regression test --- expected/pathman_mergejoin.out | 5 +++++ sql/pathman_mergejoin.sql | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index 3a7dfafd..ef4b0908 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -86,3 +86,8 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 (22 rows) +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 46df040d..0bc01a66 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -5,6 +5,7 @@ CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; + CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP, @@ -35,8 +36,16 @@ INSERT INTO test.num_range_rel SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN 
test.range_rel j2 on j2.id = j1.id JOIN test.num_range_rel j3 on j3.id = j1.id WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; + + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman; From b6d45de01881e3b32296146d5eb6f391a05c6682 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 15 May 2017 18:26:21 +0300 Subject: [PATCH 0518/1124] fix regression test for PgPro 9.6.3 (1C) --- expected/pathman_mergejoin_0.out | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/expected/pathman_mergejoin_0.out b/expected/pathman_mergejoin_0.out index ae19c0ff..203ed34d 100644 --- a/expected/pathman_mergejoin_0.out +++ b/expected/pathman_mergejoin_0.out @@ -84,3 +84,8 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 (20 rows) +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; From 0c5323da0109e43a1a52d87b618d19e55086ad7e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 15 May 2017 18:31:24 +0300 Subject: [PATCH 0519/1124] bump version number (1.3.2) --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 4c72cdcb..74ec39e6 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.3.1", + "version": "1.3.2", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.3.sql", "docfile": "README.md", - "version": "1.3.1", + "version": "1.3.2", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 
e9a5c7e4..8e08b730 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10300 + 10302 (1 row) set client_min_messages = NOTICE; diff --git a/src/init.h b/src/init.h index 6b342ed2..0ae106db 100644 --- a/src/init.h +++ b/src/init.h @@ -99,7 +99,7 @@ extern PathmanInitState pg_pathman_init_state; #define LOWEST_COMPATIBLE_FRONT 0x010300 /* Current version on native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010300 +#define CURRENT_LIB_VERSION 0x010302 void *pathman_cache_search_relid(HTAB *cache_table, From 72e3c002d4d6615b0a71ad0ab0630112d65125ba Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 15 May 2017 18:39:29 +0300 Subject: [PATCH 0520/1124] Add stub version of parse_partitioning_expression and ExprDoneCond flag as result of ExecEvalExpr for pg10 --- src/partition_filter.c | 4 ++++ src/pg_pathman.c | 2 +- src/pl_funcs.c | 4 ++++ src/relation_info.c | 10 +++++++++- src/utility_stmt_hooking.c | 4 ++++ 5 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index d8d22445..810a6abd 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -600,7 +600,9 @@ partition_filter_exec(CustomScanState *node) ResultRelInfoHolder *rri_holder; bool isnull; Datum value; +#if PG_VERSION_NUM < 100000 ExprDoneCond itemIsDone; +#endif TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ @@ -628,8 +630,10 @@ partition_filter_exec(CustomScanState *node) if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); +#if PG_VERSION_NUM < 100000 if (itemIsDone != ExprSingleResult) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); +#endif /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, prel->ev_type, prel, diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a4e1e46a..09f140ec 100644 --- 
a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1939,7 +1939,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, NIL, required_outer, TOTAL_COST, - true); + false); #else cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, NIL, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 0b29986b..d9fb4137 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1127,7 +1127,9 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) Datum value; Oid value_type; bool isnull; +#if PG_VERSION_NUM < 100000 ExprDoneCond itemIsDone; +#endif Oid *parts; int nparts; @@ -1182,8 +1184,10 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); +#if PG_VERSION_NUM < 100000 if (itemIsDone != ExprSingleResult) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); +#endif /* Search for matching partitions */ parts = find_partitions_for_value(value, value_type, prel, &nparts); diff --git a/src/relation_info.c b/src/relation_info.c index a29a3f0b..e674e229 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -579,13 +579,21 @@ parse_partitioning_expression(const Oid relid, if (list_length(parsetree_list) != 1) elog(ERROR, "expression \"%s\" produced more than one query", exp_cstr); +#if PG_VERSION_NUM >= 100000 + select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; +#else select_stmt = (SelectStmt *) linitial(parsetree_list); +#endif if (query_string_out) *query_string_out = query_string; if (parsetree_out) +#if PG_VERSION_NUM >= 100000 + *parsetree_out = (Node *) linitial(parsetree_list); +#else *parsetree_out = (Node *) select_stmt; +#endif return ((ResTarget *) linitial(select_stmt->targetList))->val; } @@ -662,7 +670,7 @@ cook_partitioning_expression(const Oid relid, /* This will fail with elog in case of wrong expression */ #if PG_VERSION_NUM >= 100000 - querytree_list = pg_analyze_and_rewrite(NULL/* stub value */, query_string, NULL, 0, NULL); + querytree_list = pg_analyze_and_rewrite((RawStmt *) parsetree/* 
stub value */, query_string, NULL, 0, NULL); #else querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); #endif diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 617eff5b..ff30d928 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -607,7 +607,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; +#if PG_VERSION_NUM < 100000 ExprDoneCond itemIsDone; +#endif bool skip_tuple, isnull; Oid tuple_oid = InvalidOid; @@ -657,8 +659,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); +#if PG_VERSION_NUM < 100000 if (itemIsDone != ExprSingleResult) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); +#endif /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, From 4c62e4afb4535afee8eab523e07db7266b0d1f99 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 16 May 2017 16:42:07 +0300 Subject: [PATCH 0521/1124] Make compat version of pg_analyze_and_rewrite --- src/include/compat/pg_compat.h | 34 ++++++++++++++++++++++++++-------- src/relation_info.c | 12 +++--------- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 3b1d1acc..2a2ac897 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -197,6 +197,14 @@ extern void create_plain_partial_paths(PlannerInfo *root, #endif +/* + * get_rel_persistence() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +char get_rel_persistence(Oid relid); +#endif + + /* * InitResultRelInfo * @@ -241,6 +249,24 @@ void McxtStatsInternal(MemoryContext context, int level, #endif +/* + * pg_analyze_and_rewrite + * + * for v10 cast first arg to RawStmt type + */ +#if PG_VERSION_NUM >= 100000 +#define pg_analyze_and_rewrite_compat(parsetree, query_string, paramTypes, \ + numParams, queryEnv) \ + pg_analyze_and_rewrite((RawStmt *) 
(parsetree), (query_string), \ + (paramTypes), (numParams), (queryEnv)) +#elif PG_VERSION_NUM >= 90500 +#define pg_analyze_and_rewrite_compat(parsetree, query_string, paramTypes, \ + numParams, queryEnv) \ + pg_analyze_and_rewrite((Node *) (parsetree), (query_string), \ + (paramTypes), (numParams)) +#endif + + /* * ProcessUtility * @@ -299,14 +325,6 @@ extern void set_rel_consider_parallel(PlannerInfo *root, #endif -/* - * get_rel_persistence() - */ -#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -char get_rel_persistence(Oid relid); -#endif - - /* * ------------- * Common code diff --git a/src/relation_info.c b/src/relation_info.c index e674e229..d14e609d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -589,11 +589,7 @@ parse_partitioning_expression(const Oid relid, *query_string_out = query_string; if (parsetree_out) -#if PG_VERSION_NUM >= 100000 *parsetree_out = (Node *) linitial(parsetree_list); -#else - *parsetree_out = (Node *) select_stmt; -#endif return ((ResTarget *) linitial(select_stmt->targetList))->val; } @@ -669,11 +665,9 @@ cook_partitioning_expression(const Oid relid, old_mcxt = MemoryContextSwitchTo(parse_mcxt); /* This will fail with elog in case of wrong expression */ -#if PG_VERSION_NUM >= 100000 - querytree_list = pg_analyze_and_rewrite((RawStmt *) parsetree/* stub value */, query_string, NULL, 0, NULL); -#else - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); -#endif + querytree_list = pg_analyze_and_rewrite_compat(parsetree, query_string, + NULL, 0, NULL); + if (list_length(querytree_list) != 1) elog(ERROR, "partitioning expression produced more than 1 query"); From 17d70296914aa844425905a58a2535ba833c4784 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 16 May 2017 23:07:25 +0300 Subject: [PATCH 0522/1124] refactoring & fixes in function handle_arrexpr(), moved static inline functions to rangeset.h --- src/include/rangeset.h | 29 ++++- src/pg_pathman.c | 259 
+++++++++++++++++++++++++++++------------ src/rangeset.c | 48 ++++---- 3 files changed, 233 insertions(+), 103 deletions(-) diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 9e1d8cbb..7ca78a74 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -129,10 +129,30 @@ irange_cmp_lossiness(IndexRange a, IndexRange b) } -/* Various traits */ -bool iranges_intersect(IndexRange a, IndexRange b); -bool iranges_adjoin(IndexRange a, IndexRange b); -bool irange_eq_bounds(IndexRange a, IndexRange b); +/* Check if two ranges intersect */ +static inline bool +iranges_intersect(IndexRange a, IndexRange b) +{ + return (irange_lower(a) <= irange_upper(b)) && + (irange_lower(b) <= irange_upper(a)); +} + +/* Check if two ranges adjoin */ +static inline bool +iranges_adjoin(IndexRange a, IndexRange b) +{ + return (irange_upper(a) == irb_pred(irange_lower(b))) || + (irange_upper(b) == irb_pred(irange_lower(a))); +} + +/* Check if two ranges cover the same area */ +static inline bool +irange_eq_bounds(IndexRange a, IndexRange b) +{ + return (irange_lower(a) == irange_lower(b)) && + (irange_upper(a) == irange_upper(b)); +} + /* Basic operations on IndexRanges */ IndexRange irange_union_simple(IndexRange a, IndexRange b); @@ -141,6 +161,7 @@ IndexRange irange_intersection_simple(IndexRange a, IndexRange b); /* Operations on Lists of IndexRanges */ List *irange_list_union(List *a, List *b); List *irange_list_intersection(List *a, List *b); +List *irange_list_set_lossiness(List *ranges, bool lossy); /* Utility functions */ int irange_list_length(List *rangeset); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1e073ba3..100f374a 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -720,7 +720,7 @@ static void handle_const(const Const *c, const int strategy, const WalkerContext *context, - WrapperNode *result) /* ret value #1 */ + WrapperNode *result) /* ret value #1 */ { const PartRelationInfo *prel = context->prel; @@ -838,16 +838,103 @@ 
handle_const(const Const *c, result->paramsel = estimate_paramsel_using_prel(prel, strategy); } +/* Array handler */ +static void +handle_array(ArrayType *array, + const int strategy, + const bool use_or, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ +{ + const PartRelationInfo *prel = context->prel; + + /* Elements of the array */ + Datum *elem_values; + bool *elem_isnull; + int elem_count; + + /* Element's properties */ + Oid elem_type; + int16 elem_len; + bool elem_byval; + char elem_align; + + /* Check if we can work with this strategy */ + if (strategy == 0) + goto handle_array_return; + + /* Get element's properties */ + elem_type = ARR_ELEMTYPE(array); + get_typlenbyvalalign(elem_type, &elem_len, &elem_byval, &elem_align); + + /* Extract values from the array */ + deconstruct_array(array, elem_type, elem_len, elem_byval, elem_align, + &elem_values, &elem_isnull, &elem_count); + + /* Handle non-null Const arrays */ + if (elem_count > 0) + { + List *ranges; + int i; + + /* Set default ranges for OR | AND */ + ranges = use_or ? NIL : list_make1_irange_full(prel, IR_COMPLETE); + + /* Select partitions using values */ + for (i = 0; i < elem_count; i++) + { + WrapperNode wrap; + Const c; + + NodeSetTag(&c, T_Const); + c.consttype = elem_type; + c.consttypmod = -1; + c.constcollid = InvalidOid; + c.constlen = datumGetSize(elem_values[i], + elem_byval, + elem_len); + c.constvalue = elem_values[i]; + c.constisnull = elem_isnull[i]; + c.constbyval = elem_byval; + c.location = -1; + + handle_const(&c, strategy, context, &wrap); + + /* Should we use OR | AND? */ + ranges = use_or ? 
+ irange_list_union(ranges, wrap.rangeset) : + irange_list_intersection(ranges, wrap.rangeset); + + result->paramsel = Max(result->paramsel, wrap.paramsel); + } + + /* Free resources */ + pfree(elem_values); + pfree(elem_isnull); + + /* Save rangeset */ + result->rangeset = ranges; + + return; /* done, exit */ + } + +handle_array_return: + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); +} + /* Boolean expression handler */ static void handle_boolexpr(const BoolExpr *expr, const WalkerContext *context, - WrapperNode *result) + WrapperNode *result) /* ret value #1 */ { - ListCell *lc; - const PartRelationInfo *prel = context->prel; + const PartRelationInfo *prel = context->prel; + ListCell *lc; + /* Save expression */ result->orig = (const Node *) expr; + result->args = NIL; result->paramsel = 1.0; @@ -858,22 +945,22 @@ handle_boolexpr(const BoolExpr *expr, foreach (lc, expr->args) { - WrapperNode *arg_result; + WrapperNode *wrap; - arg_result = walk_expr_tree((Expr *) lfirst(lc), context); - result->args = lappend(result->args, arg_result); + wrap = walk_expr_tree((Expr *) lfirst(lc), context); + result->args = lappend(result->args, wrap); switch (expr->boolop) { case OR_EXPR: result->rangeset = irange_list_union(result->rangeset, - arg_result->rangeset); + wrap->rangeset); break; case AND_EXPR: result->rangeset = irange_list_intersection(result->rangeset, - arg_result->rangeset); - result->paramsel *= arg_result->paramsel; + wrap->rangeset); + result->paramsel *= wrap->paramsel; break; default: @@ -901,107 +988,131 @@ handle_boolexpr(const BoolExpr *expr, static void handle_arrexpr(const ScalarArrayOpExpr *expr, const WalkerContext *context, - WrapperNode *result) + WrapperNode *result) /* ret value #1 */ { - Node *exprnode = (Node *) linitial(expr->args); - Node *arraynode = (Node *) lsecond(expr->args); + Node *part_expr = (Node *) linitial(expr->args); + Node *array = (Node *) 
lsecond(expr->args); const PartRelationInfo *prel = context->prel; TypeCacheEntry *tce; int strategy; - result->orig = (const Node *) expr; - tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - if (!match_expr_to_operand(context->prel_expr, exprnode)) + /* Check if expression tree is a partitioning expression */ + if (!match_expr_to_operand(context->prel_expr, part_expr)) goto handle_arrexpr_return; - /* Handle non-null Const arrays */ - if (arraynode && IsA(arraynode, Const) && !((Const *) arraynode)->constisnull) - { - ArrayType *arrayval; - - int16 elemlen; - bool elembyval; - char elemalign; + /* Check if we can work with this strategy */ + if (strategy == 0) + goto handle_arrexpr_return; - int num_elems; + /* Examine the array node */ + switch (nodeTag(array)) + { + case T_Const: + { + Const *c = (Const *) array; - Datum *elem_values; - bool *elem_isnull; + /* Array is NULL */ + if (c->constisnull) + goto handle_arrexpr_return; - WalkerContext nested_wcxt; - List *ranges; - int i; + /* Examine array */ + handle_array(DatumGetArrayTypeP(c->constvalue), + strategy, expr->useOr, context, result); - /* Extract values from array */ - arrayval = DatumGetArrayTypeP(((Const *) arraynode)->constvalue); + /* Save expression */ + result->orig = (const Node *) expr; - get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), - &elemlen, &elembyval, &elemalign); + return; /* done, exit */ + } - deconstruct_array(arrayval, - ARR_ELEMTYPE(arrayval), - elemlen, elembyval, elemalign, - &elem_values, &elem_isnull, &num_elems); + case T_ArrayExpr: + { + ArrayExpr *arr_expr = (ArrayExpr *) array; + Oid elem_type = arr_expr->element_typeid; + bool array_has_params = false; + List *ranges; + ListCell *lc; - /* Copy WalkerContext */ - memcpy((void *) &nested_wcxt, - (const void *) context, - sizeof(WalkerContext)); + /* Set default ranges for OR | AND */ + ranges = expr->useOr ? 
NIL : list_make1_irange_full(prel, IR_COMPLETE); - /* Set default ranges for OR | AND */ - ranges = expr->useOr ? NIL : list_make1_irange_full(prel, IR_COMPLETE); + /* Walk trough elements list */ + foreach (lc, arr_expr->elements) + { + Node *elem = lfirst(lc); + WrapperNode wrap; - /* Select partitions using values */ - for (i = 0; i < num_elems; i++) - { - WrapperNode sub_result; - Const c; + /* Stop if ALL + quals evaluate to NIL */ + if (!expr->useOr && ranges == NIL) + break; - NodeSetTag(&c, T_Const); - c.consttype = ARR_ELEMTYPE(arrayval); - c.consttypmod = -1; - c.constcollid = InvalidOid; - c.constlen = datumGetSize(elem_values[i], - elembyval, - elemlen); - c.constvalue = elem_values[i]; - c.constisnull = elem_isnull[i]; - c.constbyval = elembyval; - c.location = -1; + /* Is this a const value? */ + if (IsConstValue(elem, context)) + { + Const *c = ExtractConst(elem, context); + + /* Is this an array?.. */ + if (c->consttype != elem_type) + { + /* Array is NULL */ + if (c->constisnull) + goto handle_arrexpr_return; + + /* Examine array */ + handle_array(DatumGetArrayTypeP(c->constvalue), + strategy, expr->useOr, context, &wrap); + } + /* ... or a single element? */ + else handle_const(c, strategy, context, &wrap); + + /* Should we use OR | AND? */ + ranges = expr->useOr ? + irange_list_union(ranges, wrap.rangeset) : + irange_list_intersection(ranges, wrap.rangeset); + } + else array_has_params = true; /* we have non-const nodes */ + } - handle_const(&c, strategy, &nested_wcxt, &sub_result); + /* Check for PARAM-related optimizations */ + if (array_has_params) + { + /* We can't say anything if PARAMs + ANY */ + if (expr->useOr) + goto handle_arrexpr_return; - ranges = expr->useOr ? 
- irange_list_union(ranges, sub_result.rangeset) : - irange_list_intersection(ranges, sub_result.rangeset); + /* Recheck condition on a narrowed set of partitions */ + ranges = irange_list_set_lossiness(ranges, IR_LOSSY); + } - result->paramsel = Max(result->paramsel, sub_result.paramsel); - } + /* Save rangeset */ + result->rangeset = ranges; - result->rangeset = ranges; - if (num_elems == 0) - result->paramsel = 0.0; + /* Save expression */ + result->orig = (const Node *) expr; - /* Free resources */ - pfree(elem_values); - pfree(elem_isnull); + return; /* done, exit */ + } - return; /* done, exit */ + default: + break; } handle_arrexpr_return: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); + + /* Save expression */ + result->orig = (const Node *) expr; } /* Operator expression handler */ static void handle_opexpr(const OpExpr *expr, const WalkerContext *context, - WrapperNode *result) + WrapperNode *result) /* ret value #1 */ { Node *param; const PartRelationInfo *prel = context->prel; diff --git a/src/rangeset.c b/src/rangeset.c index 01bec5ee..15bb5849 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -20,31 +20,6 @@ static IndexRange irange_union_internal(IndexRange first, List **new_iranges); -/* Check if two ranges intersect */ -bool -iranges_intersect(IndexRange a, IndexRange b) -{ - return (irange_lower(a) <= irange_upper(b)) && - (irange_lower(b) <= irange_upper(a)); -} - -/* Check if two ranges adjoin */ -bool -iranges_adjoin(IndexRange a, IndexRange b) -{ - return (irange_upper(a) == irb_pred(irange_lower(b))) || - (irange_upper(b) == irb_pred(irange_lower(a))); -} - -/* Check if two ranges cover the same area */ -bool -irange_eq_bounds(IndexRange a, IndexRange b) -{ - return (irange_lower(a) == irange_lower(b)) && - (irange_upper(a) == irange_upper(b)); -} - - /* Make union of two conjuncted ranges */ IndexRange irange_union_simple(IndexRange a, IndexRange b) @@ -371,6 
+346,29 @@ irange_list_intersection(List *a, List *b) return result; } +/* Set lossiness of rangeset */ +List * +irange_list_set_lossiness(List *ranges, bool lossy) +{ + List *result = NIL; + ListCell *lc; + + if (ranges == NIL) + return NIL; + + foreach (lc, ranges) + { + IndexRange ir = lfirst_irange(lc); + + result = lappend_irange(result, make_irange(irange_lower(ir), + irange_upper(ir), + lossy)); + } + + /* Unite adjacent and overlapping IndexRanges */ + return irange_list_union(result, NIL); +} + /* Get total number of elements in range list */ int irange_list_length(List *rangeset) From 1a6280eb24426ac87ce254c74ca9311aa2603c7f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 16 May 2017 23:10:18 +0300 Subject: [PATCH 0523/1124] inline static -> static inline --- src/include/nodes_common.h | 2 +- src/include/rangeset.h | 10 +++++----- src/include/relation_info.h | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/include/nodes_common.h b/src/include/nodes_common.h index b996ea61..d41a453d 100644 --- a/src/include/nodes_common.h +++ b/src/include/nodes_common.h @@ -54,7 +54,7 @@ typedef ChildScanCommonData *ChildScanCommon; /* * Destroy exhausted plan states */ -inline static void +static inline void clear_plan_states(CustomScanState *scan_state) { ListCell *state_cell; diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 7ca78a74..96d6bc21 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -55,7 +55,7 @@ typedef struct { ( list_make1_irange(make_irange(0, PrelLastChild(prel), (lossy))) ) -inline static IndexRange +static inline IndexRange make_irange(uint32 lower, uint32 upper, bool lossy) { IndexRange result = { lower & IRANGE_BOUNDARY_MASK, @@ -72,7 +72,7 @@ make_irange(uint32 lower, uint32 upper, bool lossy) return result; } -inline static IndexRange * +static inline IndexRange * alloc_irange(IndexRange irange) { IndexRange *result = (IndexRange *) palloc(sizeof(IndexRange)); @@ -84,7 +84,7 @@ 
alloc_irange(IndexRange irange) } /* Return predecessor or 0 if boundary is 0 */ -inline static uint32 +static inline uint32 irb_pred(uint32 boundary) { if (boundary > 0) @@ -94,7 +94,7 @@ irb_pred(uint32 boundary) } /* Return successor or IRANGE_BONDARY_MASK */ -inline static uint32 +static inline uint32 irb_succ(uint32 boundary) { if (boundary >= IRANGE_BOUNDARY_MASK) @@ -113,7 +113,7 @@ typedef enum } ir_cmp_lossiness; /* Comapre lossiness factor of two IndexRanges */ -inline static ir_cmp_lossiness +static inline ir_cmp_lossiness irange_cmp_lossiness(IndexRange a, IndexRange b) { if (is_irange_lossy(a) == is_irange_lossy(b)) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index c9a66dea..05c4abc5 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -87,7 +87,7 @@ FreeBound(Bound *bound, bool byval) pfree(DatumGetPointer(BoundGetValue(bound))); } -inline static int +static inline int cmp_bounds(FmgrInfo *cmp_func, const Oid collid, const Bound *b1, From 10838a14a3616361a7327bc8da6a751111cb7912 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 17 May 2017 13:38:39 +0300 Subject: [PATCH 0524/1124] Rewrite \d+ in regression tests --- expected/pathman_basic.out | 62 ++++++++++++++++++++++++-------------- sql/pathman_basic.sql | 19 ++++++++++-- 2 files changed, 57 insertions(+), 24 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c6009416..26551899 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1441,28 +1441,46 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') test.hash_rel_extern (1 row) -\d+ test.hash_rel_0 - Table "test.hash_rel_0" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+------------------------------------------------------------+---------+--------------+------------- - id | integer | not null default nextval('test.hash_rel_id_seq'::regclass) | plain | | - 
value | integer | not null | plain | | - abc | integer | | plain | | -Indexes: - "hash_rel_0_pkey" PRIMARY KEY, btree (id) - -\d+ test.hash_rel_extern - Table "test.hash_rel_extern" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+------------------------------------------------------------+---------+--------------+------------- - id | integer | not null default nextval('test.hash_rel_id_seq'::regclass) | plain | | - value | integer | not null | plain | | - abc | integer | | plain | | -Indexes: - "hash_rel_extern_pkey" PRIMARY KEY, btree (id) -Check constraints: - "pathman_hash_rel_extern_check" CHECK (pathman.get_hash_part_idx(hashint4(value), 3) = 0) -Inherits: test.hash_rel +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE 
INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; DROP TABLE test.hash_rel_0; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 180a9e7b..6ad891f1 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -382,8 +382,23 @@ SELECT * FROM test.hash_rel WHERE id = 123; /* Test replacing hash partition */ CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); -\d+ test.hash_rel_0 -\d+ test.hash_rel_extern + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; DROP TABLE test.hash_rel_0; /* Table with which we are replacing partition must have exact same structure */ From 46bce8f2dcc531ba604f8d8b1e4125901685f362 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 17 May 2017 17:08:48 +0300 Subject: [PATCH 0525/1124] Catch partitioning expression parse and analyze error and show more meaniningful errors --- expected/pathman_expressions.out | 13 ++++++++ sql/pathman_expressions.sql | 5 +++ src/relation_info.c | 57 +++++++++++++++++++++++++++++--- 
3 files changed, 70 insertions(+), 5 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 85b50bdd..d3b8e413 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -16,6 +16,19 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; 5 (1 row) +\set VERBOSITY default +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: partitioning expression parse error +DETAIL: syntax error at or near ")" +QUERY: SELECT public.add_to_pathman_config(parent_relid, expression) +CONTEXT: PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 9 at PERFORM +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: partitioning expression analyze error +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". +QUERY: SELECT public.add_to_pathman_config(parent_relid, expression) +CONTEXT: PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 9 at PERFORM +\set VERBOSITY terse SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); create_hash_partitions ------------------------ diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 9eef9e27..b95eac45 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -15,6 +15,11 @@ INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; SELECT COUNT(*) FROM test_exprs.hash_rel; +\set VERBOSITY default +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); + +\set VERBOSITY terse SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; SELECT COUNT(*) FROM test_exprs.hash_rel; diff --git a/src/relation_info.c 
b/src/relation_info.c index 537b8137..25a2bd50 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -565,8 +565,9 @@ parse_partitioning_expression(const Oid relid, char **query_string_out, /* ret value #1 */ Node **parsetree_out) /* ret value #2 */ { - SelectStmt *select_stmt; - List *parsetree_list; + SelectStmt *select_stmt; + List *parsetree_list; + MemoryContext old_mcxt; const char *sql = "SELECT (%s) FROM ONLY %s.%s"; char *relname = get_rel_name(relid), @@ -575,7 +576,31 @@ parse_partitioning_expression(const Oid relid, quote_identifier(nspname), quote_identifier(relname)); - parsetree_list = raw_parser(query_string); + old_mcxt = CurrentMemoryContext; + + PG_TRY(); + { + parsetree_list = raw_parser(query_string); + } + PG_CATCH(); + { + ErrorData *error; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + error->detail = error->message; + error->message = "partitioning expression parse error"; + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + if (list_length(parsetree_list) != 1) elog(ERROR, "expression \"%s\" produced more than one query", exp_cstr); @@ -660,8 +685,30 @@ cook_partitioning_expression(const Oid relid, */ old_mcxt = MemoryContextSwitchTo(parse_mcxt); - /* This will fail with elog in case of wrong expression */ - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + PG_TRY(); + { + /* This will fail with elog in case of wrong expression */ + querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + } + PG_CATCH(); + { + ErrorData *error; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + error->detail = error->message; + error->message = "partitioning expression analyze error"; + error->sqlerrcode = 
ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + if (list_length(querytree_list) != 1) elog(ERROR, "partitioning expression produced more than 1 query"); From 33f51965d51642746d57ffebec46a1a82a707a69 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 17 May 2017 18:26:03 +0300 Subject: [PATCH 0526/1124] add new regression tests (pathman_array_qual) --- Makefile | 3 +- expected/pathman_array_qual.out | 1726 +++++++++++++++++++++++++++++++ expected/pathman_basic.out | 253 ----- sql/pathman_array_qual.sql | 321 ++++++ sql/pathman_basic.sql | 31 - 5 files changed, 2049 insertions(+), 285 deletions(-) create mode 100644 expected/pathman_array_qual.out create mode 100644 sql/pathman_array_qual.sql diff --git a/Makefile b/Makefile index 9bc24c70..01116492 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,8 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" -REGRESS = pathman_basic \ +REGRESS = pathman_array_qual \ + pathman_basic \ pathman_bgw \ pathman_calamity \ pathman_callbacks \ diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out new file mode 100644 index 00000000..e9b006a8 --- /dev/null +++ b/expected/pathman_array_qual.out @@ -0,0 +1,1726 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) 
- pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq 
Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on 
test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 
200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ANY ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < ANY ('{500,550}'::integer[])) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{100,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test 
WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{99,100,101}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < ALL ('{500,550}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{100,700}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,100}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{99,100,101}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{500,550}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,700}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY 
('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{100,100}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > ALL ('{99,100,101}'::integer[])) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{500,550}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 
700]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ALL ('{100,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{1,100,600}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{1,100,600}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: 
(a > ANY ('{1,100,600}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{1,100,600}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{1,100,600}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + 
+EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,600,1}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,600,1}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY 
('{100,600,1}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,600,1}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a > ANY ('{100,600,1}'::integer[])) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{1,100,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{1,100,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{1,100,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{1,100,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{1,100,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,1,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> 
Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,1,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,1,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,1,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,1,600}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 
600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,600,1}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,600,1}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,600,1}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,600,1}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{100,600,1}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan 
(RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on 
test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > 
ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN 
+------------------------------------------------------------ + Append + -> Seq Scan on test_6 + Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; 
+NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > ALL ('{1,898}'::integer[])) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > ALL ('{1,898}'::integer[])) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > ALL ('{1,898}'::integer[])) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > ALL ('{1,898}'::integer[])) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > ALL ('{1,898}'::integer[])) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE 
q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + 
One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP SCHEMA array_qual CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c6009416..e41d04f3 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -517,122 +517,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1 -> Seq Scan on 
num_range_rel_4 (8 rows) -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (2500); - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_3 - Filter: (id = 2500) -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (500, 1500); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_1 - Filter: (id = ANY ('{500,1500}'::integer[])) - -> Seq Scan on num_range_rel_2 - Filter: (id = ANY ('{500,1500}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-500, 500, 1500); - QUERY PLAN ------------------------------------------------------------ - Append - -> Seq Scan on num_range_rel_1 - Filter: (id = ANY ('{-500,500,1500}'::integer[])) - -> Seq Scan on num_range_rel_2 - Filter: (id = ANY ('{-500,500,1500}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NULL); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[1500, 2200]); - QUERY PLAN -------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_2 - Filter: (id > ANY ('{1500,2200}'::integer[])) - -> Seq Scan on num_range_rel_3 - -> Seq Scan on num_range_rel_4 -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[100, 1500]); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_1 - Filter: (id > ANY ('{100,1500}'::integer[])) - -> Seq Scan on num_range_rel_2 - -> Seq Scan on num_range_rel_3 - -> Seq Scan on num_range_rel_4 -(6 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL 
(ARRAY[1500, 2200]); - QUERY PLAN -------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_3 - Filter: (id > ALL ('{1500,2200}'::integer[])) - -> Seq Scan on num_range_rel_4 -(4 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[100, 1500]); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_2 - Filter: (id > ALL ('{100,1500}'::integer[])) - -> Seq Scan on num_range_rel_3 - -> Seq Scan on num_range_rel_4 -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[1500, 2200]); - QUERY PLAN -------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_2 - Filter: (id = ANY ('{1500,2200}'::integer[])) - -> Seq Scan on num_range_rel_3 - Filter: (id = ANY ('{1500,2200}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[100, 1500]); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on num_range_rel_1 - Filter: (id = ANY ('{100,1500}'::integer[])) - -> Seq Scan on num_range_rel_2 - Filter: (id = ANY ('{100,1500}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[1500, 2200]); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[100, 1500]); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN -------------------------------------------------------------------------------- @@ -717,143 +601,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); - QUERY PLAN ------------------------------- - 
Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2, 1); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{2,1}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value = ANY ('{2,1}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value = ANY ('{1,2}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2, -1); - QUERY PLAN -------------------------------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{1,2,-1}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value = ANY ('{1,2,-1}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (0, 0, 0); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{0,0,0}'::integer[])) -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, NULL); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2]); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on hash_rel_0 - Filter: (value > ANY ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_1 - Filter: (value > ANY ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value > ANY ('{1,2}'::integer[])) -(7 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2, 3, 4, 5]); - QUERY PLAN ----------------------------------------------------------- - Append - -> Seq Scan on 
hash_rel_0 - Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_1 - Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value > ANY ('{1,2,3,4,5}'::integer[])) -(7 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2]); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on hash_rel_0 - Filter: (value > ALL ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_1 - Filter: (value > ALL ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value > ALL ('{1,2}'::integer[])) -(7 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2, 3, 4, 5]); - QUERY PLAN ----------------------------------------------------------- - Append - -> Seq Scan on hash_rel_0 - Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_1 - Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value > ALL ('{1,2,3,4,5}'::integer[])) -(7 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2]); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{1,2}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value = ANY ('{1,2}'::integer[])) -(5 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2, 3, 4, 5]); - QUERY PLAN ----------------------------------------------------------- - Append - -> Seq Scan on hash_rel_0 - Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_1 - Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) - -> Seq Scan on hash_rel_2 - Filter: (value = ANY ('{1,2,3,4,5}'::integer[])) -(7 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2]); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel 
WHERE value = ALL (ARRAY[1, 2, 3, 4, 5]); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql new file mode 100644 index 00000000..b7a7a5e9 --- /dev/null +++ b/sql/pathman_array_qual.sql @@ -0,0 +1,321 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; + + + +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); + +ANALYZE; + +/* + * Test expr IN (...) + */ + +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + + +/* + * Test expr = ANY (...) + */ + +/* a = ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + + +/* + * Test expr = ALL (...) + */ + +/* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + + +/* + * Test expr < ANY (...) + */ + +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + + +/* + * Test expr < ALL (...) + */ + +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + + +/* + * Test expr > ANY (...) + */ + +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + + +/* + * Test expr > ALL (...) + */ + +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + + +/* + * Test expr > ANY (... $1 ...) 
+ */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + + +/* + * Test expr > ALL (... $1 ...) + */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) 
EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; + +DEALLOCATE q; + +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; + +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', 
query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; + +DEALLOCATE q; + + +/* + * Test expr = ALL (... $1 ...) + */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(100); +DEALLOCATE q; + + + +DROP SCHEMA array_qual CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 180a9e7b..db5878ba 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -175,21 +175,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3 EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (2500); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (500, 1500); -EXPLAIN (COSTS OFF) 
SELECT * FROM test.num_range_rel WHERE id IN (-500, 500, 1500); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id IN (-1, -1, -1, NULL); - -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[1500, 2200]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ANY (ARRAY[100, 1500]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[1500, 2200]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > ALL (ARRAY[100, 1500]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[1500, 2200]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ANY (ARRAY[100, 1500]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[1500, 2200]); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = ALL (ARRAY[100, 1500]); - EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; @@ -206,22 +191,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (2, 1); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (1, 2, -1); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (0, 0, 0); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value IN (NULL::int, NULL, NULL); - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2]); -EXPLAIN 
(COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ANY (ARRAY[1, 2, 3, 4, 5]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value > ALL (ARRAY[1, 2, 3, 4, 5]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ANY (ARRAY[1, 2, 3, 4, 5]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2]); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = ALL (ARRAY[1, 2, 3, 4, 5]); - EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; From 23125b7c410a9f5324249c87bf55ada481013d77 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 17 May 2017 18:35:42 +0300 Subject: [PATCH 0527/1124] even more tests in pathman_array_qual --- expected/pathman_array_qual.out | 153 ++++++++++++++++++++++++++++++++ sql/pathman_array_qual.sql | 36 ++++++-- 2 files changed, 183 insertions(+), 6 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index e9b006a8..918f2930 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -941,6 +941,108 @@ DEALLOCATE q; /* * Test expr > ALL (... $1 ...) 
*/ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 
600]); EXPLAIN (COSTS OFF) EXECUTE q(1); QUERY PLAN @@ -1228,6 +1330,57 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); Filter: (a > ALL (ARRAY[100, 600, $1])) (12 rows) +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); EXPLAIN (COSTS OFF) EXECUTE q(1); diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index b7a7a5e9..3897fe17 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -155,6 +155,26 @@ DEALLOCATE q; * Test expr > ALL (... $1 ...) 
*/ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); @@ -185,7 +205,7 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; -PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); @@ -193,7 +213,16 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ @@ -213,7 +242,6 @@ $$ RAISE notice '%: number of partitions: %', query, num; END $$; - DEALLOCATE q; PREPARE q(int4[]) AS SELECT * FROM 
array_qual.test WHERE a > ALL (array[array[100, 600], $1]); @@ -224,7 +252,6 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - /* check query plan: EXECUTE q('{1, 999}') */ DO language plpgsql $$ @@ -244,7 +271,6 @@ $$ RAISE notice '%: number of partitions: %', query, num; END $$; - DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); @@ -255,7 +281,6 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); - /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ @@ -275,7 +300,6 @@ $$ RAISE notice '%: number of partitions: %', query, num; END $$; - DEALLOCATE q; From 1192aea7afc32dd18868b144697f949a5584fb4e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 17 May 2017 18:44:52 +0300 Subject: [PATCH 0528/1124] Don't forget to reenable hooks if expression parsing failed --- src/relation_info.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/relation_info.c b/src/relation_info.c index 25a2bd50..c088b26a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -705,6 +705,8 @@ cook_partitioning_expression(const Oid relid, error->cursorpos = 0; error->internalpos = 0; + /* Enable pathman hooks */ + pathman_hooks_enabled = true; ReThrowError(error); } PG_END_TRY(); From d83e9809ef469e26c5deb143ee900f15f8d0de34 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 17 May 2017 19:34:53 +0300 Subject: [PATCH 0529/1124] fix collations, more tests --- expected/pathman_array_qual.out | 101 ++++++++++++++++++++++++++++++++ sql/pathman_array_qual.sql | 35 +++++++++++ src/pg_pathman.c | 36 ++++++++---- 3 files changed, 161 insertions(+), 11 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 918f2930..e80ebdcf 100644 --- a/expected/pathman_array_qual.out +++ 
b/expected/pathman_array_qual.out @@ -2,6 +2,107 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text < ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: ((val)::text < ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: ((val)::text < ANY ('{a,b}'::text[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + -> Seq Scan on test_2 + Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + -> Seq Scan on test_3 + Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + -> Seq Scan on test_4 + Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on 
test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); create_range_partitions diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 3897fe17..e3a75400 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -6,6 +6,41 @@ CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); + +ANALYZE; + +/* + * Test expr op ANY (...) 
+ */ + +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); + +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + + + +DROP TABLE array_qual.test CASCADE; + + + CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 100f374a..1ea5bd44 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -54,10 +54,18 @@ void _PG_init(void); static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); static void handle_const(const Const *c, + const Oid collid, const int strategy, const WalkerContext *context, WrapperNode *result); +static void handle_array(ArrayType *array, + const Oid collid, + const int strategy, + const bool use_or, + const WalkerContext *context, + WrapperNode *result); + static void handle_boolexpr(const BoolExpr *expr, const WalkerContext *context, WrapperNode *result); @@ -406,9 +414,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* - * -------------------------- - * RANGE partition prunning - * -------------------------- + * ------------------------- + * RANGE partition pruning + * ------------------------- */ /* Given 'value' and 'ranges', return selected partitions list */ @@ -613,7 +621,8 @@ walk_expr_tree(Expr *expr, 
const WalkerContext *context) { /* Useful for INSERT optimization */ case T_Const: - handle_const((Const *) expr, BTEqualStrategyNumber, context, result); + handle_const((Const *) expr, ((Const *) expr)->constcollid, + BTEqualStrategyNumber, context, result); return result; /* AND, OR, NOT expressions */ @@ -718,6 +727,7 @@ wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) /* Const handler */ static void handle_const(const Const *c, + const Oid collid, const int strategy, const WalkerContext *context, WrapperNode *result) /* ret value #1 */ @@ -806,8 +816,7 @@ handle_const(const Const *c, FmgrInfo cmp_finfo; /* Cannot do much about non-equal strategies + diff. collations */ - if (strategy != BTEqualStrategyNumber && - c->constcollid != prel->ev_collid) + if (strategy != BTEqualStrategyNumber && collid != prel->ev_collid) { goto handle_const_return; } @@ -817,7 +826,7 @@ handle_const(const Const *c, getBaseType(prel->ev_type)); select_range_partitions(c->constvalue, - c->constcollid, + collid, &cmp_finfo, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), @@ -841,6 +850,7 @@ handle_const(const Const *c, /* Array handler */ static void handle_array(ArrayType *array, + const Oid collid, const int strategy, const bool use_or, const WalkerContext *context, @@ -898,7 +908,7 @@ handle_array(ArrayType *array, c.constbyval = elem_byval; c.location = -1; - handle_const(&c, strategy, context, &wrap); + handle_const(&c, collid, strategy, context, &wrap); /* Should we use OR | AND? */ ranges = use_or ? 
@@ -1020,7 +1030,8 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), - strategy, expr->useOr, context, result); + expr->inputcollid, strategy, + expr->useOr, context, result); /* Save expression */ result->orig = (const Node *) expr; @@ -1063,10 +1074,12 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), - strategy, expr->useOr, context, &wrap); + expr->inputcollid, strategy, + expr->useOr, context, &wrap); } /* ... or a single element? */ - else handle_const(c, strategy, context, &wrap); + else handle_const(c, expr->inputcollid, + strategy, context, &wrap); /* Should we use OR | AND? */ ranges = expr->useOr ? @@ -1131,6 +1144,7 @@ handle_opexpr(const OpExpr *expr, if (IsConstValue(param, context)) { handle_const(ExtractConst(param, context), + expr->inputcollid, strategy, context, result); /* Save expression */ From ab59cf056cf634c745f43ae6290b3810dfc029e6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 17 May 2017 19:53:35 +0300 Subject: [PATCH 0530/1124] new safety check in handle_arrexpr() --- src/pg_pathman.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1ea5bd44..144d7d82 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1047,6 +1047,9 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, List *ranges; ListCell *lc; + if (list_length(arr_expr->elements) == 0) + goto handle_arrexpr_return; + /* Set default ranges for OR | AND */ ranges = expr->useOr ? 
NIL : list_make1_irange_full(prel, IR_COMPLETE); From 9e4b730a0dffdd280f320d95ff04dde33b1e1475 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 17 May 2017 20:08:41 +0300 Subject: [PATCH 0531/1124] more tests (+ operator ~~) --- expected/pathman_array_qual.out | 171 ++++++++++++++++++++++++++++++++ sql/pathman_array_qual.sql | 9 ++ 2 files changed, 180 insertions(+) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index e80ebdcf..35bb6e48 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -101,6 +101,21 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = AN Filter: ((val)::text = ANY ('{a,b}'::text[])) (5 rows) +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + DROP TABLE array_qual.test CASCADE; NOTICE: drop cascades to 5 other objects CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); @@ -328,6 +343,32 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, * Test expr = ANY (...) */ /* a = ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a = ANY (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a = ANY (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); QUERY PLAN ---------------------------------------------------- @@ -394,6 +435,32 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100 * Test expr = ALL (...) */ /* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); QUERY PLAN ---------------------------------------------------- @@ -441,6 +508,32 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NUL * Test expr < ANY (...) */ /* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a < ANY (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a < ANY (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); QUERY PLAN ---------------------------------------------------- @@ -517,6 +610,32 @@ SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); * Test expr < ALL (...) */ /* a < ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); QUERY PLAN ---------------------------------------------------- @@ -580,6 +699,32 @@ SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); * Test expr > ANY (...) */ /* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a > ANY (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a > ANY (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); QUERY PLAN ---------------------------------------------------- @@ -675,6 +820,32 @@ SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); * Test expr > ALL (...) */ /* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL (NULL::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL (NULL::integer[])) +(21 rows) + EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); QUERY PLAN ---------------------------------------------------- diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index e3a75400..a126c202 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -35,6 +35,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (a /* different collations (pruning should work) */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + DROP TABLE array_qual.test CASCADE; @@ -73,6 +76,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, */ /* a = ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); @@ -85,6 +89,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100 */ /* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); @@ -98,6 +103,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NUL */ /* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); @@ -115,6 +121,7 @@ SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); */ /* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); @@ -132,6 +139,7 @@ SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); */ /* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); @@ -149,6 +157,7 @@ SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); */ /* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); From 1405e922e1d0bdb0bb828d70d0c08489ae2087ff Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 18 May 2017 14:17:45 +0300 Subject: [PATCH 0532/1124] Refine compat version of ExecEvalExpr routine --- src/include/compat/pg_compat.h | 15 +++++++++++---- src/partition_filter.c | 10 +--------- src/pl_funcs.c | 11 ++--------- src/utility_stmt_hooking.c | 11 ++--------- 4 files changed, 16 insertions(+), 31 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2a2ac897..a7b332c7 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -165,13 +165,20 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* * ExecEvalExpr + * + * 'errmsg' specifies error string when result of ExecEvalExpr doesn't return + * a single value */ #if PG_VERSION_NUM >= 100000 -#define ExecEvalExprCompat(expr, econtext, isNull, isDone) \ +#define ExecEvalExprCompat(expr, econtext, isNull, errmsg) \ ExecEvalExpr((expr), (econtext), (isNull)) -#else -#define ExecEvalExprCompat(expr, econtext, isNull, isDone) \ - ExecEvalExpr((expr), (econtext), (isNull), (isDone)) +#elif PG_VERSION_NUM >= 90500 +#define 
ExecEvalExprCompat(expr, econtext, isNull, errmsg) \ + do { \ + ExecEvalExpr((expr), (econtext), (isNull), (isDone)); \ + if (isDone != ExprSingleResult) \ + elog(ERROR, (errmsg)); \ + } while (0) #endif diff --git a/src/partition_filter.c b/src/partition_filter.c index 810a6abd..e6222326 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -600,9 +600,6 @@ partition_filter_exec(CustomScanState *node) ResultRelInfoHolder *rri_holder; bool isnull; Datum value; -#if PG_VERSION_NUM < 100000 - ExprDoneCond itemIsDone; -#endif TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ @@ -624,17 +621,12 @@ partition_filter_exec(CustomScanState *node) tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; value = ExecEvalExprCompat(state->expr_state, econtext, &isnull, - &itemIsDone); + ERR_PART_ATTR_MULTIPLE_RESULTS); econtext->ecxt_scantuple = tmp_slot; if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); -#if PG_VERSION_NUM < 100000 - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); -#endif - /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, prel->ev_type, prel, &state->result_parts, estate); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 8bcfed7d..c10b4206 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1153,9 +1153,6 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) Datum value; Oid value_type; bool isnull; -#if PG_VERSION_NUM < 100000 - ExprDoneCond itemIsDone; -#endif Oid *parts; int nparts; @@ -1204,17 +1201,13 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) source_rel, new_tuple, &value_type); - value = ExecEvalExprCompat(expr_state, econtext, &isnull, &itemIsDone); + value = ExecEvalExprCompat(expr_state, econtext, &isnull, + ERR_PART_ATTR_MULTIPLE_RESULTS); MemoryContextSwitchTo(old_mcxt); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); -#if PG_VERSION_NUM < 100000 - if (itemIsDone != ExprSingleResult) - elog(ERROR, 
ERR_PART_ATTR_MULTIPLE_RESULTS); -#endif - /* Search for matching partitions */ parts = find_partitions_for_value(value, value_type, prel, &nparts); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index ff30d928..464dcaca 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -607,9 +607,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot, *tmp_slot; -#if PG_VERSION_NUM < 100000 - ExprDoneCond itemIsDone; -#endif bool skip_tuple, isnull; Oid tuple_oid = InvalidOid; @@ -653,17 +650,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Execute expression */ tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - value = ExecEvalExprCompat(expr_state, econtext, &isnull, &itemIsDone); + value = ExecEvalExprCompat(expr_state, econtext, &isnull, + ERR_PART_ATTR_MULTIPLE_RESULTS); econtext->ecxt_scantuple = tmp_slot; if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); -#if PG_VERSION_NUM < 100000 - if (itemIsDone != ExprSingleResult) - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); -#endif - /* Search for a matching partition */ rri_holder = select_partition_for_insert(value, prel->ev_type, prel, From 20fae01d0e45a55f234d1478882a557f848e4389 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 18 May 2017 15:40:00 +0300 Subject: [PATCH 0533/1124] handle NULL arrays in handle_arrexpr() --- src/pg_pathman.c | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 144d7d82..3d9b5f24 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1011,11 +1011,11 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, /* Check if expression tree is a partitioning expression */ if (!match_expr_to_operand(context->prel_expr, part_expr)) - goto handle_arrexpr_return; + goto handle_arrexpr_all; /* Check if we can work with this strategy */ if (strategy == 0) - goto handle_arrexpr_return; + goto 
handle_arrexpr_all; /* Examine the array node */ switch (nodeTag(array)) @@ -1026,7 +1026,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, /* Array is NULL */ if (c->constisnull) - goto handle_arrexpr_return; + goto handle_arrexpr_none; /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), @@ -1048,7 +1048,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, ListCell *lc; if (list_length(arr_expr->elements) == 0) - goto handle_arrexpr_return; + goto handle_arrexpr_all; /* Set default ranges for OR | AND */ ranges = expr->useOr ? NIL : list_make1_irange_full(prel, IR_COMPLETE); @@ -1068,21 +1068,19 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, { Const *c = ExtractConst(elem, context); - /* Is this an array?.. */ - if (c->consttype != elem_type) + /* Is this an array?. */ + if (c->consttype != elem_type && !c->constisnull) { - /* Array is NULL */ - if (c->constisnull) - goto handle_arrexpr_return; - - /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), expr->inputcollid, strategy, expr->useOr, context, &wrap); } /* ... or a single element? */ - else handle_const(c, expr->inputcollid, - strategy, context, &wrap); + else + { + handle_const(c, expr->inputcollid, + strategy, context, &wrap); + } /* Should we use OR | AND? */ ranges = expr->useOr ? 
@@ -1097,7 +1095,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, { /* We can't say anything if PARAMs + ANY */ if (expr->useOr) - goto handle_arrexpr_return; + goto handle_arrexpr_all; /* Recheck condition on a narrowed set of partitions */ ranges = irange_list_set_lossiness(ranges, IR_LOSSY); @@ -1116,12 +1114,23 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, break; } -handle_arrexpr_return: +handle_arrexpr_all: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); /* Save expression */ result->orig = (const Node *) expr; + + return; + +handle_arrexpr_none: + result->rangeset = NIL; + result->paramsel = 0.0; + + /* Save expression */ + result->orig = (const Node *) expr; + + return; } /* Operator expression handler */ From 3a90ad92e6cdcad01f4931d707a6b10246b7125d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 18 May 2017 15:57:04 +0300 Subject: [PATCH 0534/1124] more tests for stupid cases with arrays --- expected/pathman_array_qual.out | 189 +++++++++++++++----------------- sql/pathman_array_qual.sql | 6 + 2 files changed, 93 insertions(+), 102 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 35bb6e48..5e81bcd2 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -344,30 +344,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, */ /* a = ANY (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on test_1 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_2 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_3 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_4 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_5 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_6 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_7 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_8 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_9 - Filter: (a = ANY (NULL::integer[])) - -> Seq Scan on test_10 - Filter: (a = ANY (NULL::integer[])) -(21 rows) + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); QUERY PLAN @@ -436,29 +424,36 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100 */ /* a = ALL (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); QUERY PLAN --------------------------------------------- Append -> Seq Scan on test_1 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_2 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_3 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_4 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_5 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_6 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_7 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_8 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_9 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) -> Seq Scan on test_10 - Filter: (a = ALL (NULL::integer[])) + Filter: (a = ALL ('{}'::integer[])) (21 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); @@ -509,30 +504,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NUL */ /* a < ANY (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on test_1 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_2 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_3 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_4 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_5 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_6 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_7 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_8 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_9 - Filter: (a < ANY (NULL::integer[])) - -> Seq Scan on test_10 - Filter: (a < ANY (NULL::integer[])) -(21 rows) + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); QUERY PLAN @@ -611,29 +594,36 @@ SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); */ /* a < ALL (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); QUERY PLAN --------------------------------------------- Append -> Seq Scan on test_1 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_2 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_3 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_4 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_5 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_6 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_7 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_8 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_9 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) -> Seq Scan on test_10 - Filter: (a < ALL (NULL::integer[])) + Filter: (a < ALL ('{}'::integer[])) (21 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); @@ -700,30 +690,18 @@ SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); */ /* a > ANY (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on test_1 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_2 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_3 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_4 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_5 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_6 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_7 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_8 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_9 - Filter: (a > ANY (NULL::integer[])) - -> Seq Scan on test_10 - Filter: (a > ANY (NULL::integer[])) -(21 rows) + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); QUERY PLAN @@ -821,29 +799,36 @@ SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); */ /* a > ALL (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); QUERY PLAN --------------------------------------------- Append -> Seq Scan on test_1 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_2 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_3 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_4 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_5 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_6 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_7 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_8 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_9 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) -> Seq Scan on test_10 - Filter: (a > ALL (NULL::integer[])) + Filter: (a > ALL ('{}'::integer[])) (21 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index a126c202..72d3c138 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -77,6 +77,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, /* a = ANY (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); @@ -90,6 +91,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100 /* a = ALL (...) - pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); @@ -104,6 +106,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NUL /* a < ANY (...) - pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); @@ -122,6 +125,7 @@ SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); /* a < ALL (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); @@ -140,6 +144,7 @@ SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); /* a > ANY (...) - pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); @@ -158,6 +163,7 @@ SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); /* a > ALL (...) 
- pruning should work */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); From 8b08edad0c3ac8b8ff524054b4f90a06bdf2d616 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Thu, 18 May 2017 17:08:12 +0300 Subject: [PATCH 0535/1124] subpartitions tests --- Makefile | 5 +- expected/pathman_expressions.out | 4 + expected/pathman_subpartitions.out | 299 +++++++++++++++++++++++++++++ sql/pathman_expressions.sql | 4 + sql/pathman_subpartitions.sql | 99 ++++++++++ 5 files changed, 409 insertions(+), 2 deletions(-) create mode 100644 expected/pathman_subpartitions.out create mode 100644 sql/pathman_subpartitions.sql diff --git a/Makefile b/Makefile index 2ebb95be..9acbd6c8 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ REGRESS = pathman_basic \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_expressions \ pathman_foreign_keys \ pathman_inserts \ pathman_interval \ @@ -40,10 +41,10 @@ REGRESS = pathman_basic \ pathman_permissions \ pathman_rowmarks \ pathman_runtime_nodes \ + pathman_subpartitions \ pathman_update_trigger \ pathman_updates \ - pathman_utility_stmt \ - pathman_expressions + pathman_utility_stmt EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index b462bf20..e0832ff8 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -152,3 +152,7 @@ SELECT COUNT(*) FROM test.range_rel_2; 24 (1 row) +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 17 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git 
a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out new file mode 100644 index 00000000..6889bb1a --- /dev/null +++ b/expected/pathman_subpartitions.out @@ -0,0 +1,299 @@ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +/* Create two level partitioning structure */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +NOTICE: sequence "abc_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc | abc_1 | 2 | a | 0 | 100 + abc | abc_2 | 2 | a | 100 | 200 + abc_1 | abc_1_0 | 1 | a | | + abc_1 | abc_1_1 | 1 | a | | + abc_1 | abc_1_2 | 1 | a | | + abc_2 | abc_2_0 | 1 | b | | + abc_2 | abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM abc; + tableoid | a | b +----------+-----+----- + abc_1_0 | 21 | 21 + abc_1_0 | 61 | 61 + abc_1_1 | 41 | 41 + abc_1_2 | 1 | 1 + abc_1_2 | 81 | 81 + abc_2_0 | 101 | 101 + abc_2_0 | 141 | 141 + abc_2_1 | 121 | 121 + abc_2_1 | 161 | 161 + abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creating of new subpartition */ +SELECT append_range_partition('abc', 'abc_3'); + append_range_partition +------------------------ + abc_3 +(1 row) + +SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); +NOTICE: sequence "abc_3_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; + parent | partition | parttype | 
partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc_3 | abc_3_1 | 2 | b | 200 | 210 + abc_3 | abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; + parent | partition | parttype | partattr | range_min | range_max +--------+-----------+----------+----------+-----------+----------- + abc_3 | abc_3_1 | 2 | b | 200 | 210 + abc_3 | abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; + tableoid | a | b +----------+-----+----- + abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; + QUERY PLAN +---------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: (a >= 210) +(4 rows) + +/* Multilevel partitioning with update triggers */ +CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +RETURNS SETOF REGCLASS AS +$$ +DECLARE + partition REGCLASS; + subpartition REGCLASS; 
+BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT partitions_tree(partition)) + LOOP + RETURN NEXT subpartition; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +RETURNS SETOF TEXT AS +$$ +DECLARE + def TEXT; +BEGIN + FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) + LOOP + RETURN NEXT def; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +ERROR: Parent table must have an update trigger +SELECT create_update_triggers('abc'); /* Only on parent */ + create_update_triggers +------------------------ + +(1 row) + +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; + p | get_triggers +---------+----------------------------------------------------------------------------------------------------------------------------- + abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE 
UPDATE OF b, a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +(14 rows) + +SELECT append_range_partition('abc', 'abc_4'); + append_range_partition +------------------------ + abc_4 +(1 row) + +SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically + create_hash_partitions +------------------------ + 2 +(1 row) + + * be created on subpartitions */ +SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; + p | get_triggers +---------+----------------------------------------------------------------------------------------------------------------------------- + abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +(4 rows) + +SELECT drop_triggers('abc_1'); /* 
Cannot perform on partition */ +ERROR: Parent table must not have an update trigger +SELECT drop_triggers('abc'); /* Only on parent */ + drop_triggers +--------------- + +(1 row) + +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ + p | get_triggers +---+-------------- +(0 rows) + +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 13 other objects +/* Test that update trigger words correclty */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +NOTICE: sequence "abc_1_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +NOTICE: sequence "abc_2_seq" does not exist, skipping + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_update_triggers('abc'); + create_update_triggers +------------------------ + +(1 row) + +INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ +SELECT tableoid::regclass, * FROM abc; + tableoid | a | b +----------+----+---- + abc_1_1 | 25 | 25 +(1 row) + +UPDATE abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ + tableoid | a | b +----------+-----+---- + abc_2_1 | 125 | 25 +(1 row) + +UPDATE abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ + tableoid | a | b +----------+-----+---- + abc_2_2 | 125 | 75 +(1 row) + +UPDATE abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ + tableoid | a | b +----------+-----+----- + abc_2_3 | 125 | 125 +(1 row) + +DROP TABLE abc CASCADE; +NOTICE: drop cascades to 7 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql 
b/sql/pathman_expressions.sql index bc24e30f..3212929a 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -51,3 +51,7 @@ UPDATE test.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= ' SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM test.range_rel_1; SELECT COUNT(*) FROM test.range_rel_2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; \ No newline at end of file diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql new file mode 100644 index 00000000..4cf5d1a1 --- /dev/null +++ b/sql/pathman_subpartitions.sql @@ -0,0 +1,99 @@ +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; + +/* Create two level partitioning structure */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('abc_1', 'a', 3); +SELECT create_hash_partitions('abc_2', 'b', 2); +SELECT * FROM pathman_partition_list; +SELECT tableoid::regclass, * FROM abc; + +/* Insert should result in creating of new subpartition */ +SELECT append_range_partition('abc', 'abc_3'); +SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; +INSERT INTO abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; +SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; + +/* Multilevel partitioning with update triggers */ +CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +RETURNS SETOF REGCLASS AS +$$ +DECLARE + partition REGCLASS; + subpartition REGCLASS; +BEGIN + 
IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT partitions_tree(partition)) + LOOP + RETURN NEXT subpartition; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +RETURNS SETOF TEXT AS +$$ +DECLARE + def TEXT; +BEGIN + FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) + LOOP + RETURN NEXT def; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql; + +SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('abc'); /* Only on parent */ +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; + +SELECT append_range_partition('abc', 'abc_4'); +SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically + * be created on subpartitions */ +SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; +SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('abc'); /* Only on parent */ +SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ + +DROP TABLE abc CASCADE; + +/* Test that update trigger words correclty */ +CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('abc', 'a', 0, 100, 2); +SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +SELECT create_update_triggers('abc'); + +INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ +SELECT tableoid::regclass, * FROM abc; +UPDATE abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ +UPDATE abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ +UPDATE abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM abc; /* Should create partition 
abc_2_3 */ + +DROP TABLE abc CASCADE; + +DROP EXTENSION pg_pathman; \ No newline at end of file From 9f92153dff4efc4c67cc354f2a6cc4c4281c8af9 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 18 May 2017 17:51:16 +0300 Subject: [PATCH 0536/1124] Fix ExecEvalExpr wrapper macro and add compat versions of initial_cost_nextloop and create_nestloop_path routines --- src/compat/pg_compat.c | 11 +++++++ src/hooks.c | 37 ++++----------------- src/include/compat/pg_compat.h | 59 ++++++++++++++++++++++++++++++---- src/partition_filter.c | 2 +- src/pl_funcs.c | 2 +- src/utility_stmt_hooking.c | 2 +- 6 files changed, 73 insertions(+), 40 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index eb182518..10c40702 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -202,6 +202,17 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #endif +/* + * ExecEvalExpr + * + * global variables for macro wrapper evaluation + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +Datum exprResult; +ExprDoneCond isDone; +#endif + + /* * make_result * Build a Result plan node diff --git a/src/hooks.c b/src/hooks.c index 15e251df..a9dad734 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -217,18 +217,8 @@ pathman_join_pathlist_hook(PlannerInfo *root, have_dangerous_phv(root, outer->parent->relids, required_inner))) return; - -/* TODO: create macro initial_cost_nestloop_compat() */ -#if (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) || \ - PG_VERSION_NUM >= 100000 - initial_cost_nestloop(root, &workspace, jointype, - outer, inner, /* built paths */ - extra); -#else - initial_cost_nestloop(root, &workspace, jointype, - outer, inner, /* built paths */ - extra->sjinfo, &extra->semifactors); -#endif + initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, + extra); pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); @@ -242,24 +232,11 @@ pathman_join_pathlist_hook(PlannerInfo *root, 
filtered_joinclauses = lappend(filtered_joinclauses, rinfo); } -/* TODO: create macro create_nestloop_path_compat() */ -#if (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) || \ - PG_VERSION_NUM >= 100000 - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, - extra, - outer, inner, - filtered_joinclauses, - pathkeys, - calc_nestloop_required_outer(outer, inner)); -#else - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, - extra->sjinfo, - &extra->semifactors, - outer, inner, - filtered_joinclauses, - pathkeys, - calc_nestloop_required_outer(outer, inner)); -#endif + nest_path = + create_nestloop_path_compat(root, joinrel, jointype, + &workspace, extra, outer, inner, + filtered_joinclauses, pathkeys, + calc_nestloop_required_outer(outer, inner)); /* * NOTE: Override 'rows' value produced by standard estimator. diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index a7b332c7..25766160 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -137,6 +137,27 @@ void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); #endif /* PG_VERSION_NUM */ +/* + * create_nestloop_path() + */ +#if PG_VERSION_NUM >= 100000 +#define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ + outer, inner, filtered_joinclauses, pathkeys, \ + required_outer) \ + create_nestloop_path((root), (joinrel), (jointype), (workspace), (extra), \ + (outer), (inner), (filtered_joinclauses), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 90500 +#define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ + outer, inner, filtered_joinclauses, pathkeys, \ + required_outer) \ + create_nestloop_path((root), (joinrel), (jointype), (workspace), \ + (extra)->sjinfo, &(extra)->semifactors, (outer), \ + (inner), (filtered_joinclauses), (pathkeys), \ + (required_outer)) +#endif + + /* * create_plain_partial_paths() */ @@ -170,15 +191,23 @@ extern 
void create_plain_partial_paths(PlannerInfo *root, * a single value */ #if PG_VERSION_NUM >= 100000 -#define ExecEvalExprCompat(expr, econtext, isNull, errmsg) \ +#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 -#define ExecEvalExprCompat(expr, econtext, isNull, errmsg) \ - do { \ - ExecEvalExpr((expr), (econtext), (isNull), (isDone)); \ - if (isDone != ExprSingleResult) \ - elog(ERROR, (errmsg)); \ - } while (0) +#include "partition_filter.h" +extern Datum exprResult; +extern ExprDoneCond isDone; +static inline void +not_signle_result_handler() +{ + elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); +} +#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ +( \ + exprResult = ExecEvalExpr((expr), (econtext), (isNull), &isDone), \ + (isDone != ExprSingleResult) ? (errHandler)() : (0), \ + exprResult \ +) #endif @@ -212,6 +241,22 @@ char get_rel_persistence(Oid relid); #endif +/* + * initial_cost_nestloop + */ +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) +#define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ + inner_path, extra) \ + initial_cost_nestloop((root), (workspace), (jointype), (outer_path), \ + (inner_path), (extra)) +#elif PG_VERSION_NUM >= 90500 +#define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ + inner_path, extra) \ + initial_cost_nestloop((root), (workspace), (jointype), (outer_path), \ + (inner_path), (extra)->sjinfo, &(extra)->semifactors) +#endif + + /* * InitResultRelInfo * diff --git a/src/partition_filter.c b/src/partition_filter.c index e6222326..2a88e747 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -621,7 +621,7 @@ partition_filter_exec(CustomScanState *node) tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; value = ExecEvalExprCompat(state->expr_state, econtext, &isnull, - ERR_PART_ATTR_MULTIPLE_RESULTS); + 
not_signle_result_handler); econtext->ecxt_scantuple = tmp_slot; if (isnull) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c10b4206..a8619350 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1202,7 +1202,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) new_tuple, &value_type); value = ExecEvalExprCompat(expr_state, econtext, &isnull, - ERR_PART_ATTR_MULTIPLE_RESULTS); + not_signle_result_handler); MemoryContextSwitchTo(old_mcxt); if (isnull) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 464dcaca..75a512ef 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -651,7 +651,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; value = ExecEvalExprCompat(expr_state, econtext, &isnull, - ERR_PART_ATTR_MULTIPLE_RESULTS); + not_signle_result_handler); econtext->ecxt_scantuple = tmp_slot; if (isnull) From e7f8c5ac2169cc1a74aff57a122f2ef016fa71f5 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 18 May 2017 18:41:17 +0300 Subject: [PATCH 0537/1124] Remove lowering for expression, add additional checks for expression before any actions --- expected/pathman_basic.out | 4 ++-- expected/pathman_calamity.out | 15 +++++++++++++++ expected/pathman_expressions.out | 20 +++++++++++++++----- init.sql | 10 ++++++++++ range.sql | 5 ----- sql/pathman_basic.sql | 2 +- sql/pathman_calamity.sql | 7 +++++++ sql/pathman_expressions.sql | 1 + src/pl_funcs.c | 32 ++++++++++++++++++++++++++++++++ 9 files changed, 83 insertions(+), 13 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c6009416..48c81cbd 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -129,11 +129,11 @@ CREATE TABLE test.range_rel ( CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; -SELECT 
pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: not enough partitions to fit all values of "dt" SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); ERROR: partitioning key "dt" must be marked NOT NULL ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); create_range_partitions ------------------------- diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 13c3c401..4f7aebc3 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -296,6 +296,21 @@ SELECT validate_relname(1::REGCLASS); ERROR: relation "1" does not exist SELECT validate_relname(NULL); ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression('calamity.part_test'); +ERROR: function validate_expression(unknown) does not exist at character 51 +SELECT validate_expression('calamity.part_test', NULL); +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); +ERROR: cannot find type name for attribute "valval" of relation "part_test" +SELECT validate_expression('calamity.part_test', 'random()'); +ERROR: functions in partitioning expression must be marked IMMUTABLE +SELECT validate_expression('calamity.part_test', 'val'); + validate_expression +--------------------- + +(1 row) + /* check function get_number_of_partitions() */ SELECT get_number_of_partitions('calamity.part_test'); get_number_of_partitions diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index d3b8e413..94f94257 100644 --- a/expected/pathman_expressions.out +++ 
b/expected/pathman_expressions.out @@ -16,18 +16,28 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; 5 (1 row) +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: functions in partitioning expression must be marked IMMUTABLE \set VERBOSITY default SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: partitioning expression parse error DETAIL: syntax error at or near ")" -QUERY: SELECT public.add_to_pathman_config(parent_relid, expression) -CONTEXT: PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 9 at PERFORM +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); ERROR: partitioning expression analyze error DETAIL: column "value3" does not exist HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
-QUERY: SELECT public.add_to_pathman_config(parent_relid, expression) -CONTEXT: PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 9 at PERFORM +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM \set VERBOSITY terse SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); create_hash_partitions @@ -88,7 +98,7 @@ CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -ERROR: start value is less than min value of "random()" +ERROR: functions in partitioning expression must be marked IMMUTABLE SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); create_range_partitions diff --git a/init.sql b/init.sql index 6cf9aa0b..1109c45d 100644 --- a/init.sql +++ b/init.sql @@ -434,6 +434,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_expression(parent_relid, expression); IF partition_data = true THEN /* Acquire data modification lock */ @@ -830,6 +831,15 @@ CREATE OR REPLACE FUNCTION @extschema@.validate_relname( RETURNS VOID AS 'pg_pathman', 'validate_relname' LANGUAGE C; +/* + * Check that expression is valid + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_expression( + relid REGCLASS, + expression TEXT) +RETURNS VOID AS 'pg_pathman', 'validate_expression' +LANGUAGE C; + /* * Check if 
regclass is date or timestamp. */ diff --git a/range.sql b/range.sql index e9bfefe9..bae93c75 100644 --- a/range.sql +++ b/range.sql @@ -67,7 +67,6 @@ DECLARE i INTEGER; BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); @@ -167,7 +166,6 @@ DECLARE i INTEGER; BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); @@ -268,7 +266,6 @@ BEGIN RAISE EXCEPTION 'Bounds array must have at least two values'; END IF; - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); @@ -319,7 +316,6 @@ DECLARE part_count INTEGER := 0; BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); @@ -376,7 +372,6 @@ DECLARE part_count INTEGER := 0; BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 180a9e7b..4da6408e 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -39,9 +39,9 @@ CREATE TABLE test.range_rel ( CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); SELECT COUNT(*) FROM test.range_rel; SELECT COUNT(*) FROM ONLY test.range_rel; diff --git 
a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index b7e4ec8c..426d19ec 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -140,6 +140,13 @@ SELECT validate_relname('calamity.part_test'); SELECT validate_relname(1::REGCLASS); SELECT validate_relname(NULL); +/* check function validate_expression() */ +SELECT validate_expression('calamity.part_test'); +SELECT validate_expression('calamity.part_test', NULL); +SELECT validate_expression('calamity.part_test', 'valval'); +SELECT validate_expression('calamity.part_test', 'random()'); +SELECT validate_expression('calamity.part_test', 'val'); + /* check function get_number_of_partitions() */ SELECT get_number_of_partitions('calamity.part_test'); SELECT get_number_of_partitions(NULL) IS NULL; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index b95eac45..7927c50d 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -15,6 +15,7 @@ INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; SELECT COUNT(*) FROM test_exprs.hash_rel; +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); \set VERBOSITY default SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 276b25bd..b3cd0b8c 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -57,6 +57,7 @@ PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); PG_FUNCTION_INFO_V1( build_check_constraint_name ); PG_FUNCTION_INFO_V1( validate_relname ); +PG_FUNCTION_INFO_V1( validate_expression ); PG_FUNCTION_INFO_V1( is_date_type ); PG_FUNCTION_INFO_V1( is_operator_supported ); PG_FUNCTION_INFO_V1( is_tuple_convertible ); @@ -595,6 +596,37 @@ validate_relname(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Validate a partitioning expression + * We need this in range functions because we do many things + * before 
actual partitioning. + */ +Datum +validate_expression(PG_FUNCTION_ARGS) +{ + Oid relid; + char *expression; + + /* Fetch relation's Oid */ + relid = PG_GETARG_OID(0); + + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid), + errdetail("triggered in function " + CppAsString(validate_expression)))); + + if (!PG_ARGISNULL(1)) + { + expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); + + cook_partitioning_expression(relid, expression, NULL); + PG_RETURN_VOID(); +} + Datum is_date_type(PG_FUNCTION_ARGS) { From 642d0bd22ed9cc7b22d6c4fb08159bc3684a4190 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 19:16:09 +0300 Subject: [PATCH 0538/1124] replace extern params with consts via eval_extern_params_mutator() --- src/hooks.c | 4 +- src/include/planner_tree_modification.h | 2 +- src/planner_tree_modification.c | 74 +++++++++++++++++++++++-- 3 files changed, 72 insertions(+), 8 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 4f2176da..325b5de8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -562,7 +562,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) incr_refcount_relation_tags(); /* Modify query tree if needed */ - pathman_transform_query(parse); + pathman_transform_query(parse, boundParams); } /* Invoke original hook if needed */ @@ -682,7 +682,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) } /* Modify query tree if needed */ - pathman_transform_query(query); + pathman_transform_query(query, NULL); } } diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 17e17fb4..eee1ea76 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -30,7 +30,7 @@ void 
plan_tree_walker(Plan *plan, void *context); /* Query tree rewriting utility */ -void pathman_transform_query(Query *parse); +void pathman_transform_query(Query *parse, ParamListInfo params); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 26ac2de0..bee6e02c 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -30,12 +30,14 @@ static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); -static void handle_modification_query(Query *parse); +static void handle_modification_query(Query *parse, ParamListInfo params); static void partition_filter_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); +static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); + /* * HACK: We have to mark each Query with a unique @@ -134,9 +136,9 @@ plan_tree_walker(Plan *plan, /* Perform some transformations on Query tree */ void -pathman_transform_query(Query *parse) +pathman_transform_query(Query *parse, ParamListInfo params) { - pathman_transform_query_walker((Node *) parse, NULL); + pathman_transform_query_walker((Node *) parse, (void *) params); } /* Walker for pathman_transform_query() */ @@ -156,7 +158,7 @@ pathman_transform_query_walker(Node *node, void *context) /* Apply Query tree modifiers */ rowmark_add_tableoids(query); disable_standard_inheritance(query); - handle_modification_query(query); + handle_modification_query(query, (ParamListInfo) context); /* Handle Query node */ return query_tree_walker(query, @@ -236,7 +238,7 @@ disable_standard_inheritance(Query *parse) /* Checks if query affects only one partition */ static void -handle_modification_query(Query *parse) +handle_modification_query(Query *parse, ParamListInfo params) { const PartRelationInfo *prel; Node 
*prel_expr; @@ -276,6 +278,10 @@ handle_modification_query(Query *parse) /* Exit if there's no expr (no use) */ if (!expr) return; + /* Check if we can replace PARAMs with CONSTs */ + if (clause_contains_params((Node *) expr) && params) + expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); + /* Prepare partitioning expression */ prel_expr = PrelExpressionForRelid(prel, result_rel); @@ -477,3 +483,61 @@ tag_extract_parenthood_status(List *relation_tag) return status; } + + +/* Replace extern param nodes with consts */ +static Node * +eval_extern_params_mutator(Node *node, ParamListInfo params) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Param)) + { + Param *param = (Param *) node; + + /* Look to see if we've been given a value for this Param */ + if (param->paramkind == PARAM_EXTERN && + params != NULL && + param->paramid > 0 && + param->paramid <= params->numParams) + { + ParamExternData *prm = ¶ms->params[param->paramid - 1]; + + if (OidIsValid(prm->ptype)) + { + /* OK to substitute parameter value? */ + if (prm->pflags & PARAM_FLAG_CONST) + { + /* + * Return a Const representing the param value. + * Must copy pass-by-ref datatypes, since the + * Param might be in a memory context + * shorter-lived than our output plan should be. 
+ */ + int16 typLen; + bool typByVal; + Datum pval; + + Assert(prm->ptype == param->paramtype); + get_typlenbyval(param->paramtype, + &typLen, &typByVal); + if (prm->isnull || typByVal) + pval = prm->value; + else + pval = datumCopy(prm->value, typByVal, typLen); + return (Node *) makeConst(param->paramtype, + param->paramtypmod, + param->paramcollid, + (int) typLen, + pval, + prm->isnull, + typByVal); + } + } + } + } + + return expression_tree_mutator(node, eval_extern_params_mutator, + (void *) params); +} From 98bfe69b024ae15a7dd6800a0c81c34d106808bb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 19:19:48 +0300 Subject: [PATCH 0539/1124] rename pathman_updates to pathman_rebuild_updates --- Makefile | 2 +- expected/{pathman_updates.out => pathman_rebuild_updates.out} | 0 sql/{pathman_updates.sql => pathman_rebuild_updates.sql} | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename expected/{pathman_updates.out => pathman_rebuild_updates.out} (100%) rename sql/{pathman_updates.sql => pathman_rebuild_updates.sql} (100%) diff --git a/Makefile b/Makefile index 01116492..1d3e1aaa 100644 --- a/Makefile +++ b/Makefile @@ -41,10 +41,10 @@ REGRESS = pathman_array_qual \ pathman_mergejoin \ pathman_only \ pathman_permissions \ + pathman_rebuild_updates \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ - pathman_updates \ pathman_utility_stmt EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_updates.out b/expected/pathman_rebuild_updates.out similarity index 100% rename from expected/pathman_updates.out rename to expected/pathman_rebuild_updates.out diff --git a/sql/pathman_updates.sql b/sql/pathman_rebuild_updates.sql similarity index 100% rename from sql/pathman_updates.sql rename to sql/pathman_rebuild_updates.sql From 38fe61ec8d2f8ea3fbc67fe5d8a1e8bdd20d5033 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 19:32:09 +0300 Subject: [PATCH 0540/1124] add new 
regression test (pathman_param_upd_del) --- Makefile | 1 + expected/pathman_param_upd_del.out | 132 +++++++++++++++++++++++++++++ sql/pathman_param_upd_del.sql | 38 +++++++++ 3 files changed, 171 insertions(+) create mode 100644 expected/pathman_param_upd_del.out create mode 100644 sql/pathman_param_upd_del.sql diff --git a/Makefile b/Makefile index 1d3e1aaa..72c1c57b 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,7 @@ REGRESS = pathman_array_qual \ pathman_lateral \ pathman_mergejoin \ pathman_only \ + pathman_param_upd_del \ pathman_permissions \ pathman_rebuild_updates \ pathman_rowmarks \ diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out new file mode 100644 index 00000000..7419ad29 --- /dev/null +++ b/expected/pathman_param_upd_del.out @@ -0,0 +1,132 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA param_upd_del; +CREATE TABLE param_upd_del.test(key INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('param_upd_del.test', 'key', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +INSERT INTO param_upd_del.test SELECT i, i FROM generate_series(1, 1000) i; +ANALYZE; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update 
on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(11); + QUERY PLAN +---------------------------- + Update on test_9 + -> Seq Scan on test_9 + Filter: (key = 11) +(3 rows) + +DEALLOCATE upd; +PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(11); + QUERY PLAN +---------------------------- + Delete on test_9 + -> Seq Scan on test_9 + Filter: (key = 11) +(3 rows) + +DEALLOCATE del; +DROP SCHEMA param_upd_del CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql new file mode 100644 index 00000000..98be1179 --- /dev/null +++ b/sql/pathman_param_upd_del.sql @@ -0,0 +1,38 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA param_upd_del; + + 
+CREATE TABLE param_upd_del.test(key INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('param_upd_del.test', 'key', 10); +INSERT INTO param_upd_del.test SELECT i, i FROM generate_series(1, 1000) i; + +ANALYZE; + + +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(11); +DEALLOCATE upd; + + +PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(11); +DEALLOCATE del; + + +DROP SCHEMA param_upd_del CASCADE; +DROP EXTENSION pg_pathman; From a27dfd6cadc1d742374d92bae9ad4db2d6f00740 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 19:54:15 +0300 Subject: [PATCH 0541/1124] small adjustments to error messages + more locks --- expected/pathman_expressions.out | 4 ++-- src/pl_funcs.c | 15 +++++++++++---- src/relation_info.c | 4 ++-- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 94f94257..d25866b4 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -20,7 +20,7 @@ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: functions in partitioning expression must be marked IMMUTABLE \set VERBOSITY default SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); -ERROR: partitioning expression parse error +ERROR: failed to parse partitioning expression DETAIL: syntax error at or near ")" QUERY: SELECT 
public.validate_expression(parent_relid, expression) CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM @@ -29,7 +29,7 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); -ERROR: partitioning expression analyze error +ERROR: failed to analyze partitioning expression DETAIL: column "value3" does not exist HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". QUERY: SELECT public.validate_expression(parent_relid, expression) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b3cd0b8c..54a25b4a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -597,9 +597,9 @@ validate_relname(PG_FUNCTION_ARGS) } /* - * Validate a partitioning expression - * We need this in range functions because we do many things - * before actual partitioning. + * Validate a partitioning expression. + * NOTE: We need this in range functions because + * we do many things before actual partitioning. 
*/ Datum validate_expression(PG_FUNCTION_ARGS) @@ -610,6 +610,9 @@ validate_expression(PG_FUNCTION_ARGS) /* Fetch relation's Oid */ relid = PG_GETARG_OID(0); + /* Protect relation from concurrent drop */ + LockRelationOid(relid, AccessShareLock); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%u\" does not exist", relid), @@ -623,7 +626,11 @@ validate_expression(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); + /* Perform some checks */ cook_partitioning_expression(relid, expression, NULL); + + UnlockRelationOid(relid, AccessShareLock); + PG_RETURN_VOID(); } @@ -763,7 +770,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); - /* Lock relation */ + /* Protect relation from concurrent modification */ xact_lock_rel_exclusive(relid, true); /* Check that relation exists */ diff --git a/src/relation_info.c b/src/relation_info.c index c088b26a..348b633a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -592,7 +592,7 @@ parse_partitioning_expression(const Oid relid, FlushErrorState(); error->detail = error->message; - error->message = "partitioning expression parse error"; + error->message = "failed to parse partitioning expression"; error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; error->cursorpos = 0; error->internalpos = 0; @@ -700,7 +700,7 @@ cook_partitioning_expression(const Oid relid, FlushErrorState(); error->detail = error->message; - error->message = "partitioning expression analyze error"; + error->message = "failed to analyze partitioning expression"; error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; error->cursorpos = 0; error->internalpos = 0; From 9905ac6a3dbbf61760f097920131aa0ab89ce8ca Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 20:03:00 +0300 
Subject: [PATCH 0542/1124] tests and fixes for validate_expression() --- expected/pathman_calamity.out | 20 ++++++++++++++------ sql/pathman_calamity.sql | 12 +++++++----- src/pl_funcs.c | 7 ++++++- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4f7aebc3..e83055b8 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -297,15 +297,23 @@ ERROR: relation "1" does not exist SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ -SELECT validate_expression('calamity.part_test'); -ERROR: function validate_expression(unknown) does not exist at character 51 -SELECT validate_expression('calamity.part_test', NULL); +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_expression('calamity.part_test', 'valval'); +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ ERROR: cannot find type name for attribute "valval" of relation "part_test" -SELECT validate_expression('calamity.part_test', 'random()'); +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ ERROR: functions in partitioning expression must be marked IMMUTABLE -SELECT validate_expression('calamity.part_test', 'val'); +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ validate_expression --------------------- diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 426d19ec..10dcc15b 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -141,11 +141,13 @@ SELECT 
validate_relname(1::REGCLASS); SELECT validate_relname(NULL); /* check function validate_expression() */ -SELECT validate_expression('calamity.part_test'); -SELECT validate_expression('calamity.part_test', NULL); -SELECT validate_expression('calamity.part_test', 'valval'); -SELECT validate_expression('calamity.part_test', 'random()'); -SELECT validate_expression('calamity.part_test', 'val'); +SELECT validate_expression(1::regclass, NULL); /* not ok */ +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ /* check function get_number_of_partitions() */ SELECT get_number_of_partitions('calamity.part_test'); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 54a25b4a..0d71f858 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -608,7 +608,12 @@ validate_expression(PG_FUNCTION_ARGS) char *expression; /* Fetch relation's Oid */ - relid = PG_GETARG_OID(0); + if (!PG_ARGISNULL(0)) + { + relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'relid' should not be NULL"))); /* Protect relation from concurrent drop */ LockRelationOid(relid, AccessShareLock); From 3fb25830c157dbf318234383711122f144ad8df6 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 19 May 2017 20:45:07 +0300 Subject: [PATCH 0543/1124] Add compat version of ExecBuildProjectionInfo --- src/hooks.c | 3 +-- src/include/compat/pg_compat.h | 17 +++++++++++++++++ src/partition_filter.c | 26 +++++++++++--------------- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index a9dad734..d631b027 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -759,8 +759,8 @@ 
pathman_relcache_hook(Datum arg, Oid relid) /* * Utility function invoker hook. */ -#if PG_VERSION_NUM >= 100000 void +#if PG_VERSION_NUM >= 100000 pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, @@ -770,7 +770,6 @@ pathman_process_utility_hook(PlannedStmt *pstmt, { Node *parsetree = pstmt->utilityStmt; #else -void pathman_process_utility_hook(Node *parsetree, const char *queryString, ProcessUtilityContext context, diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 25766160..44cb5687 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -184,6 +184,23 @@ extern void create_plain_partial_paths(PlannerInfo *root, #endif +/* + * ExecBuildProjectionInfo + */ +#if PG_VERSION_NUM >= 100000 +#define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ + ownerPlanState, inputDesc) \ + ExecBuildProjectionInfo((targetList), (econtext), (resultSlot), \ + (ownerPlanState), (inputDesc)) +#elif PG_VERSION_NUM >= 90500 +#define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ + ownerPlanState, inputDesc) \ + ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) (targetList), \ + (ownerPlanState)), \ + (econtext), (resultSlot), (inputDesc)) +#endif + + /* * ExecEvalExpr * diff --git a/src/partition_filter.c b/src/partition_filter.c index 2a88e747..b66f42d2 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -783,6 +783,7 @@ prepare_rri_returning_for_insert(EState *estate, ResultRelInfo *child_rri, *parent_rri; Index parent_rt_idx; + TupleTableSlot *result_slot; /* We don't need to do anything ff there's no map */ if (!rri_holder->tuple_map) @@ -809,23 +810,18 @@ prepare_rri_returning_for_insert(EState *estate, list_make2(makeInteger(parent_rt_idx), rri_holder)); - /* Build new projection info */ + /* Specify tuple slot where will be place projection result in */ #if PG_VERSION_NUM >= 100000 - 
child_rri->ri_projectReturning = - ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) returning_list, - /* HACK: no PlanState */ NULL), - pfstate->tup_convert_econtext, - parent_rri->ri_projectReturning->pi_state.resultslot, - (PlanState *) pfstate, - RelationGetDescr(child_rri->ri_RelationDesc)); -#else - child_rri->ri_projectReturning = - ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) returning_list, - /* HACK: no PlanState */ NULL), - pfstate->tup_convert_econtext, - parent_rri->ri_projectReturning->pi_slot, - RelationGetDescr(child_rri->ri_RelationDesc)); + result_slot = parent_rri->ri_projectReturning->pi_state.resultslot; +#elif PG_VERSION_NUM >= 90500 + result_slot = parent_rri->ri_projectReturning->pi_slot; #endif + + /* Build new projection info */ + child_rri->ri_projectReturning = + ExecBuildProjectionInfoCompat(returning_list, pfstate->tup_convert_econtext, + result_slot, NULL /* HACK: no PlanState */, + RelationGetDescr(child_rri->ri_RelationDesc)); } /* Prepare FDW access structs */ From 47e9cd01b4f60c13ed7490a514bee5e0dc22156a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 23:02:03 +0300 Subject: [PATCH 0544/1124] tiny optimization in handle_modification_query() --- src/planner_tree_modification.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index bee6e02c..af268614 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -279,7 +279,7 @@ handle_modification_query(Query *parse, ParamListInfo params) if (!expr) return; /* Check if we can replace PARAMs with CONSTs */ - if (clause_contains_params((Node *) expr) && params) + if (params && clause_contains_params((Node *) expr)) expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); /* Prepare partitioning expression */ @@ -293,7 +293,7 @@ handle_modification_query(Query *parse, ParamListInfo params) /* * If only one partition is affected, - * 
substitute parent table with partition. + * substitute parent table with the partition. */ if (irange_list_length(ranges) == 1) { From 0e650ddf9f40f37f82c31b376e68b6b3af3d279b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 19 May 2017 23:07:26 +0300 Subject: [PATCH 0545/1124] add missing portions copyrights --- src/planner_tree_modification.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index af268614..02f20f51 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -4,6 +4,8 @@ * Functions for query- and plan- tree modification * * Copyright (c) 2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ @@ -496,9 +498,10 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) { Param *param = (Param *) node; + Assert(params); + /* Look to see if we've been given a value for this Param */ if (param->paramkind == PARAM_EXTERN && - params != NULL && param->paramid > 0 && param->paramid <= params->numParams) { From 72d88abd1568e5738f08f0d03f85e707f1bbcbe1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 20 May 2017 00:58:47 +0300 Subject: [PATCH 0546/1124] refactoring & bugfixes, fix memory consumption and error handling in cook_partitioning_expression() --- expected/pathman_expressions.out | 45 ++++++++- sql/pathman_expressions.sql | 32 +++++- src/hooks.c | 26 ++--- src/include/hooks.h | 2 +- src/include/init.h | 23 +++-- src/include/pathman.h | 3 - src/include/relation_info.h | 2 +- src/init.c | 23 +++-- src/pg_pathman.c | 3 - src/relation_info.c | 165 ++++++++++++++++++------------- 10 files changed, 208 insertions(+), 116 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 
d25866b4..c5910736 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -2,7 +2,18 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_exprs; -/* hash */ +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER, @@ -18,9 +29,20 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: functions in partitioning expression must be marked IMMUTABLE +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + \set VERBOSITY default SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); -ERROR: failed to parse partitioning expression +ERROR: failed to parse partitioning expression (value * value2))) DETAIL: syntax error at or near ")" QUERY: SELECT public.validate_expression(parent_relid, expression) CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM @@ -29,7 +51,7 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); -ERROR: failed to analyze partitioning expression +ERROR: failed to analyze partitioning expression (value * value3) DETAIL: column "value3" does not exist HINT: Perhaps you 
meant to reference the column "hash_rel.value" or the column "hash_rel.value2". QUERY: SELECT public.validate_expression(parent_relid, expression) @@ -38,6 +60,17 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + \set VERBOSITY terse SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); create_hash_partitions @@ -93,7 +126,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5 Filter: ((value * value2) = 5) (3 rows) -/* range */ +/* + * Test RANGE + */ CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; @@ -174,5 +209,5 @@ SELECT COUNT(*) FROM test_exprs.range_rel_2; (1 row) DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 24 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 7927c50d..898a698e 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -5,7 +5,17 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_exprs; -/* hash */ +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + + + +/* + * Test HASH + */ + CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY 
KEY, value INTEGER, @@ -16,11 +26,24 @@ INSERT INTO test_exprs.hash_rel (value, value2) SELECT COUNT(*) FROM test_exprs.hash_rel; SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); + +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + + \set VERBOSITY default + SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + \set VERBOSITY terse + + SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; SELECT COUNT(*) FROM test_exprs.hash_rel; @@ -33,7 +56,12 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; -/* range */ + + +/* + * Test RANGE + */ + CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) diff --git a/src/hooks.c b/src/hooks.c index 325b5de8..78a061be 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -506,23 +506,23 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * Intercept 'pg_pathman.enable' GUC assignments. */ void -pg_pathman_enable_assign_hook(bool newval, void *extra) +pathman_enable_assign_hook(bool newval, void *extra) { elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? 
"true" : "false"); /* Return quickly if nothing has changed */ - if (newval == (pg_pathman_init_state.pg_pathman_enable && - pg_pathman_init_state.auto_partition && - pg_pathman_init_state.override_copy && + if (newval == (pathman_init_state.pg_pathman_enable && + pathman_init_state.auto_partition && + pathman_init_state.override_copy && pg_pathman_enable_runtimeappend && pg_pathman_enable_runtime_merge_append && pg_pathman_enable_partition_filter && pg_pathman_enable_bounds_cache)) return; - pg_pathman_init_state.auto_partition = newval; - pg_pathman_init_state.override_copy = newval; + pathman_init_state.auto_partition = newval; + pathman_init_state.override_copy = newval; pg_pathman_enable_runtimeappend = newval; pg_pathman_enable_runtime_merge_append = newval; pg_pathman_enable_partition_filter = newval; @@ -552,11 +552,13 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) PlannedStmt *result; uint32 query_id = parse->queryId; - bool pathman_ready = IsPathmanReady(); /* in case it changes */ + + /* Save the result in case it changes */ + bool pathman_ready = pathman_hooks_enabled && IsPathmanReady(); PG_TRY(); { - if (pathman_ready && pathman_hooks_enabled) + if (pathman_ready) { /* Increment relation tags refcount */ incr_refcount_relation_tags(); @@ -571,7 +573,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) else result = standard_planner(parse, cursorOptions, boundParams); - if (pathman_ready && pathman_hooks_enabled) + if (pathman_ready) { /* Give rowmark-related attributes correct names */ ExecuteForPlanTree(result, postprocess_lock_rows); @@ -711,13 +713,13 @@ pathman_relcache_hook(Datum arg, Oid relid) PartParentSearch search; Oid partitioned_table; - if (!IsPathmanReady()) - return; - /* Hooks can be disabled */ if (!pathman_hooks_enabled) return; + if (!IsPathmanReady()) + return; + /* We shouldn't even consider special OIDs */ if (relid < FirstNormalObjectId) return; diff --git 
a/src/include/hooks.h b/src/include/hooks.h index 95400fe2..5eab7079 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -40,7 +40,7 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte); -void pg_pathman_enable_assign_hook(char newval, void *extra); +void pathman_enable_assign_hook(char newval, void *extra); PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, diff --git a/src/include/init.h b/src/include/init.h index d5e877c0..64888595 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -63,7 +63,10 @@ extern HTAB *parent_cache; extern HTAB *bound_cache; /* pg_pathman's initialization state */ -extern PathmanInitState pg_pathman_init_state; +extern PathmanInitState pathman_init_state; + +/* pg_pathman's hooks state */ +extern bool pathman_hooks_enabled; /* Transform pg_pathman's memory context into simple name */ @@ -94,12 +97,12 @@ simpify_mcxt_name(MemoryContext mcxt) /* * Check if pg_pathman is initialized. */ -#define IsPathmanInitialized() ( !pg_pathman_init_state.initialization_needed ) +#define IsPathmanInitialized() ( !pathman_init_state.initialization_needed ) /* * Check if pg_pathman is enabled. */ -#define IsPathmanEnabled() ( pg_pathman_init_state.pg_pathman_enable ) +#define IsPathmanEnabled() ( pathman_init_state.pg_pathman_enable ) /* * Check if pg_pathman is initialized & enabled. @@ -109,12 +112,12 @@ simpify_mcxt_name(MemoryContext mcxt) /* * Should we override COPY stmt handling? */ -#define IsOverrideCopyEnabled() ( pg_pathman_init_state.override_copy ) +#define IsOverrideCopyEnabled() ( pathman_init_state.override_copy ) /* * Check if auto partition creation is enabled. */ -#define IsAutoPartitionEnabled() ( pg_pathman_init_state.auto_partition ) +#define IsAutoPartitionEnabled() ( pathman_init_state.auto_partition ) /* * Enable/disable auto partition propagation. 
Note that this only works if @@ -124,7 +127,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define SetAutoPartitionEnabled(value) \ do { \ Assert((value) == true || (value) == false); \ - pg_pathman_init_state.auto_partition = (value); \ + pathman_init_state.auto_partition = (value); \ } while (0) /* @@ -132,10 +135,10 @@ simpify_mcxt_name(MemoryContext mcxt) */ #define DisablePathman() \ do { \ - pg_pathman_init_state.pg_pathman_enable = false; \ - pg_pathman_init_state.auto_partition = false; \ - pg_pathman_init_state.override_copy = false; \ - pg_pathman_init_state.initialization_needed = true; \ + pathman_init_state.pg_pathman_enable = false; \ + pathman_init_state.auto_partition = false; \ + pathman_init_state.override_copy = false; \ + pathman_init_state.initialization_needed = true; \ } while (0) diff --git a/src/include/pathman.h b/src/include/pathman.h index 7ef6ced5..f89698e3 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -94,9 +94,6 @@ extern Oid pathman_config_relid; extern Oid pathman_config_params_relid; -/* Hooks enable state */ -extern bool pathman_hooks_enabled; - /* * Just to clarify our intentions (return the corresponding relid). 
*/ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 05c4abc5..69578645 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -284,7 +284,7 @@ const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, /* Partitioning expression routines */ Node *parse_partitioning_expression(const Oid relid, - const char *expression, + const char *expr_cstr, char **query_string_out, Node **parsetree_out); diff --git a/src/init.c b/src/init.c index 4b0d6d19..f6c365f6 100644 --- a/src/init.c +++ b/src/init.c @@ -55,11 +55,16 @@ HTAB *parent_cache = NULL; HTAB *bound_cache = NULL; /* pg_pathman's init status */ -PathmanInitState pg_pathman_init_state; +PathmanInitState pathman_init_state; + +/* pg_pathman's hooks state */ +bool pathman_hooks_enabled = true; + /* Shall we install new relcache callback? */ static bool relcache_callback_needed = true; + /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); @@ -123,13 +128,13 @@ pathman_cache_search_relid(HTAB *cache_table, void save_pathman_init_state(PathmanInitState *temp_init_state) { - *temp_init_state = pg_pathman_init_state; + *temp_init_state = pathman_init_state; } void restore_pathman_init_state(const PathmanInitState *temp_init_state) { - pg_pathman_init_state = *temp_init_state; + pathman_init_state = *temp_init_state; } /* @@ -142,19 +147,19 @@ init_main_pathman_toggles(void) DefineCustomBoolVariable("pg_pathman.enable", "Enables pg_pathman's optimizations during the planner stage", NULL, - &pg_pathman_init_state.pg_pathman_enable, + &pathman_init_state.pg_pathman_enable, DEFAULT_PATHMAN_ENABLE, PGC_SUSET, 0, NULL, - pg_pathman_enable_assign_hook, + pathman_enable_assign_hook, NULL); /* Global toggle for automatic partition creation */ DefineCustomBoolVariable("pg_pathman.enable_auto_partition", "Enables automatic partition creation", NULL, - &pg_pathman_init_state.auto_partition, + 
&pathman_init_state.auto_partition, DEFAULT_AUTO, PGC_SUSET, 0, @@ -166,7 +171,7 @@ init_main_pathman_toggles(void) DefineCustomBoolVariable("pg_pathman.override_copy", "Override COPY statement handling", NULL, - &pg_pathman_init_state.override_copy, + &pathman_init_state.override_copy, DEFAULT_OVERRIDE_COPY, PGC_SUSET, 0, @@ -208,7 +213,7 @@ load_config(void) } /* Mark pg_pathman as initialized */ - pg_pathman_init_state.initialization_needed = false; + pathman_init_state.initialization_needed = false; elog(DEBUG2, "pg_pathman's config has been loaded successfully [%u]", MyProcPid); @@ -228,7 +233,7 @@ unload_config(void) fini_local_cache(); /* Mark pg_pathman as uninitialized */ - pg_pathman_init_state.initialization_needed = true; + pathman_init_state.initialization_needed = true; elog(DEBUG2, "pg_pathman's config has been unloaded successfully [%u]", MyProcPid); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3d9b5f24..f4d2569f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -42,9 +42,6 @@ PG_MODULE_MAGIC; Oid pathman_config_relid = InvalidOid, pathman_config_params_relid = InvalidOid; -/* Used to disable hooks temporarily */ -bool pathman_hooks_enabled = true; - /* pg module functions */ void _PG_init(void); diff --git a/src/relation_info.c b/src/relation_info.c index 348b633a..9b080901 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -46,6 +46,11 @@ #endif +/* Error messages for partitioning expression */ +#define PARSE_PART_EXPR_ERROR "failed to parse partitioning expression (%s)" +#define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression (%s)" + + /* Comparison function info */ typedef struct cmp_func_info { @@ -561,7 +566,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Wraps expression in SELECT query and returns parse tree */ Node * parse_partitioning_expression(const Oid relid, - const char *exp_cstr, + const char *expr_cstr, char **query_string_out, /* ret value #1 */ Node **parsetree_out) /* ret 
value #2 */ { @@ -572,7 +577,7 @@ parse_partitioning_expression(const Oid relid, const char *sql = "SELECT (%s) FROM ONLY %s.%s"; char *relname = get_rel_name(relid), *nspname = get_namespace_name(get_rel_namespace(relid)); - char *query_string = psprintf(sql, exp_cstr, + char *query_string = psprintf(sql, expr_cstr, quote_identifier(nspname), quote_identifier(relname)); @@ -591,18 +596,19 @@ parse_partitioning_expression(const Oid relid, error = CopyErrorData(); FlushErrorState(); - error->detail = error->message; - error->message = "failed to parse partitioning expression"; - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; ReThrowError(error); } PG_END_TRY(); if (list_length(parsetree_list) != 1) - elog(ERROR, "expression \"%s\" produced more than one query", exp_cstr); + elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); select_stmt = (SelectStmt *) linitial(parsetree_list); @@ -621,31 +627,38 @@ cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type_out) /* ret value #1 */ { - Node *parsetree; - List *querytree_list; - TargetEntry *target_entry; + Node *parse_tree, + *raw_expr; + List *query_tree_list; + + volatile bool ok_rewrite = false; /* must be volatile, else + cpu register might change + in case of longjmp() */ - Node *raw_expr; - Query *expr_query; - PlannedStmt *expr_plan; - Node *expr; - Datum expr_datum; + char *query_string, + *expr_serialized = ""; /* keep compiler happy */ - char *query_string, - *expr_serialized; + Datum expr_datum; - MemoryContext parse_mcxt, - old_mcxt; + MemoryContext parse_mcxt, + old_mcxt; AssertTemporaryContext(); + /* + * We use separate memory context here, just to make sure we won't + * 
leave anything behind after parsing, rewriting and planning. + */ parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, - "pathman parse context", + CppAsString(cook_partitioning_expression), ALLOCSET_DEFAULT_SIZES); - /* Keep raw expression */ + /* Switch to mcxt for cooking :) */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); + + /* First we have to build a raw AST */ raw_expr = parse_partitioning_expression(relid, expr_cstr, - &query_string, &parsetree); + &query_string, &parse_tree); /* Check if raw_expr is NULLable */ if (IsA(raw_expr, ColumnRef)) @@ -666,87 +679,99 @@ cook_partitioning_expression(const Oid relid, attnotnull = att_tup->attnotnull; ReleaseSysCache(htup); } - else elog(ERROR, "cannot find type name for attribute \"%s\" " - "of relation \"%s\"", + else elog(ERROR, "cannot find type name for attribute \"%s\"" + " of relation \"%s\"", attname, get_rel_name_or_relid(relid)); if (!attnotnull) - elog(ERROR, "partitioning key \"%s\" must be marked NOT NULL", attname); + elog(ERROR, "partitioning key \"%s\" must be marked NOT NULL", + attname); } } - /* We don't need pathman activity initialization for this relation yet */ + /* We don't need pg_pathman's magic here */ pathman_hooks_enabled = false; - /* - * We use separate memory context here, just to make sure we - * don't leave anything behind after analyze and planning. - * Parsed raw expression will stay in caller's context. 
- */ - old_mcxt = MemoryContextSwitchTo(parse_mcxt); - PG_TRY(); { - /* This will fail with elog in case of wrong expression */ - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + TargetEntry *target_entry; + + Query *expr_query; + PlannedStmt *expr_plan; + Node *expr; + + /* This will fail with ERROR in case of wrong expression */ + query_tree_list = pg_analyze_and_rewrite(parse_tree, query_string, NULL, 0); + ok_rewrite = true; /* tell PG_CATCH that we're fine */ + + if (list_length(query_tree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); + + expr_query = (Query *) linitial(query_tree_list); + + /* Plan this query. We reuse 'expr_node' here */ + expr_plan = pg_plan_query(expr_query, 0, NULL); + + target_entry = IsA(expr_plan->planTree, IndexOnlyScan) ? + linitial(((IndexOnlyScan *) expr_plan->planTree)->indextlist) : + linitial(expr_plan->planTree->targetlist); + + expr = eval_const_expressions(NULL, (Node *) target_entry->expr); + if (contain_mutable_functions(expr)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression" + " must be marked IMMUTABLE"))); + + Assert(expr); + expr_serialized = nodeToString(expr); + + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); } PG_CATCH(); { ErrorData *error; + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* + * Simply rethrow if rewrite of AST was successful. + * NOTE: We aim to modify only ERRORs that + * are relevant to analyze and rewrite steps. 
+ */ + if (ok_rewrite) + PG_RE_THROW(); + /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); error = CopyErrorData(); FlushErrorState(); - error->detail = error->message; - error->message = "failed to analyze partitioning expression"; - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; - /* Enable pathman hooks */ - pathman_hooks_enabled = true; ReThrowError(error); } PG_END_TRY(); - if (list_length(querytree_list) != 1) - elog(ERROR, "partitioning expression produced more than 1 query"); - - expr_query = (Query *) linitial(querytree_list); - - /* Plan this query. We reuse 'expr_node' here */ - expr_plan = pg_plan_query(expr_query, 0, NULL); - - target_entry = IsA(expr_plan->planTree, IndexOnlyScan) ? 
- linitial(((IndexOnlyScan *) expr_plan->planTree)->indextlist) : - linitial(expr_plan->planTree->targetlist); - - expr = eval_const_expressions(NULL, (Node *) target_entry->expr); - if (contain_mutable_functions(expr)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in partitioning expression must be marked IMMUTABLE"))); - - Assert(expr); - expr_serialized = nodeToString(expr); + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; /* Switch to previous mcxt */ MemoryContextSwitchTo(old_mcxt); - /* Set 'expr_type_out' if needed */ - if (expr_type_out) - *expr_type_out = exprType(expr); - expr_datum = CStringGetTextDatum(expr_serialized); /* Free memory */ MemoryContextDelete(parse_mcxt); - /* Enable pathman hooks */ - pathman_hooks_enabled = true; - return expr_datum; } From 4cae42bf4ccaca851e4c95001f9d37f81c537ec4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 22 May 2017 17:40:30 +0300 Subject: [PATCH 0547/1124] fix paramsel in handle_const() --- src/pg_pathman.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index f4d2569f..d2b9d376 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -803,7 +803,7 @@ handle_const(const Const *c, PrelChildrenCount(prel)); result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + result->paramsel = 1.0; return; /* done, exit */ } @@ -828,9 +828,8 @@ handle_const(const Const *c, PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), strategy, - result); /* output */ - - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + result); /* result->rangeset = ... 
*/ + result->paramsel = 1.0; return; /* done, exit */ } @@ -841,7 +840,7 @@ handle_const(const Const *c, handle_const_return: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + result->paramsel = 1.0; } /* Array handler */ From 16ffb9bdb8486e807399a23cf17351ce79ba162b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 22 May 2017 17:42:42 +0300 Subject: [PATCH 0548/1124] update comments to WrapperNode --- src/include/pathman.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index f89698e3..cf8a3c66 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -134,8 +134,9 @@ typedef struct const Node *orig; /* examined expression */ List *args; /* extracted from 'orig' */ List *rangeset; /* IndexRanges representing selected parts */ + double paramsel; /* estimated selectivity of PARAMs + (for RuntimeAppend costs) */ bool found_gap; /* were there any gaps? */ - double paramsel; /* estimated selectivity */ } WrapperNode; typedef struct From e835537206f8a99f30aae9750bbdc39a5c92acc6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 22 May 2017 17:51:09 +0300 Subject: [PATCH 0549/1124] fix paramsel in handle_array() --- src/pg_pathman.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d2b9d376..ebc696f2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -762,7 +762,7 @@ handle_const(const Const *c, } else { - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->rangeset = list_make1_irange_full(prel, IR_COMPLETE); result->paramsel = 1.0; } @@ -883,7 +883,7 @@ handle_array(ArrayType *array, List *ranges; int i; - /* Set default ranges for OR | AND */ + /* Set default rangeset */ ranges = use_or ? 
NIL : list_make1_irange_full(prel, IR_COMPLETE); /* Select partitions using values */ @@ -910,23 +910,21 @@ handle_array(ArrayType *array, ranges = use_or ? irange_list_union(ranges, wrap.rangeset) : irange_list_intersection(ranges, wrap.rangeset); - - result->paramsel = Max(result->paramsel, wrap.paramsel); } /* Free resources */ pfree(elem_values); pfree(elem_isnull); - /* Save rangeset */ result->rangeset = ranges; + result->paramsel = 1.0; return; /* done, exit */ } handle_array_return: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + result->paramsel = 1.0; } /* Boolean expression handler */ From b92dae9b383f28d6655c84db7758365227548464 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 22 May 2017 18:03:22 +0300 Subject: [PATCH 0550/1124] beautify handle_boolexpr() --- src/pg_pathman.c | 44 ++++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index ebc696f2..06fd34d3 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -934,58 +934,62 @@ handle_boolexpr(const BoolExpr *expr, WrapperNode *result) /* ret value #1 */ { const PartRelationInfo *prel = context->prel; + List *ranges, + *args = NIL; + double paramsel = 1.0; ListCell *lc; - /* Save expression */ - result->orig = (const Node *) expr; - - result->args = NIL; - result->paramsel = 1.0; - - /* First, set default rangeset */ - result->rangeset = (expr->boolop == AND_EXPR) ? - list_make1_irange_full(prel, IR_COMPLETE) : - NIL; + /* Set default rangeset */ + ranges = (expr->boolop == AND_EXPR) ? 
+ list_make1_irange_full(prel, IR_COMPLETE) : + NIL; + /* Examine expressions */ foreach (lc, expr->args) { WrapperNode *wrap; wrap = walk_expr_tree((Expr *) lfirst(lc), context); - result->args = lappend(result->args, wrap); + args = lappend(args, wrap); switch (expr->boolop) { case OR_EXPR: - result->rangeset = irange_list_union(result->rangeset, - wrap->rangeset); + ranges = irange_list_union(ranges, wrap->rangeset); break; case AND_EXPR: - result->rangeset = irange_list_intersection(result->rangeset, - wrap->rangeset); - result->paramsel *= wrap->paramsel; + ranges = irange_list_intersection(ranges, wrap->rangeset); + paramsel *= wrap->paramsel; break; default: - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + ranges = list_make1_irange_full(prel, IR_LOSSY); break; } } + /* Adjust paramsel for OR */ if (expr->boolop == OR_EXPR) { - int totallen = irange_list_length(result->rangeset); + int totallen = irange_list_length(ranges); foreach (lc, result->args) { WrapperNode *arg = (WrapperNode *) lfirst(lc); int len = irange_list_length(arg->rangeset); - result->paramsel *= (1.0 - arg->paramsel * (double)len / (double)totallen); + paramsel *= (1.0 - arg->paramsel * (double)len / (double)totallen); } - result->paramsel = 1.0 - result->paramsel; + + paramsel = 1.0 - paramsel; } + + /* Save results */ + result->rangeset = ranges; + result->paramsel = paramsel; + result->orig = (const Node *) expr; + result->args = args; } /* Scalar array expression handler */ From 6d968d2021f153fd19cc09c77152b169cc5e9173 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 22 May 2017 18:36:17 +0300 Subject: [PATCH 0551/1124] Make compat version of exec_append_common --- src/nodes_common.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 93218d26..96eb6f48 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -659,15 +659,36 @@ 
exec_append_common(CustomScanState *node, void (*fetch_next_tuple) (CustomScanState *node)) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + TupleTableSlot *result; /* ReScan if no plans are selected */ if (scan_state->ncur_plans == 0) ExecReScan(&node->ss.ps); +#if PG_VERSION_NUM >= 100000 + fetch_next_tuple(node); /* use specific callback */ + + if (TupIsNull(scan_state->slot)) + return NULL; + + if (!node->ss.ps.ps_ProjInfo) + return scan_state->slot; + + /* + * Assuming that current projection doesn't involve SRF + * + * Any SFR functions are evaluated in the specialized parent node ProjectSet + */ + ResetExprContext(node->ss.ps.ps_ExprContext); + node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = + scan_state->slot; + result = ExecProject(node->ss.ps.ps_ProjInfo); + + return result; +#elif PG_VERSION_NUM >= 90500 for (;;) { /* Fetch next tuple if we're done with Projections */ -#if PG_VERSION_NUM < 100000 if (!node->ss.ps.ps_TupFromTlist) { fetch_next_tuple(node); /* use specific callback */ @@ -675,23 +696,17 @@ exec_append_common(CustomScanState *node, if (TupIsNull(scan_state->slot)) return NULL; } -#endif if (node->ss.ps.ps_ProjInfo) { ExprDoneCond isDone; - TupleTableSlot *result; ResetExprContext(node->ss.ps.ps_ExprContext); - node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; -#if PG_VERSION_NUM >= 100000 - result = ExecProject(node->ss.ps.ps_ProjInfo); -#else + node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = + scan_state->slot; result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone); -#endif -#if PG_VERSION_NUM < 100000 if (isDone != ExprEndResult) { node->ss.ps.ps_TupFromTlist = (isDone == ExprMultipleResult); @@ -700,14 +715,11 @@ exec_append_common(CustomScanState *node, } else node->ss.ps.ps_TupFromTlist = false; -#else - if (isDone != ExprEndResult) - return result; -#endif } else return scan_state->slot; } +#endif } void From 3d225b78de24b61c98f48ca3f3a761695a431639 Mon Sep 17 00:00:00 
2001 From: Dmitry Ivanov Date: Mon, 22 May 2017 18:40:08 +0300 Subject: [PATCH 0552/1124] simplify handle_opexpr() --- src/pg_pathman.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 06fd34d3..78764ac9 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1140,6 +1140,9 @@ handle_opexpr(const OpExpr *expr, Node *param; const PartRelationInfo *prel = context->prel; + /* Save expression */ + result->orig = (const Node *) expr; + if (list_length(expr->args) == 2) { /* Is it KEY OP PARAM or PARAM OP KEY? */ @@ -1157,9 +1160,6 @@ handle_opexpr(const OpExpr *expr, expr->inputcollid, strategy, context, result); - /* Save expression */ - result->orig = (const Node *) expr; - return; /* done, exit */ } /* TODO: estimate selectivity for param if it's Var */ @@ -1168,19 +1168,13 @@ handle_opexpr(const OpExpr *expr, result->rangeset = list_make1_irange_full(prel, IR_LOSSY); result->paramsel = estimate_paramsel_using_prel(prel, strategy); - /* Save expression */ - result->orig = (const Node *) expr; - return; /* done, exit */ } } } result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = 1.0; /* can't give any estimates */ - - /* Save expression */ - result->orig = (const Node *) expr; + result->paramsel = 1.0; } From 1d7820744b7e76c5283c98eade9dfd5684a799c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 14:46:32 +0300 Subject: [PATCH 0553/1124] more optimizations in handle_array() --- expected/pathman_array_qual.out | 217 ++++++++++++++++++++++++++++++++ sql/pathman_array_qual.sql | 27 ++++ src/include/pathman.h | 20 +-- src/partition_creation.c | 4 +- src/pg_pathman.c | 80 ++++++++++++ src/pl_range_funcs.c | 3 +- 6 files changed, 338 insertions(+), 13 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 5e81bcd2..da6a8f8b 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ 
-419,6 +419,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100 Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) (7 rows) +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + /* * Test expr = ALL (...) */ @@ -573,6 +580,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700 Filter: (a < ANY ('{NULL,700}'::integer[])) (9 rows) +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + SET pg_pathman.enable = f; NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); @@ -669,6 +683,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700 One-Time Filter: false (2 rows) +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + SET pg_pathman.enable = f; NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); @@ -778,6 +799,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700 -> Seq Scan on test_10 (6 rows) +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + SET pg_pathman.enable = f; NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); @@ -894,6 +922,13 @@ EXPLAIN 
(COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700 One-Time Filter: false (2 rows) +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + SET pg_pathman.enable = f; NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); @@ -1194,6 +1229,132 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); Filter: (a > ANY (ARRAY[100, 600, $1])) (22 rows) +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 
test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + DEALLOCATE q; /* * Test expr > ALL (... $1 ...) @@ -1299,6 +1460,62 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); One-Time Filter: false (2 rows) +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, $1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 72d3c138..67249a68 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -83,6 +83,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY 
(array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); /* @@ -112,6 +113,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); SET pg_pathman.enable = f; SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); @@ -131,6 +133,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); SET pg_pathman.enable = f; SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); @@ -150,6 +153,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); SET pg_pathman.enable = f; SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); @@ -169,6 +173,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 
100, EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); SET pg_pathman.enable = f; SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); @@ -200,6 +205,17 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXECUTE q(NULL); +DEALLOCATE q; + /* * Test expr > ALL (... $1 ...) @@ -225,6 +241,17 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, $1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXECUTE q(NULL); +DEALLOCATE q; + PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); diff --git a/src/include/pathman.h b/src/include/pathman.h index cf8a3c66..fc0241b8 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -183,20 +183,20 @@ hash_to_part_index(uint32 value, uint32 partitions) * * flinfo is a pointer to FmgrInfo, arg1 & arg2 are Datums. 
*/ -#define check_lt(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) < 0 ) +#define check_lt(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) < 0 ) -#define check_le(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) <= 0 ) +#define check_le(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) <= 0 ) -#define check_eq(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) == 0 ) +#define check_eq(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) == 0 ) -#define check_ge(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) >= 0 ) +#define check_ge(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) >= 0 ) -#define check_gt(finfo, arg1, arg2) \ - ( DatumGetInt32(FunctionCall2((finfo), (arg1), (arg2))) > 0 ) +#define check_gt(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) > 0 ) #endif /* PATHMAN_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 346aaeac..62d8c2fd 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -567,8 +567,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Execute comparison function cmp(value, cur_leading_bound) */ while (should_append ? 
- check_ge(&cmp_value_bound_finfo, value, cur_leading_bound) : - check_lt(&cmp_value_bound_finfo, value, cur_leading_bound)) + check_ge(&cmp_value_bound_finfo, collid, value, cur_leading_bound) : + check_lt(&cmp_value_bound_finfo, collid, value, cur_leading_bound)) { Bound bounds[2]; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 78764ac9..9141409e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -82,6 +82,14 @@ static bool is_key_op_param(const OpExpr *expr, static Const *extract_const(Param *param, const WalkerContext *context); +static Datum array_find_min_max(Datum *values, + bool *isnull, + int length, + Oid value_type, + Oid collid, + bool take_min, + bool *result_null); + /* Copied from PostgreSQL (allpaths.c) */ static void set_plain_rel_size(PlannerInfo *root, @@ -883,6 +891,44 @@ handle_array(ArrayType *array, List *ranges; int i; + /* This is only for paranoia's sake */ + Assert(BTMaxStrategyNumber == 5 && BTEqualStrategyNumber == 3); + + /* Optimizations for <, <=, >=, > */ + if (strategy != BTEqualStrategyNumber) + { + bool take_min; + Datum pivot; + bool pivot_null; + + /* + * OR: Max for (< | <=); Min for (> | >=) + * AND: Min for (< | <=); Max for (> | >=) + */ + take_min = strategy < BTEqualStrategyNumber ? !use_or : use_or; + + /* Extract Min (or Max) element */ + pivot = array_find_min_max(elem_values, elem_isnull, + elem_count, elem_type, collid, + take_min, &pivot_null); + + /* Write data and "shrink" the array */ + elem_values[0] = pivot_null ? (Datum) 0 : pivot; + elem_isnull[0] = pivot_null; + elem_count = 1; + + /* Append NULL if array contains NULLs and 'pivot' is not NULL */ + if (!pivot_null && array_contains_nulls(array)) + { + /* Make sure that we have enough space for 2 elements */ + Assert(ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)) >= 2); + + elem_values[1] = (Datum) 0; + elem_isnull[1] = true; + elem_count = 2; + } + } + /* Set default rangeset */ ranges = use_or ? 
NIL : list_make1_irange_full(prel, IR_COMPLETE); @@ -1221,6 +1267,40 @@ extract_const(Param *param, value, isnull, get_typbyval(param->paramtype)); } +/* Find Max or Min value of array */ +static Datum +array_find_min_max(Datum *values, + bool *isnull, + int length, + Oid value_type, + Oid collid, + bool take_min, + bool *result_null) /* ret value #2 */ +{ + TypeCacheEntry *tce = lookup_type_cache(value_type, TYPECACHE_CMP_PROC_FINFO); + Datum *pivot = NULL; + int i; + + for (i = 0; i < length; i++) + { + if (isnull[i]) + continue; + + /* Update 'pivot' */ + if (pivot == NULL || (take_min ? + check_lt(&tce->cmp_proc_finfo, + collid, values[i], *pivot) : + check_gt(&tce->cmp_proc_finfo, + collid, values[i], *pivot))) + { + pivot = &values[i]; + } + } + + /* Return results */ + *result_null = (pivot == NULL); + return (pivot == NULL) ? (Datum) 0 : *pivot; +} /* diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 97eb566e..54ed56c9 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -230,7 +230,8 @@ create_range_partitions_internal(PG_FUNCTION_ARGS) errmsg("only first bound can be NULL"))); /* Check that bounds are ascending */ - if (!nulls[i - 1] && !check_le(&cmp_func, datums[i - 1], datums[i])) + if (!nulls[i - 1] && !check_le(&cmp_func, InvalidOid, + datums[i - 1], datums[i])) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'bounds' array must be ascending"))); } From e8afda626f6c8634b064d0aa71df1eacda1e9147 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 14:52:43 +0300 Subject: [PATCH 0554/1124] make older versions of clang analyzer happy (v 3.5) --- src/relation_info.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/relation_info.c b/src/relation_info.c index 9b080901..15c1f1bc 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -645,6 +645,9 @@ cook_partitioning_expression(const Oid relid, AssertTemporaryContext(); + /* Make clang analyzer happy (v 3.5) */ + (void) ok_rewrite; + /* 
* We use separate memory context here, just to make sure we won't * leave anything behind after parsing, rewriting and planning. From ef33cf77b28d318cb1c5bd52594a422d7508615d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 15:17:46 +0300 Subject: [PATCH 0555/1124] clean regression tests up a little bit, check that expression references the table --- expected/pathman_calamity.out | 2 +- expected/pathman_expressions.out | 48 ++++++++++++++++++++++++++++---- sql/pathman_expressions.sql | 36 ++++++++++++++++++++---- src/relation_info.c | 21 ++------------ 4 files changed, 78 insertions(+), 29 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e83055b8..a874fe1a 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -306,7 +306,7 @@ ERROR: 'expression' should not be NULL SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ ERROR: cannot find type name for attribute "valval" of relation "part_test" SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ -ERROR: functions in partitioning expression must be marked IMMUTABLE +ERROR: failed to analyze partitioning expression (random()) SELECT validate_expression('calamity.part_test', 'val'); /* OK */ validate_expression --------------------- diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index c5910736..5afe1d33 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -27,8 +27,20 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; 5 (1 row) +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: partitioning expression should reference table "hash_rel" +\set VERBOSITY default +/* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); -ERROR: functions in partitioning expression must be marked IMMUTABLE +ERROR: failed to analyze partitioning 
expression (random()) +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; @@ -40,7 +52,7 @@ SELECT * FROM test_exprs.canary WHERE val = 1; Filter: (val = 1) (4 rows) -\set VERBOSITY default +/* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: failed to parse partitioning expression (value * value2))) DETAIL: syntax error at or near ")" @@ -132,10 +144,36 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5 CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; -SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -ERROR: functions in partitioning expression must be marked IMMUTABLE +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: partitioning expression should reference table "range_rel" +\set VERBOSITY default +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression (RANDOM()) +DETAIL: functions in partitioning 
expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', - '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); create_range_partitions ------------------------- 10 diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 898a698e..05d698ca 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -25,15 +25,22 @@ INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; SELECT COUNT(*) FROM test_exprs.hash_rel; + + +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); + + +\set VERBOSITY default + +/* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; - -\set VERBOSITY default - +/* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); @@ -66,9 +73,28 @@ CREATE TABLE 
test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; -SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + + +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + + +\set VERBOSITY default + +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + +\set VERBOSITY terse + + SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', - '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); SELECT COUNT(*) FROM test_exprs.range_rel_6; INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); diff --git a/src/relation_info.c b/src/relation_info.c index 15c1f1bc..06ae6c6c 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -183,8 +183,9 @@ refresh_pathman_relation_info(Oid relid, fix_opfuncids(prel->expr); expr_varnos = pull_varnos(prel->expr); - if (bms_singleton_member(expr_varnos) != PART_EXPR_VARNO) - elog(ERROR, "partitioning expression may reference only one table"); + if (bms_num_members(expr_varnos) != 1) + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); /* Extract Vars and varattnos of partitioning expression */ prel->expr_vars = NIL; @@ -631,10 +632,6 @@ cook_partitioning_expression(const Oid relid, *raw_expr; List 
*query_tree_list; - volatile bool ok_rewrite = false; /* must be volatile, else - cpu register might change - in case of longjmp() */ - char *query_string, *expr_serialized = ""; /* keep compiler happy */ @@ -645,9 +642,6 @@ cook_partitioning_expression(const Oid relid, AssertTemporaryContext(); - /* Make clang analyzer happy (v 3.5) */ - (void) ok_rewrite; - /* * We use separate memory context here, just to make sure we won't * leave anything behind after parsing, rewriting and planning. @@ -705,7 +699,6 @@ cook_partitioning_expression(const Oid relid, /* This will fail with ERROR in case of wrong expression */ query_tree_list = pg_analyze_and_rewrite(parse_tree, query_string, NULL, 0); - ok_rewrite = true; /* tell PG_CATCH that we're fine */ if (list_length(query_tree_list) != 1) elog(ERROR, "partitioning expression produced more than 1 query"); @@ -740,14 +733,6 @@ cook_partitioning_expression(const Oid relid, /* Don't forget to enable pg_pathman's hooks */ pathman_hooks_enabled = true; - /* - * Simply rethrow if rewrite of AST was successful. - * NOTE: We aim to modify only ERRORs that - * are relevant to analyze and rewrite steps. 
- */ - if (ok_rewrite) - PG_RE_THROW(); - /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); error = CopyErrorData(); From 5e2002a808a60e293019d56be665c1687fdd5586 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 15:32:21 +0300 Subject: [PATCH 0556/1124] fix handle_boolexpr() --- src/pg_pathman.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9141409e..1ace65de 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1020,7 +1020,7 @@ handle_boolexpr(const BoolExpr *expr, { int totallen = irange_list_length(ranges); - foreach (lc, result->args) + foreach (lc, args) { WrapperNode *arg = (WrapperNode *) lfirst(lc); int len = irange_list_length(arg->rangeset); From a20c44627858cefeaf315d514c82fea7427c7f05 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 17:03:13 +0300 Subject: [PATCH 0557/1124] rebuild quals in handle_array() --- expected/pathman_array_qual.out | 404 ++++++++++++++++++-------------- sql/pathman_array_qual.sql | 4 + src/include/pathman.h | 4 +- src/nodes_common.c | 6 +- src/pg_pathman.c | 51 +++- 5 files changed, 276 insertions(+), 193 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index da6a8f8b..36ec268d 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -61,31 +61,31 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z /* different collations */ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------- Append -> Seq Scan on test_1 - Filter: ((val)::text < ANY ('{a,b}'::text[])) + Filter: ((val)::text < 'b'::text COLLATE "POSIX") -> Seq Scan on test_2 - Filter: ((val)::text < ANY ('{a,b}'::text[])) + Filter: ((val)::text < 
'b'::text COLLATE "POSIX") -> Seq Scan on test_3 - Filter: ((val)::text < ANY ('{a,b}'::text[])) + Filter: ((val)::text < 'b'::text COLLATE "POSIX") -> Seq Scan on test_4 - Filter: ((val)::text < ANY ('{a,b}'::text[])) + Filter: ((val)::text < 'b'::text COLLATE "POSIX") (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------- Append -> Seq Scan on test_1 - Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + Filter: (val < 'b'::text COLLATE "POSIX") -> Seq Scan on test_2 - Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + Filter: (val < 'b'::text COLLATE "POSIX") -> Seq Scan on test_3 - Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + Filter: (val < 'b'::text COLLATE "POSIX") -> Seq Scan on test_4 - Filter: (val < ANY ('{a,b}'::text[] COLLATE "POSIX")) + Filter: (val < 'b'::text COLLATE "POSIX") (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); @@ -525,11 +525,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[] (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a < ANY ('{100,100}'::integer[])) + Filter: (a < 100) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); @@ -540,8 +540,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 -> Seq Scan on test_2 @@ -549,12 
+549,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550] -> Seq Scan on test_4 -> Seq Scan on test_5 -> Seq Scan on test_6 - Filter: (a < ANY ('{500,550}'::integer[])) + Filter: (a < 550) (8 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 -> Seq Scan on test_2 @@ -563,7 +563,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700] -> Seq Scan on test_5 -> Seq Scan on test_6 -> Seq Scan on test_7 - Filter: (a < ANY ('{100,700}'::integer[])) + Filter: (a < 700) (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); @@ -641,39 +641,39 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[] (21 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a < ALL ('{100,100}'::integer[])) + Filter: (a < 100) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +-------------------------- Append -> Seq Scan on test_1 - Filter: (a < ALL ('{99,100,101}'::integer[])) + Filter: (a < 99) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 -> Seq Scan on test_5 - Filter: (a < ALL ('{500,550}'::integer[])) + Filter: (a < 500) (7 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); - QUERY PLAN 
----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a < ALL ('{100,700}'::integer[])) + Filter: (a < 100) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); @@ -725,11 +725,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[] (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,100}'::integer[])) + Filter: (a > 100) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -742,11 +742,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100] (12 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{99,100,101}'::integer[])) + Filter: (a > 99) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -759,11 +759,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, (12 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_5 - Filter: (a > ANY ('{500,550}'::integer[])) + Filter: (a > 500) -> Seq Scan on test_6 -> Seq Scan on test_7 -> Seq Scan on test_8 @@ -772,11 +772,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550] (8 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> 
Seq Scan on test_1 - Filter: (a > ANY ('{100,700}'::integer[])) + Filter: (a > 100) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -860,11 +860,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[] (21 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ALL ('{100,100}'::integer[])) + Filter: (a > 100) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -877,11 +877,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100] (12 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_2 - Filter: (a > ALL ('{99,100,101}'::integer[])) + Filter: (a > 101) -> Seq Scan on test_3 -> Seq Scan on test_4 -> Seq Scan on test_5 @@ -893,11 +893,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, (11 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{500,550}'::integer[])) + Filter: (a > 550) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -905,11 +905,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550] (7 rows) EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_7 - Filter: (a > ALL ('{100,700}'::integer[])) + Filter: (a > 700) -> Seq Scan on test_8 -> Seq Scan on test_9 -> 
Seq Scan on test_10 @@ -950,11 +950,11 @@ SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); */ PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{1,100,600}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -967,11 +967,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{1,100,600}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -984,11 +984,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{1,100,600}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1001,11 +1001,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{1,100,600}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1018,11 +1018,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{1,100,600}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ 
-1091,11 +1091,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,600,1}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1108,11 +1108,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,600,1}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1125,11 +1125,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,600,1}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1142,11 +1142,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,600,1}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1159,11 +1159,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (12 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_1 - Filter: (a > ANY ('{100,600,1}'::integer[])) + Filter: (a > 1) -> Seq Scan on test_2 -> Seq Scan on test_3 -> Seq Scan on test_4 @@ -1519,11 +1519,11 @@ EXECUTE q(NULL); 
DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{1,100,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1531,11 +1531,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{1,100,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1543,11 +1543,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{1,100,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1555,11 +1555,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{1,100,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1567,11 +1567,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{1,100,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1615,11 +1615,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; PREPARE q(int4) AS 
SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,1,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1627,11 +1627,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,1,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1639,11 +1639,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,1,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1651,11 +1651,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,1,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1663,11 +1663,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,1,600}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1711,11 +1711,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a 
> ALL (array[100, 600, $1]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,600,1}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1723,11 +1723,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,600,1}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1735,11 +1735,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,600,1}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1747,11 +1747,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,600,1}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1759,11 +1759,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{100,600,1}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1858,11 +1858,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, 
$1]]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1870,11 +1870,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1882,11 +1882,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1894,11 +1894,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1906,11 +1906,11 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1951,6 +1951,23 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) (12 
rows) +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ @@ -1974,11 +1991,11 @@ NOTICE: EXECUTE q(999): number of partitions: 5 DEALLOCATE q; PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1986,11 +2003,11 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -1998,11 +2015,11 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL 
('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -2010,11 +2027,11 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -2022,11 +2039,11 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); (7 rows) EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); - QUERY PLAN ------------------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_6 - Filter: (a > ALL ('{{100,600},{1,1}}'::integer[])) + Filter: (a > 600) -> Seq Scan on test_7 -> Seq Scan on test_8 -> Seq Scan on test_9 @@ -2067,6 +2084,23 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) (12 rows) +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + /* check query plan: EXECUTE q('{1, 999}') */ DO language plpgsql $$ @@ -2090,47 +2124,47 @@ NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 DEALLOCATE q; PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN 
--------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_9 - Filter: (a > ALL ('{1,898}'::integer[])) + Filter: (a > 898) -> Seq Scan on test_10 (4 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN --------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_9 - Filter: (a > ALL ('{1,898}'::integer[])) + Filter: (a > 898) -> Seq Scan on test_10 (4 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN --------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_9 - Filter: (a > ALL ('{1,898}'::integer[])) + Filter: (a > 898) -> Seq Scan on test_10 (4 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN --------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_9 - Filter: (a > ALL ('{1,898}'::integer[])) + Filter: (a > 898) -> Seq Scan on test_10 (4 rows) EXPLAIN (COSTS OFF) EXECUTE q(1); - QUERY PLAN --------------------------------------------------- + QUERY PLAN +--------------------------- Append -> Seq Scan on test_9 - Filter: (a > ALL ('{1,898}'::integer[])) + Filter: (a > 898) -> Seq Scan on test_10 (4 rows) @@ -2156,6 +2190,22 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); Filter: (a > ALL (ARRAY[$1, 898])) (6 rows) +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 67249a68..7ab15b6a 100644 --- a/sql/pathman_array_qual.sql +++ 
b/sql/pathman_array_qual.sql @@ -300,6 +300,7 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(999); /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ @@ -329,6 +330,7 @@ EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); /* check query plan: EXECUTE q('{1, 999}') */ DO language plpgsql $$ @@ -358,6 +360,8 @@ EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ +EXECUTE q(1000); /* check query plan: EXECUTE q(999) */ DO language plpgsql $$ diff --git a/src/include/pathman.h b/src/include/pathman.h index fc0241b8..c1a45939 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -132,13 +132,15 @@ Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, typedef struct { const Node *orig; /* examined expression */ - List *args; /* extracted from 'orig' */ + List *args; /* clauses/wrappers extracted from 'orig' */ List *rangeset; /* IndexRanges representing selected parts */ double paramsel; /* estimated selectivity of PARAMs (for RuntimeAppend costs) */ bool found_gap; /* were there any gaps? */ } WrapperNode; +#define InvalidWrapperNode { NULL, NIL, NIL, 0.0, false } + typedef struct { Node *prel_expr; /* expression from PartRelationInfo */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 984ff908..344e8e65 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -726,11 +726,11 @@ rescan_append_common(CustomScanState *node) InitWalkerContext(&wcxt, prel_expr, prel, econtext); foreach (lc, scan_state->canon_custom_exprs) { - WrapperNode *wn; + WrapperNode *wrap; /* ... 
then we cut off irrelevant ones using the provided clauses */ - wn = walk_expr_tree((Expr *) lfirst(lc), &wcxt); - ranges = irange_list_intersection(ranges, wn->rangeset); + wrap = walk_expr_tree((Expr *) lfirst(lc), &wcxt); + ranges = irange_list_intersection(ranges, wrap->rangeset); } /* Get Oids of the required partitions */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1ace65de..86fcd30d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -917,15 +917,39 @@ handle_array(ArrayType *array, elem_isnull[0] = pivot_null; elem_count = 1; - /* Append NULL if array contains NULLs and 'pivot' is not NULL */ - if (!pivot_null && array_contains_nulls(array)) + /* If pivot is not NULL ... */ + if (!pivot_null) { - /* Make sure that we have enough space for 2 elements */ - Assert(ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)) >= 2); + /* ... append single NULL if array contains NULLs */ + if (array_contains_nulls(array)) + { + /* Make sure that we have enough space for 2 elements */ + Assert(ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)) >= 2); - elem_values[1] = (Datum) 0; - elem_isnull[1] = true; - elem_count = 2; + elem_values[1] = (Datum) 0; + elem_isnull[1] = true; + elem_count = 2; + } + /* ... 
optimize clause ('orig') if array does not contain NULLs */ + else if (result->orig) + { + /* Should've been provided by the caller */ + ScalarArrayOpExpr *orig = (ScalarArrayOpExpr *) result->orig; + + /* Rebuild clause using 'pivot' */ + result->orig = (Node *) + make_opclause(orig->opno, BOOLOID, false, + (Expr *) linitial(orig->args), + (Expr *) makeConst(elem_type, + -1, + collid, + elem_len, + elem_values[0], + elem_isnull[0], + elem_byval), + InvalidOid, + collid); + } } } @@ -935,8 +959,8 @@ handle_array(ArrayType *array, /* Select partitions using values */ for (i = 0; i < elem_count; i++) { - WrapperNode wrap; Const c; + WrapperNode wrap = InvalidWrapperNode; NodeSetTag(&c, T_Const); c.consttype = elem_type; @@ -1050,6 +1074,9 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, TypeCacheEntry *tce; int strategy; + /* Small sanity check */ + Assert(list_length(expr->args) == 2); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); @@ -1072,14 +1099,14 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, if (c->constisnull) goto handle_arrexpr_none; + /* Provide expression for optimizations */ + result->orig = (const Node *) expr; + /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), expr->inputcollid, strategy, expr->useOr, context, result); - /* Save expression */ - result->orig = (const Node *) expr; - return; /* done, exit */ } @@ -1101,7 +1128,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, foreach (lc, arr_expr->elements) { Node *elem = lfirst(lc); - WrapperNode wrap; + WrapperNode wrap = InvalidWrapperNode; /* Stop if ALL + quals evaluate to NIL */ if (!expr->useOr && ranges == NIL) From a3ca85df9a76e01924e0ea97241b7d2e08bc9c7e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 23 May 2017 18:55:39 +0300 Subject: [PATCH 0558/1124] improve paramsel estimation in handle_arrexpr() --- src/pg_pathman.c | 65 +++++++++++++++++++++++++++--------------------- 1 
file changed, 36 insertions(+), 29 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 86fcd30d..7c265640 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1080,6 +1080,9 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + /* Save expression */ + result->orig = (const Node *) expr; + /* Check if expression tree is a partitioning expression */ if (!match_expr_to_operand(context->prel_expr, part_expr)) goto handle_arrexpr_all; @@ -1097,10 +1100,12 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, /* Array is NULL */ if (c->constisnull) - goto handle_arrexpr_none; + { + result->rangeset = NIL; + result->paramsel = 0.0; - /* Provide expression for optimizations */ - result->orig = (const Node *) expr; + return; /* done, exit */ + } /* Examine array */ handle_array(DatumGetArrayTypeP(c->constvalue), @@ -1114,7 +1119,8 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, { ArrayExpr *arr_expr = (ArrayExpr *) array; Oid elem_type = arr_expr->element_typeid; - bool array_has_params = false; + int array_params = 0; + double paramsel = 1.0; List *ranges; ListCell *lc; @@ -1158,25 +1164,40 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, irange_list_union(ranges, wrap.rangeset) : irange_list_intersection(ranges, wrap.rangeset); } - else array_has_params = true; /* we have non-const nodes */ + else array_params++; /* we've just met non-const nodes */ } /* Check for PARAM-related optimizations */ - if (array_has_params) + if (array_params > 0) { - /* We can't say anything if PARAMs + ANY */ + double sel = estimate_paramsel_using_prel(prel, strategy); + int i; + if (expr->useOr) - goto handle_arrexpr_all; + { + /* We can't say anything if PARAMs + ANY */ + ranges = list_make1_irange_full(prel, IR_LOSSY); + + /* See handle_boolexpr() */ + for (i = 0; i < array_params; i++) + paramsel *= (1 - sel); + + paramsel = 1 - paramsel; + } + 
else + { + /* Recheck condition on a narrowed set of partitions */ + ranges = irange_list_set_lossiness(ranges, IR_LOSSY); - /* Recheck condition on a narrowed set of partitions */ - ranges = irange_list_set_lossiness(ranges, IR_LOSSY); + /* See handle_boolexpr() */ + for (i = 0; i < array_params; i++) + paramsel *= sel; + } } - /* Save rangeset */ + /* Save result */ result->rangeset = ranges; - - /* Save expression */ - result->orig = (const Node *) expr; + result->paramsel = paramsel; return; /* done, exit */ } @@ -1187,21 +1208,7 @@ handle_arrexpr(const ScalarArrayOpExpr *expr, handle_arrexpr_all: result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); - - /* Save expression */ - result->orig = (const Node *) expr; - - return; - -handle_arrexpr_none: - result->rangeset = NIL; - result->paramsel = 0.0; - - /* Save expression */ - result->orig = (const Node *) expr; - - return; + result->paramsel = 1.0; } /* Operator expression handler */ From 241f2dd6f7edcc42f5e2865e9ee71f85a3df8ed1 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 24 May 2017 13:03:16 +0300 Subject: [PATCH 0559/1124] Fix regression test on runtime_append node --- expected/pathman_runtime_nodes.out | 13 +++++++++---- sql/pathman_runtime_nodes.sql | 13 +++++++++---- src/include/compat/pg_compat.h | 2 ++ src/pg_pathman.c | 7 ++----- 4 files changed, 22 insertions(+), 13 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 87617029..ef9aaa93 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -64,6 +64,7 @@ create or replace function test.pathman_test_2() returns text as $$ declare plan jsonb; num int; + c text; begin plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); @@ -82,11 +83,15 @@ begin select count(*) from 
jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->i->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + for i in 0..3 loop num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; perform test.pathman_equal(num::text, '1', 'expected 1 loop'); end loop; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 6a65a557..e5cf17a5 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -71,6 +71,7 @@ create or replace function test.pathman_test_2() returns text as $$ declare plan jsonb; num int; + c text; begin plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); @@ -89,11 +90,15 @@ begin select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->i->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, 
'"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + for i in 0..3 loop num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; perform test.pathman_equal(num::text, '1', 'expected 1 loop'); end loop; diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 44cb5687..2f746670 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -215,6 +215,8 @@ extern void create_plain_partial_paths(PlannerInfo *root, extern Datum exprResult; extern ExprDoneCond isDone; static inline void +dummy_handler() { } +static inline void not_signle_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 8ac10c34..0c841736 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1153,11 +1153,8 @@ extract_const(Param *param, { ExprState *estate = ExecInitExpr((Expr *) param, NULL); bool isnull; -#if PG_VERSION_NUM >= 100000 - Datum value = ExecEvalExpr(estate, context->econtext, &isnull); -#else - Datum value = ExecEvalExpr(estate, context->econtext, &isnull, NULL); -#endif + Datum value = ExecEvalExprCompat(estate, context->econtext, &isnull, + dummy_handler); return makeConst(param->paramtype, param->paramtypmod, param->paramcollid, get_typlen(param->paramtype), From 6dd77372cb14a242e4012943b38b71e9441ed3f8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 24 May 2017 13:07:04 +0300 Subject: [PATCH 0560/1124] simplify conditions in check_range_available() --- src/partition_creation.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 62d8c2fd..57a84426 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1277,12 +1277,12 @@ check_range_available(Oid parent_relid, if (raise_error) elog(ERROR, "specified range [%s, %s) overlaps " "with existing partitions", - !IsInfinite(start) ? 
- datum_to_cstring(BoundGetValue(start), value_type) : - "NULL", - !IsInfinite(end) ? - datum_to_cstring(BoundGetValue(end), value_type) : - "NULL"); + IsInfinite(start) ? + "NULL" : + datum_to_cstring(BoundGetValue(start), value_type), + IsInfinite(end) ? + "NULL" : + datum_to_cstring(BoundGetValue(end), value_type)); else return false; } From ba12800acba1fb376eeaa70ebf43a9aceb54d678 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 24 May 2017 13:21:58 +0300 Subject: [PATCH 0561/1124] fix error message in check_boundaries() (issue #92) --- range.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/range.sql b/range.sql index bae93c75..36b5029d 100644 --- a/range.sql +++ b/range.sql @@ -36,7 +36,7 @@ BEGIN /* Check lower boundary */ IF start_value > min_value THEN - RAISE EXCEPTION 'start value is less than min value of "%"', expression; + RAISE EXCEPTION 'start value is greater than min value of "%"', expression; END IF; /* Check upper boundary */ From 02ff47432a7ca1e8ef4b6e6e04a0d34439162a59 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 24 May 2017 16:11:16 +0300 Subject: [PATCH 0562/1124] improved function cook_partitioning_expression() --- expected/pathman_basic.out | 6 +-- expected/pathman_calamity.out | 2 +- expected/pathman_expressions.out | 56 +++++++++++++-------- sql/pathman_expressions.sql | 21 ++++---- src/hooks.c | 10 +--- src/nodes_common.c | 3 -- src/relation_info.c | 86 ++++++++++++++++++++------------ 7 files changed, 108 insertions(+), 76 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 1feb2be4..6a196215 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1511,9 +1511,9 @@ SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01 DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr 
---------------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------ - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} + partrel | expr | parttype | range_interval | cooked_expr +--------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index a874fe1a..dcf5ed54 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -306,7 +306,7 @@ ERROR: 'expression' should not be NULL SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ ERROR: cannot find type name for attribute "valval" of relation "part_test" SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ -ERROR: failed to analyze partitioning expression (random()) +ERROR: failed to analyze partitioning expression "random()" SELECT validate_expression('calamity.part_test', 'val'); /* OK */ validate_expression --------------------- diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 5afe1d33..a078e78f 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -27,13 +27,32 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; 5 (1 row) +\set VERBOSITY default /* Try using constant expression */ SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); -ERROR: partitioning expression should reference table "hash_rel" -\set VERBOSITY default +ERROR: failed to analyze partitioning expression "1 + 1" 
+DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +/* Try using multiple queries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); -ERROR: failed to analyze partitioning expression (random()) +ERROR: failed to analyze partitioning expression "random()" DETAIL: functions in partitioning expression must be marked IMMUTABLE CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM @@ -41,20 +60,9 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM -/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ -EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy -SELECT * FROM test_exprs.canary WHERE val = 1; - QUERY PLAN 
----------------------------------- - Insert on canary_copy - -> Append - -> Seq Scan on canary_0 - Filter: (val = 1) -(4 rows) - -/* Try using missing columns */ +/* Try using broken parentheses */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); -ERROR: failed to parse partitioning expression (value * value2))) +ERROR: failed to parse partitioning expression "value * value2))" DETAIL: syntax error at or near ")" QUERY: SELECT public.validate_expression(parent_relid, expression) CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM @@ -62,8 +70,9 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +/* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); -ERROR: failed to analyze partitioning expression (value * value3) +ERROR: failed to analyze partitioning expression "value * value3" DETAIL: column "value3" does not exist HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
QUERY: SELECT public.validate_expression(parent_relid, expression) @@ -144,15 +153,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5 CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default /* Try using constant expression */ SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -ERROR: partitioning expression should reference table "range_rel" -\set VERBOSITY default +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); -ERROR: failed to analyze partitioning expression (RANDOM()) +ERROR: failed to analyze partitioning expression "RANDOM()" DETAIL: functions in partitioning expression must be marked IMMUTABLE CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 05d698ca..0eccf5ae 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -27,21 +27,24 @@ INSERT INTO test_exprs.hash_rel (value, value2) SELECT COUNT(*) FROM test_exprs.hash_rel; + +\set 
VERBOSITY default + /* Try using constant expression */ SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); - -\set VERBOSITY default +/* Try using multiple queries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); -/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ -EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy -SELECT * FROM test_exprs.canary WHERE val = 1; +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); /* Try using missing columns */ -SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ @@ -75,13 +78,13 @@ INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; + +\set VERBOSITY default + /* Try using constant expression */ SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); - -\set VERBOSITY default - /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); diff --git a/src/hooks.c b/src/hooks.c index 78a061be..b583fa7b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -91,10 +91,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, set_join_pathlist_next(root, joinrel, outerrel, innerrel, jointype, extra); - /* Hooks can be disabled */ - if (!pathman_hooks_enabled) - return; - /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; @@ -289,10 +285,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if 
(set_rel_pathlist_hook_next != NULL) set_rel_pathlist_hook_next(root, rel, rti, rte); - /* Hooks can be disabled */ - if (!pathman_hooks_enabled) - return; - /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) return; @@ -554,7 +546,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) uint32 query_id = parse->queryId; /* Save the result in case it changes */ - bool pathman_ready = pathman_hooks_enabled && IsPathmanReady(); + bool pathman_ready = IsPathmanReady(); PG_TRY(); { diff --git a/src/nodes_common.c b/src/nodes_common.c index 344e8e65..ddd15ec7 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -348,9 +348,6 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt) /* Restore original 'varattno' */ var->varattno = var->varoattno; - /* Forget 'location' */ - var->location = -1; - return (Node *) var; } diff --git a/src/relation_info.c b/src/relation_info.c index 06ae6c6c..f216f45e 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -47,8 +47,8 @@ /* Error messages for partitioning expression */ -#define PARSE_PART_EXPR_ERROR "failed to parse partitioning expression (%s)" -#define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression (%s)" +#define PARSE_PART_EXPR_ERROR "failed to parse partitioning expression \"%s\"" +#define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" /* Comparison function info */ @@ -105,6 +105,8 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); +static bool query_contains_subqueries(Node *node, void *context); + void init_relation_info_static_data(void) @@ -139,7 +141,6 @@ refresh_pathman_relation_info(Oid relid, Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; char *expr; - Relids expr_varnos; HeapTuple htup; MemoryContext old_mcxt; @@ -182,11 +183,6 @@ refresh_pathman_relation_info(Oid relid, prel->expr = 
(Node *) stringToNode(expr); fix_opfuncids(prel->expr); - expr_varnos = pull_varnos(prel->expr); - if (bms_num_members(expr_varnos) != 1) - elog(ERROR, "partitioning expression should reference table \"%s\"", - get_rel_name(relid)); - /* Extract Vars and varattnos of partitioning expression */ prel->expr_vars = NIL; prel->expr_atts = NULL; @@ -559,6 +555,17 @@ fill_prel_with_partitions(PartRelationInfo *prel, #endif } +/* qsort comparison function for RangeEntries */ +static int +cmp_range_entries(const void *p1, const void *p2, void *arg) +{ + const RangeEntry *v1 = (const RangeEntry *) p1; + const RangeEntry *v2 = (const RangeEntry *) p2; + cmp_func_info *info = (cmp_func_info *) arg; + + return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); +} + /* * Partitioning expression routines. @@ -691,34 +698,47 @@ cook_partitioning_expression(const Oid relid, PG_TRY(); { - TargetEntry *target_entry; - - Query *expr_query; - PlannedStmt *expr_plan; - Node *expr; + Query *query; + Node *expr; + Relids expr_varnos; /* This will fail with ERROR in case of wrong expression */ query_tree_list = pg_analyze_and_rewrite(parse_tree, query_string, NULL, 0); + /* Sanity check #1 */ if (list_length(query_tree_list) != 1) elog(ERROR, "partitioning expression produced more than 1 query"); - expr_query = (Query *) linitial(query_tree_list); + query = (Query *) linitial(query_tree_list); - /* Plan this query. We reuse 'expr_node' here */ - expr_plan = pg_plan_query(expr_query, 0, NULL); + /* Sanity check #2 */ + if (list_length(query->targetList) != 1) + elog(ERROR, "there should be exactly 1 partitioning expression"); - target_entry = IsA(expr_plan->planTree, IndexOnlyScan) ? 
- linitial(((IndexOnlyScan *) expr_plan->planTree)->indextlist) : - linitial(expr_plan->planTree->targetlist); + /* Sanity check #3 */ + if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) + elog(ERROR, "subqueries are not allowed in partitioning expression"); - expr = eval_const_expressions(NULL, (Node *) target_entry->expr); + expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; + expr = eval_const_expressions(NULL, expr); + + /* Sanity check #4 */ if (contain_mutable_functions(expr)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("functions in partitioning expression" " must be marked IMMUTABLE"))); + /* Sanity check #5 */ + expr_varnos = pull_varnos(expr); + if (bms_num_members(expr_varnos) != 1 || + ((RangeTblEntry *) linitial(query->rtable))->relid != relid) + { + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); + } + bms_free(expr_varnos); + Assert(expr); expr_serialized = nodeToString(expr); @@ -755,6 +775,7 @@ cook_partitioning_expression(const Oid relid, /* Switch to previous mcxt */ MemoryContextSwitchTo(old_mcxt); + /* Get Datum of serialized expression (right mcxt) */ expr_datum = CStringGetTextDatum(expr_serialized); /* Free memory */ @@ -763,6 +784,20 @@ cook_partitioning_expression(const Oid relid, return expr_datum; } +/* Check if query has subqueries */ +static bool +query_contains_subqueries(Node *node, void *context) +{ + if (node == NULL) + return false; + + /* We've met a subquery */ + if (IsA(node, Query)) + return true; + + return expression_tree_walker(node, query_contains_subqueries, NULL); +} + /* * Functions for delayed invalidation. 
@@ -1308,17 +1343,6 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, } } -/* qsort comparison function for RangeEntries */ -static int -cmp_range_entries(const void *p1, const void *p2, void *arg) -{ - const RangeEntry *v1 = (const RangeEntry *) p1; - const RangeEntry *v2 = (const RangeEntry *) p2; - cmp_func_info *info = (cmp_func_info *) arg; - - return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); -} - /* * Common PartRelationInfo checks. Emit ERROR if anything is wrong. From 8fa83bfbc0a5113a9cd8b4fc04d3017158ca1cc4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 24 May 2017 17:22:13 +0300 Subject: [PATCH 0563/1124] introduce function canonicalize_partitioning_expression(), fix typmod & collid in refresh_pathman_relation_info() --- expected/pathman_expressions.out | 93 ++++++++++++++++++++++++++++++++ sql/pathman_expressions.sql | 29 ++++++++++ src/include/relation_info.h | 3 ++ src/pl_funcs.c | 5 +- src/relation_info.c | 40 ++++++++++---- 5 files changed, 155 insertions(+), 15 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index a078e78f..764a11fb 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -2,6 +2,99 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon 
CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects /* We use this rel to check 'pathman_hooks_enabled' */ CREATE TABLE 
test_exprs.canary(val INT4 NOT NULL); CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 0eccf5ae..379736a5 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -5,6 +5,35 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_exprs; + +/* + * Test partitioning expression canonicalization process + */ + +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; +DROP TABLE test_exprs.canon CASCADE; + + +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); +DROP TABLE test_exprs.canon CASCADE; + + + /* We use this rel to check 'pathman_hooks_enabled' */ CREATE TABLE test_exprs.canary(val INT4 NOT NULL); CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 69578645..f9963a94 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -292,6 +292,9 @@ Datum cook_partitioning_expression(const Oid 
relid, const char *expr_cstr, Oid *expr_type); +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + /* Global invalidation routines */ void delay_pathman_shutdown(void); void delay_invalidation_parent_rel(Oid parent); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 0d71f858..58a78210 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -34,7 +34,6 @@ #include "nodes/nodeFuncs.h" #include "utils/builtins.h" #include "utils/inval.h" -#include "utils/ruleutils.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -832,9 +831,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) expr_datum = cook_partitioning_expression(relid, expression, &expr_type); /* Canonicalize user's expression (trim whitespaces etc) */ - expression = deparse_expression(stringToNode(TextDatumGetCString(expr_datum)), - deparse_context_for(get_rel_name(relid), relid), - false, false); + expression = canonicalize_partitioning_expression(relid, expression); /* Check hash function for HASH partitioning */ if (parttype == PT_HASH) diff --git a/src/relation_info.c b/src/relation_info.c index f216f45e..e824b72f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -26,6 +26,7 @@ #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/var.h" +#include "parser/analyze.h" #include "parser/parser.h" #include "storage/lmgr.h" #include "tcop/tcopprot.h" @@ -33,6 +34,7 @@ #include "utils/fmgroids.h" #include "utils/hsearch.h" #include "utils/memutils.h" +#include "utils/ruleutils.h" #include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/typcache.h" @@ -141,7 +143,6 @@ refresh_pathman_relation_info(Oid relid, Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; char *expr; - HeapTuple htup; MemoryContext old_mcxt; AssertTemporaryContext(); @@ -193,16 +194,8 @@ refresh_pathman_relation_info(Oid relid, /* First, fetch type of partitioning expression */ 
prel->ev_type = exprType(prel->expr); - - htup = SearchSysCache1(TYPEOID, prel->ev_type); - if (HeapTupleIsValid(htup)) - { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(htup); - prel->ev_typmod = typtup->typtypmod; - prel->ev_collid = typtup->typcollation; - ReleaseSysCache(htup); - } - else elog(ERROR, "cache lookup failed for type %u", prel->ev_type); + prel->ev_typmod = exprTypmod(prel->expr); + prel->ev_collid = exprCollation(prel->expr); /* Fetch HASH & CMP fuctions and other stuff from type cache */ typcache = lookup_type_cache(prel->ev_type, @@ -784,6 +777,31 @@ cook_partitioning_expression(const Oid relid, return expr_datum; } +/* Canonicalize user's expression (trim whitespaces etc) */ +char * +canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr) +{ + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; + + AssertTemporaryContext(); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + query = parse_analyze(parse_tree, query_string, NULL, 0); + expr = ((TargetEntry *) linitial(query->targetList))->expr; + + /* We don't care about memory efficiency here */ + return deparse_expression((Node *) expr, + deparse_context_for(get_rel_name(relid), relid), + false, false); +} + /* Check if query has subqueries */ static bool query_contains_subqueries(Node *node, void *context) From 3b528e738ff44e9e6a3aa22d9d2d11a3ea24437d Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 24 May 2017 18:24:01 +0300 Subject: [PATCH 0564/1124] Add compat version of BeginCopyFrom and DoCopy routines --- expected/pathman_utility_stmt.out | 65 ++++++++++++++---------------- sql/pathman_utility_stmt.sql | 20 +++++++-- src/hooks.c | 7 +++- src/include/compat/pg_compat.h | 27 +++++++++++++ src/include/utility_stmt_hooking.h | 3 +- src/utility_stmt_hooking.c | 24 +++++------ 6 files changed, 93 insertions(+), 53 deletions(-) diff --git 
a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 913c130d..19bad191 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -294,15 +294,13 @@ SELECT create_hash_partitions('rename.test', 'a', 3); ALTER TABLE rename.test_0 RENAME TO test_one; /* We expect to find check constraint renamed as well */ -\d+ rename.test_one - Table "rename.test_one" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+---------------------------------------------------------+---------+--------------+------------- - a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | - b | integer | | plain | | -Check constraints: - "pathman_test_one_check" CHECK (get_hash_part_idx(hashint4(a), 3) = 0) -Inherits: rename.test +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_one'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +------------------------+----------------------------------------------- + pathman_test_one_check | CHECK (get_hash_part_idx(hashint4(a), 3) = 0) +(1 row) /* Generates check constraint for relation */ CREATE OR REPLACE FUNCTION add_constraint(rel regclass) @@ -329,15 +327,14 @@ SELECT add_constraint('rename.test_inh_1'); (1 row) ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; -\d+ rename.test_inh_one - Table "rename.test_inh_one" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+---------------------------------------------------------+---------+--------------+------------- - a | integer | not null default nextval('rename.test_a_seq'::regclass) | plain | | - b | integer | | plain | | -Check constraints: - "pathman_test_inh_1_check" CHECK (a < 100) -Inherits: rename.test_inh +/* Show check constraints of rename.test_inh_one */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 
'rename.test_inh_one'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +--------------------------+---------------------- + pathman_test_inh_1_check | CHECK (a < 100) +(1 row) /* Check that plain tables are not affected too */ CREATE TABLE rename.plain_test(a serial, b int); @@ -348,24 +345,24 @@ SELECT add_constraint('rename.plain_test_renamed'); (1 row) -\d+ rename.plain_test_renamed - Table "rename.plain_test_renamed" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+---------------------------------------------------------------+---------+--------------+------------- - a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | - b | integer | | plain | | -Check constraints: - "pathman_plain_test_renamed_check" CHECK (a < 100) +/* Show check constraints of rename.plain_test_renamed */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test_renamed'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +----------------------------------+---------------------- + pathman_plain_test_renamed_check | CHECK (a < 100) +(1 row) ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; -\d+ rename.plain_test - Table "rename.plain_test" - Column | Type | Modifiers | Storage | Stats target | Description ---------+---------+---------------------------------------------------------------+---------+--------------+------------- - a | integer | not null default nextval('rename.plain_test_a_seq'::regclass) | plain | | - b | integer | | plain | | -Check constraints: - "pathman_plain_test_renamed_check" CHECK (a < 100) +/* ... 
and check constraints of rename.plain_test */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +----------------------------------+---------------------- + pathman_plain_test_renamed_check | CHECK (a < 100) +(1 row) DROP SCHEMA rename CASCADE; NOTICE: drop cascades to 7 other objects diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 8c3a09af..7dc9dd2f 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -168,7 +168,9 @@ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); ALTER TABLE rename.test_0 RENAME TO test_one; /* We expect to find check constraint renamed as well */ -\d+ rename.test_one +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_one'::regclass AND r.contype = 'c'; /* Generates check constraint for relation */ CREATE OR REPLACE FUNCTION add_constraint(rel regclass) @@ -191,15 +193,25 @@ CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; SELECT add_constraint('rename.test_inh_1'); ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; -\d+ rename.test_inh_one +/* Show check constraints of rename.test_inh_one */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; /* Check that plain tables are not affected too */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); -\d+ rename.plain_test_renamed +/* Show check constraints of rename.plain_test_renamed */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test_renamed'::regclass AND r.contype = 'c'; 
+ ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; -\d+ rename.plain_test +/* ... and check constraints of rename.plain_test */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; DROP SCHEMA rename CASCADE; diff --git a/src/hooks.c b/src/hooks.c index eb01376c..397e0fbb 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -771,6 +771,8 @@ pathman_process_utility_hook(PlannedStmt *pstmt, DestReceiver *dest, char *completionTag) { Node *parsetree = pstmt->utilityStmt; + int stmt_location = pstmt->stmt_location, + stmt_len = pstmt->stmt_len; #else pathman_process_utility_hook(Node *parsetree, const char *queryString, @@ -779,6 +781,8 @@ pathman_process_utility_hook(Node *parsetree, DestReceiver *dest, char *completionTag) { + int stmt_location = -1, + stmt_len = 0; #endif if (IsPathmanReady()) @@ -793,7 +797,8 @@ pathman_process_utility_hook(Node *parsetree, uint64 processed; /* Handle our COPY case (and show a special cmd name) */ - PathmanDoCopy((CopyStmt *) parsetree, queryString, &processed); + PathmanDoCopy((CopyStmt *) parsetree, queryString, stmt_location, + stmt_len, &processed); if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "PATHMAN COPY " UINT64_FORMAT, processed); diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2f746670..e77788c4 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -60,6 +60,21 @@ #endif +/* + * BeginCopyFrom() + */ +#if PG_VERSION_NUM >= 100000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 90500 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((rel), (filename), (is_program), (attnamelist), 
(options)) +#endif + + /* * Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ @@ -184,6 +199,18 @@ extern void create_plain_partial_paths(PlannerInfo *root, #endif +/* + * DoCopy() + */ +#if PG_VERSION_NUM >= 100000 +#define DoCopyCompat(pstate, copy_stmt, stmt_location, stmt_len, processed) \ + DoCopy((pstate), (copy_stmt), (stmt_location), (stmt_len), (processed)) +#elif PG_VERSION_NUM >= 90500 +#define DoCopyCompat(pstate, copy_stmt, stmt_location, stmt_len, processed) \ + DoCopy((copy_stmt), (pstate)->p_sourcetext, (processed)) +#endif + + /* * ExecBuildProjectionInfo */ diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index ee16a2a5..6b45cde3 100644 --- a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -30,7 +30,8 @@ bool is_pathman_related_alter_column_type(Node *parsetree, PartType *part_type_out); /* Statement handlers */ -void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed); +void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, + int stmt_location, int stmt_len, uint64 *processed); void PathmanRenameConstraint(Oid partition_relid, const RenameStmt *partition_rename_stmt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 75a512ef..b45714e1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -325,7 +325,8 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) * NOTE: based on DoCopy() (see copy.c). 
*/ void -PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) +PathmanDoCopy(const CopyStmt *stmt, const char *queryString, int stmt_location, + int stmt_len, uint64 *processed) { CopyState cstate; bool is_from = stmt->is_from; @@ -333,6 +334,7 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) Relation rel; Node *query = NULL; List *range_table = NIL; + ParseState *pstate; /* Disallow COPY TO/FROM file or program except to superusers. */ if (!pipe && !superuser()) @@ -481,6 +483,9 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) /* This should never happen (see is_pathman_related_copy()) */ else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + /* COPY ... FROM ... */ if (is_from) { @@ -495,13 +500,9 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) PreventCommandIfReadOnly("PATHMAN COPY FROM"); PreventCommandIfParallelMode("PATHMAN COPY FROM"); -#if PG_VERSION_NUM >= 100000 - cstate = BeginCopyFrom(NULL, rel, stmt->filename, stmt->is_program, - NULL, stmt->attlist, stmt->options); -#else - cstate = BeginCopyFrom(rel, stmt->filename, stmt->is_program, - stmt->attlist, stmt->options); -#endif + cstate = BeginCopyFromCompat(pstate, rel, stmt->filename, + stmt->is_program, NULL, stmt->attlist, + stmt->options); *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); EndCopyFrom(cstate); } @@ -519,11 +520,8 @@ PathmanDoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) modified_copy_stmt.query = query; /* Call standard DoCopy using a new CopyStmt */ -#if PG_VERSION_NUM >= 100000 - DoCopy(NULL, &modified_copy_stmt, 0, 0, processed); -#else - DoCopy(&modified_copy_stmt, queryString, processed); -#endif + DoCopyCompat(pstate, &modified_copy_stmt, stmt_location, stmt_len, + processed); } /* From 
981e76965dd7017a1a8564685e5fd7a50f8039b8 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 24 May 2017 18:44:35 +0300 Subject: [PATCH 0565/1124] Add compat version of parse_analyze routine --- src/include/compat/pg_compat.h | 30 ++++++++++++++++++++++++------ src/relation_info.c | 2 +- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e77788c4..818a8130 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -347,21 +347,39 @@ void McxtStatsInternal(MemoryContext context, int level, #endif +/* + * parse_analyze() + * + * for v10 cast first arg to RawStmt type + */ +#if PG_VERSION_NUM >= 100000 +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 90500 +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze((Node *) (parse_tree), (query_string), (param_types), \ + (nparams)) +#endif + + /* * pg_analyze_and_rewrite * * for v10 cast first arg to RawStmt type */ #if PG_VERSION_NUM >= 100000 -#define pg_analyze_and_rewrite_compat(parsetree, query_string, paramTypes, \ - numParams, queryEnv) \ +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ pg_analyze_and_rewrite((RawStmt *) (parsetree), (query_string), \ - (paramTypes), (numParams), (queryEnv)) + (param_types), (nparams), (query_env)) #elif PG_VERSION_NUM >= 90500 -#define pg_analyze_and_rewrite_compat(parsetree, query_string, paramTypes, \ - numParams, queryEnv) \ +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ pg_analyze_and_rewrite((Node *) (parsetree), (query_string), \ - (paramTypes), (numParams)) + (param_types), (nparams)) #endif diff --git a/src/relation_info.c 
b/src/relation_info.c index 463e2490..9313a954 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -798,7 +798,7 @@ canonicalize_partitioning_expression(const Oid relid, (void) parse_partitioning_expression(relid, expr_cstr, &query_string, &parse_tree); - query = parse_analyze(parse_tree, query_string, NULL, 0); + query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); expr = ((TargetEntry *) linitial(query->targetList))->expr; /* We don't care about memory efficiency here */ From cc6b1e80809e9f7aa63586539e1909bc952aba6a Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Wed, 24 May 2017 19:52:20 +0300 Subject: [PATCH 0566/1124] Add compat version of build_simple_rel routine --- src/include/compat/pg_compat.h | 13 +++++++++++++ src/pg_pathman.c | 6 +----- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 818a8130..33a22d30 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -75,6 +75,19 @@ #endif +/* + * build_simple_rel() + */ +#if PG_VERSION_NUM >= 100000 +#define build_simple_rel_compat(root, childRTindex, parent_rel) \ + build_simple_rel((root), (childRTindex), (parent_rel)) +#elif PG_VERSION_NUM >= 90500 +#define build_simple_rel_compat(root, childRTindex, parent_rel) \ + build_simple_rel((root), (childRTindex), \ + (parent_rel) ? 
RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL) +#endif + + /* * Define ALLOCSET_DEFAULT_SIZES for our precious MemoryContexts */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 40fb4217..9d697449 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -338,11 +338,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, root->simple_rte_array[childRTindex] = child_rte; /* Create RelOptInfo for this child (and make some estimates as well) */ -#if PG_VERSION_NUM >= 100000 - child_rel = build_simple_rel(root, childRTindex, parent_rel); -#else - child_rel = build_simple_rel(root, childRTindex, RELOPT_OTHER_MEMBER_REL); -#endif + child_rel = build_simple_rel_compat(root, childRTindex, parent_rel); /* Increase total_table_pages using the 'child_rel' */ root->total_table_pages += (double) child_rel->pages; From 2a57b630027f278cd5e2775bf8088bb178c27442 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 24 May 2017 19:53:54 +0300 Subject: [PATCH 0567/1124] simplified travis/apt.postgresql.org.sh --- travis/apt.postgresql.org.sh | 82 ++---------------------------------- 1 file changed, 3 insertions(+), 79 deletions(-) diff --git a/travis/apt.postgresql.org.sh b/travis/apt.postgresql.org.sh index 22814fa7..369615a5 100644 --- a/travis/apt.postgresql.org.sh +++ b/travis/apt.postgresql.org.sh @@ -1,79 +1,11 @@ #!/bin/sh -# script to add apt.postgresql.org to sources.list - -# from command like -CODENAME="$1" -# lsb_release is the best interface, but not always available -if [ -z "$CODENAME" ]; then - CODENAME=$(lsb_release -cs 2>/dev/null) -fi -# parse os-release (unreliable, does not work on Ubuntu) -if [ -z "$CODENAME" -a -f /etc/os-release ]; then - . 
/etc/os-release - # Debian: VERSION="7.0 (wheezy)" - # Ubuntu: VERSION="13.04, Raring Ringtail" - CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/') -fi -# guess from sources.list -if [ -z "$CODENAME" ]; then - CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }') -fi -# complain if no result yet -if [ -z "$CODENAME" ]; then - cat < /etc/apt/sources.list.d/pgdg.list < Date: Wed, 24 May 2017 20:00:24 +0300 Subject: [PATCH 0568/1124] add PostgreSQL 10 to .travis.yml --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index f0bcd93f..ab7d3a49 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,8 @@ before_install: - sudo sh ./travis/apt.postgresql.org.sh env: + - PGVERSION=10 CHECK_CODE=true + - PGVERSION=10 CHECK_CODE=false - PGVERSION=9.6 CHECK_CODE=true - PGVERSION=9.6 CHECK_CODE=false - PGVERSION=9.5 CHECK_CODE=true From 48967519bb67387a1d480272722f6b859db23689 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 25 May 2017 13:25:17 +0300 Subject: [PATCH 0569/1124] Fix clang analyzer warning --- src/pg_pathman.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 9d697449..d02158fd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -371,6 +371,9 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, appinfo->translated_vars); } + /* Here and below we assume that parent RelOptInfo exists */ + AssertState(parent_rel); + /* Adjust join quals for this child */ child_rel->joininfo = (List *) adjust_appendrel_attrs(root, (Node *) parent_rel->joininfo, From d0606bf1af21f41f591442697a8d5c340040a400 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 25 May 2017 15:05:52 +0300 Subject: [PATCH 0570/1124] improved scripts for Travis CI --- .travis.yml | 16 +++++--- travis/apt.postgresql.org.sh | 60 --------------------------- travis/dep-ubuntu-llvm.sh | 4 ++ travis/dep-ubuntu-postgres.sh | 4 ++ travis/llvm-snapshot.gpg.key | 52 
+++++++++++++++++++++++ travis/pg-travis-test.sh | 27 ++++++------ travis/postgresql.gpg.key | 77 +++++++++++++++++++++++++++++++++++ 7 files changed, 162 insertions(+), 78 deletions(-) delete mode 100644 travis/apt.postgresql.org.sh create mode 100755 travis/dep-ubuntu-llvm.sh create mode 100755 travis/dep-ubuntu-postgres.sh create mode 100644 travis/llvm-snapshot.gpg.key mode change 100644 => 100755 travis/pg-travis-test.sh create mode 100644 travis/postgresql.gpg.key diff --git a/.travis.yml b/.travis.yml index f0bcd93f..bb1c7244 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,13 +11,19 @@ compiler: - gcc before_install: - - sudo sh ./travis/apt.postgresql.org.sh + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get -y install -qq wget ca-certificates; fi + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-postgres.sh; fi + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-llvm.sh; fi + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get update -qq; fi env: - - PGVERSION=9.6 CHECK_CODE=true - - PGVERSION=9.6 CHECK_CODE=false - - PGVERSION=9.5 CHECK_CODE=true - - PGVERSION=9.5 CHECK_CODE=false + global: + - LLVM_VER=4.0 + matrix: + - PG_VER=9.6 CHECK_CODE=true + - PG_VER=9.6 CHECK_CODE=false + - PG_VER=9.5 CHECK_CODE=true + - PG_VER=9.5 CHECK_CODE=false script: bash ./travis/pg-travis-test.sh diff --git a/travis/apt.postgresql.org.sh b/travis/apt.postgresql.org.sh deleted file mode 100644 index 369615a5..00000000 --- a/travis/apt.postgresql.org.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/sh - -# OS version -CODENAME=trusty - -echo "Writing /etc/apt/sources.list.d/pgdg.list ..." 
-cat > /etc/apt/sources.list.d/pgdg.list <> $CLUSTER_PATH/postgresql.conf echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf diff --git a/travis/postgresql.gpg.key b/travis/postgresql.gpg.key new file mode 100644 index 00000000..8480576e --- /dev/null +++ b/travis/postgresql.gpg.key @@ -0,0 +1,77 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja +UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V +G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 +bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi +c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC +IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh +hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U +A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 +RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj +Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 +AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB +tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD +BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A +CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO +xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY +kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 +z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ +Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf +Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy +2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 +B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T +7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi +vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b +ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOI +RgQQEQgABgUCTpdI7gAKCRDFr3dKWFELWqaPAKD1TtT5c3sZz92Fj97KYmqbNQZP 
++ACfSC6+hfvlj4GxmUjp1aepoVTo3weJAhwEEAEIAAYFAk6XSQsACgkQTFprqxLS +p64F8Q//cCcutwrH50UoRFejg0EIZav6LUKejC6kpLeubbEtuaIH3r2zMblPGc4i ++eMQKo/PqyQrceRXeNNlqO6/exHozYi2meudxa6IudhwJIOn1MQykJbNMSC2sGUp +1W5M1N5EYgt4hy+qhlfnD66LR4G+9t5FscTJSy84SdiOuqgCOpQmPkVRm1HX5X1+ +dmnzMOCk5LHHQuiacV0qeGO7JcBCVEIDr+uhU1H2u5GPFNHm5u15n25tOxVivb94 +xg6NDjouECBH7cCVuW79YcExH/0X3/9G45rjdHlKPH1OIUJiiX47OTxdG3dAbB4Q +fnViRJhjehFscFvYWSqXo3pgWqUsEvv9qJac2ZEMSz9x2mj0ekWxuM6/hGWxJdB+ ++985rIelPmc7VRAXOjIxWknrXnPCZAMlPlDLu6+vZ5BhFX0Be3y38f7GNCxFkJzl +hWZ4Cj3WojMj+0DaC1eKTj3rJ7OJlt9S9xnO7OOPEUTGyzgNIDAyCiu8F4huLPaT +ape6RupxOMHZeoCVlqx3ouWctelB2oNXcxxiQ/8y+21aHfD4n/CiIFwDvIQjl7dg +mT3u5Lr6yxuosR3QJx1P6rP5ZrDTP9khT30t+HZCbvs5Pq+v/9m6XDmi+NlU7Zuh +Ehy97tL3uBDgoL4b/5BpFL5U9nruPlQzGq1P9jj40dxAaDAX/WKJAj0EEwEIACcC +GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8ACgkQf8x9RqzM +TPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv4E/M+HPIJ4wd +nBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9OU351gm3YQct +AMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJyX3vkWdJSMwC/ +LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/WB4AIj3VohIG +kWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT26pzTiuApWM3k +/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAypEhaLmXNkg4zD +kH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCSlmgyWsR40EPP +YvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lfH65P64dukxeR +GteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMrR910qvwYfd/4 +6rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs+bfiQpJG1p7e +B8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY++JAj0EEwEIACcC +GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEACgkQf8x9RqzM +TPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/ArBECjFTBwi/j9 +NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoSxiVr6GQ3YXMb +OGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXObiiZT38l55pp/ +BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtHvwKcA02wwjLe +LXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+wpu6YwVCicxB 
+Y59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMAKOLhNFUrSQ2m ++3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDeariFF9yC+5bL +tnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5hUWNr+y0i01L +jGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qbiNqCChveIm8m +Yr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7dR8tSyUJ9poDw +gw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJAj0EEwEIACcC +GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0ACgkQf8x9RqzM +TPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWayUIG4Sv6pH6h +m8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0VlkIfg7GUw3Tz +voGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExPZyliUnHdipei +4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0UM4Btgu1Sf3nn +JcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K2+EYJuIBsYUN +orOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307SidEbSnvO5ezNe +mE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2Nm13cmkxYjQ4Z +gMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYeN4D88sLYpFh3 +paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbzoRM3dyGP889a +OyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD5wmrrhN94kby +Gtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3Y= +=DA1T +-----END PGP PUBLIC KEY BLOCK----- From e0c04c300e0dd8f28ad0ac161e8b6135e481a24d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 25 May 2017 15:43:42 +0300 Subject: [PATCH 0571/1124] add test case for issue #91 --- expected/pathman_join_clause.out | 96 ++++++++++++++++++++++++++++---- sql/pathman_join_clause.sql | 62 ++++++++++++++++++++- 2 files changed, 145 insertions(+), 13 deletions(-) diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 48aeba5e..7d9acdea 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -28,15 +28,14 @@ SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); INSERT INTO test.fk VALUES (1, 1); INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); /* gather statistics on test tables to have 
deterministic plans */ -ANALYZE test.fk; -ANALYZE test.mytbl; +ANALYZE; /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------- Nested Loop -> Seq Scan on fk -> Custom Scan (RuntimeAppend) @@ -71,17 +70,14 @@ WHERE NOT key <@ int4range(6, end_key); Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) -> Bitmap Index Scan on mytbl_5_pkey Index Cond: (id1 = fk.id1) - -> Bitmap Heap Scan on mytbl_6 m - Recheck Cond: (id1 = fk.id1) - Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) - -> Bitmap Index Scan on mytbl_6_pkey - Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) -> Bitmap Heap Scan on mytbl_7 m Recheck Cond: (id1 = fk.id1) Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) -> Bitmap Index Scan on mytbl_7_pkey Index Cond: (id1 = fk.id1) -(44 rows) +(41 rows) /* test joint data */ SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key @@ -92,7 +88,85 @@ WHERE NOT key <@ int4range(6, end_key); test.mytbl_6 | 1 | 1 | 5 | | (1 row) +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), 
(3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Append + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | 
owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 10 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index b97650ba..90287201 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -5,6 +5,7 @@ CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; + /* * Test push down a join clause into child nodes of append */ @@ -16,12 +17,14 @@ CREATE TABLE test.fk ( start_key INT, end_key INT, PRIMARY KEY (id1, id2)); + CREATE TABLE test.mytbl ( id1 INT NOT NULL, id2 INT NOT NULL, key INT NOT NULL, CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), PRIMARY KEY (id1, key)); + SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); /* ...fill out with test data */ @@ -29,8 +32,8 @@ INSERT INTO test.fk VALUES (1, 1); INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); /* gather statistics on test tables to have deterministic plans */ -ANALYZE test.fk; -ANALYZE test.mytbl; +ANALYZE; + /* run test queries */ EXPLAIN (COSTS OFF) /* test plan */ @@ -44,6 +47,61 @@ FROM test.mytbl m JOIN test.fk USING(id1, id2) WHERE NOT key <@ int4range(6, end_key); + +/* + * Test case by @dimarick + */ + +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); + +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); + +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); + +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); + +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + +/* gather statistics on test 
tables to have deterministic plans */ +ANALYZE; + + +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From 42ce25b66087c75bd029f23337f8828ee83fb48b Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Thu, 25 May 2017 16:55:47 +0300 Subject: [PATCH 0572/1124] Fix python tests for pg10 --- tests/python/partitioning_test.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cda00c62..808c46c8 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -16,8 +16,9 @@ import subprocess import threading -from testgres import get_new_node, stop_all +from testgres import get_new_node, stop_all, get_config +version = get_config().get("VERSION_NUM") # Helper function for json equality def ordered(obj): @@ -68,11 +69,17 @@ def init_test_data(self, node): def catchup_replica(self, master, replica): """Wait until replica synchronizes with master""" - master.poll_query_until( - 'postgres', - 'SELECT pg_current_xlog_location() <= replay_location ' - 'FROM 
pg_stat_replication WHERE application_name = \'%s\'' - % replica.name) + if version >= 100000: + wait_lsn_query = \ + 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + else: + wait_lsn_query = \ + 'SELECT pg_current_xlog_location() <= replay_location ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + master.poll_query_until('postgres', wait_lsn_query) def printlog(self, logfile): with open(logfile, 'r') as log: @@ -482,7 +489,6 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries - version = int(node.psql("postgres", "show server_version_num")[1]) if version < 90600: return @@ -512,7 +518,10 @@ def test_parallel_nodes(self): # Test parallel select with node.connect() as con: con.execute('set max_parallel_workers_per_gather = 2') - con.execute('set min_parallel_relation_size = 0') + if version >= 100000: + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') con.execute('set parallel_setup_cost = 0') con.execute('set parallel_tuple_cost = 0') From 6187a4e0180bcf02d78bc0ba1c1b75b41ef5fd33 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 26 May 2017 18:12:43 +0300 Subject: [PATCH 0573/1124] Put missing in pg10 functions into pg_compat --- src/compat/pg_compat.c | 73 ++++++++++++++++++++++++++++++++++ src/include/compat/pg_compat.h | 17 ++++++++ src/partition_creation.c | 31 ++++++++++++++- src/pg_pathman.c | 62 ----------------------------- 4 files changed, 119 insertions(+), 64 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 10c40702..c2cbbb9f 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -213,6 +213,79 @@ ExprDoneCond isDone; #endif +/* + * get_all_actual_clauses + */ +#if PG_VERSION_NUM >= 100000 +List * +get_all_actual_clauses(List *restrictinfo_list) +{ + List 
*result = NIL; + ListCell *l; + + foreach(l, restrictinfo_list) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + + Assert(IsA(rinfo, RestrictInfo)); + + result = lappend(result, rinfo->clause); + } + return result; +} +#endif + + +/* + * make_restrictinfos_from_actual_clauses + */ +#if PG_VERSION_NUM >= 100000 +#include "optimizer/restrictinfo.h" +#include "optimizer/var.h" + +List * +make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list) +{ + List *result = NIL; + ListCell *l; + + foreach(l, clause_list) + { + Expr *clause = (Expr *) lfirst(l); + bool pseudoconstant; + RestrictInfo *rinfo; + + /* + * It's pseudoconstant if it contains no Vars and no volatile + * functions. We probably can't see any sublinks here, so + * contain_var_clause() would likely be enough, but for safety use + * contain_vars_of_level() instead. + */ + pseudoconstant = + !contain_vars_of_level((Node *) clause, 0) && + !contain_volatile_functions((Node *) clause); + if (pseudoconstant) + { + /* tell createplan.c to check for gating quals */ + root->hasPseudoConstantQuals = true; + } + + rinfo = make_restrictinfo(clause, + true, + false, + pseudoconstant, + root->qual_security_level, + NULL, + NULL, + NULL); + result = lappend(result, rinfo); + } + return result; +} +#endif + + /* * make_result * Build a Result plan node diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 33a22d30..cd753810 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -270,6 +270,14 @@ not_signle_result_handler() #endif +/* + * get_all_actual_clauses() + */ +#if PG_VERSION_NUM >= 100000 +extern List *get_all_actual_clauses(List *restrictinfo_list); +#endif + + /* * get_parameterized_joinrel_size() */ @@ -335,6 +343,15 @@ char get_rel_persistence(Oid relid); #endif +/* + * make_restrictinfo() + */ +#if PG_VERSION_NUM >= 100000 +extern List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list); 
+#endif + + /* * make_result() */ diff --git a/src/partition_creation.c b/src/partition_creation.c index c27295ca..8c45204f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -686,6 +686,8 @@ create_single_partition_internal(Oid parent_relid, /* Elements of the "CREATE TABLE" query tree */ RangeVar *parent_rv; + TableLikeClause like_clause; + CreateStmt create_stmt; List *create_stmts; ListCell *lc; @@ -739,9 +741,34 @@ create_single_partition_internal(Oid parent_relid, if (!tablespace) tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); + /* Initialize TableLikeClause structure */ + NodeSetTag(&like_clause, T_TableLikeClause); + like_clause.relation = copyObject(parent_rv); + like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | + CREATE_TABLE_LIKE_INDEXES | + CREATE_TABLE_LIKE_STORAGE; + + /* Initialize CreateStmt structure */ + NodeSetTag(&create_stmt, T_CreateStmt); + create_stmt.relation = copyObject(partition_rv); + create_stmt.tableElts = list_make1(copyObject(&like_clause)); + create_stmt.inhRelations = list_make1(copyObject(parent_rv)); + create_stmt.ofTypename = NULL; + create_stmt.constraints = NIL; + create_stmt.options = NIL; + create_stmt.oncommit = ONCOMMIT_NOOP; + create_stmt.tablespacename = tablespace; + create_stmt.if_not_exists = false; +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 + create_stmt.partition_info = NULL; +#endif +#if PG_VERSION_NUM >= 100000 + create_stmt.partbound = NULL; + create_stmt.partspec = NULL; +#endif + /* Obtain the sequence of Stmts to create partition and link it to parent */ - create_stmts = init_createstmts_for_partition(parent_rv, partition_rv, - tablespace); + create_stmts = transformCreateStmt(&create_stmt, NULL); /* Create the partition and all required relations */ foreach (lc, create_stmts) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d02158fd..50b247b6 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -232,68 +232,6 @@ get_pathman_config_params_relid(bool 
invalid_is_ok) * ---------------------------------------- */ -#if PG_VERSION_NUM >= 100000 -static List * -get_all_actual_clauses(List *restrictinfo_list) -{ - List *result = NIL; - ListCell *l; - - foreach(l, restrictinfo_list) - { - RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - - Assert(IsA(rinfo, RestrictInfo)); - - result = lappend(result, rinfo->clause); - } - return result; -} - -#include "optimizer/var.h" - -static List * -make_restrictinfos_from_actual_clauses(PlannerInfo *root, - List *clause_list) -{ - List *result = NIL; - ListCell *l; - - foreach(l, clause_list) - { - Expr *clause = (Expr *) lfirst(l); - bool pseudoconstant; - RestrictInfo *rinfo; - - /* - * It's pseudoconstant if it contains no Vars and no volatile - * functions. We probably can't see any sublinks here, so - * contain_var_clause() would likely be enough, but for safety use - * contain_vars_of_level() instead. - */ - pseudoconstant = - !contain_vars_of_level((Node *) clause, 0) && - !contain_volatile_functions((Node *) clause); - if (pseudoconstant) - { - /* tell createplan.c to check for gating quals */ - root->hasPseudoConstantQuals = true; - } - - rinfo = make_restrictinfo(clause, - true, - false, - pseudoconstant, - root->qual_security_level, - NULL, - NULL, - NULL); - result = lappend(result, rinfo); - } - return result; -} -#endif - /* * Creates child relation and adds it to root. * Returns child index in simple_rel_array. 
From 639f014bd63689f8ecd826d062e2a7f9e7e6338e Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Fri, 26 May 2017 19:05:18 +0300 Subject: [PATCH 0574/1124] Refactor pathman_process_utility_callback --- src/hooks.c | 39 ++++++++++------------------------ src/include/compat/pg_compat.h | 22 +++++++++++++++++++ 2 files changed, 33 insertions(+), 28 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 788226af..29e2fb76 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -755,24 +755,25 @@ pathman_relcache_hook(Datum arg, Oid relid) */ void #if PG_VERSION_NUM >= 100000 -pathman_process_utility_hook(PlannedStmt *pstmt, +pathman_process_utility_hook(PlannedStmt *first_arg, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag) { - Node *parsetree = pstmt->utilityStmt; - int stmt_location = pstmt->stmt_location, - stmt_len = pstmt->stmt_len; + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; #else -pathman_process_utility_hook(Node *parsetree, +pathman_process_utility_hook(Node *first_arg, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag) { + Node *parsetree = first_arg; int stmt_location = -1, stmt_len = 0; #endif @@ -825,27 +826,9 @@ pathman_process_utility_hook(Node *parsetree, } } -#if PG_VERSION_NUM >= 100000 - /* Call hooks set by other extensions if needed */ - if (process_utility_hook_next) - process_utility_hook_next(pstmt, queryString, - context, params, queryEnv, - dest, completionTag); - /* Else call internal implementation */ - else - standard_ProcessUtility(pstmt, queryString, - context, params, queryEnv, - dest, completionTag); -#else - /* Call hooks set by other extensions if needed */ - if (process_utility_hook_next) - process_utility_hook_next(parsetree, queryString, - context, params, - dest, completionTag); - /* Else 
call internal implementation */ - else - standard_ProcessUtility(parsetree, queryString, - context, params, - dest, completionTag); -#endif + /* 'first_arg' is PlannedStmt in pg10 or Node parsetree in pg9.6 and lower */ + call_process_utility_compat( + (process_utility_hook_next) ? process_utility_hook_next : + standard_ProcessUtility, + first_arg, queryString, context, params, queryEnv, dest, completionTag); } diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index cd753810..cd235a98 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -97,6 +97,28 @@ #endif +/* + * call_process_utility_compat() + * + * the parameter 'first_arg' is: + * - in pg 10 PlannedStmt object + * - in pg 9.6 and lower Node parsetree + */ +#if PG_VERSION_NUM >= 100000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + context, params, query_env, dest, \ + completion_tag) \ + (process_utility)((first_arg), (query_string), (context), (params), \ + (query_env), (dest), (completion_tag)) +#elif PG_VERSION_NUM >= 90500 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + context, params, query_env, dest, \ + completion_tag) \ + (process_utility)((first_arg), (query_string), (context), (params), \ + (dest), (completion_tag)) +#endif + + /* * CatalogIndexInsert() */ From 2bbdd2db9b3a6fd14972d4889a658c2224ae9009 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 29 May 2017 13:46:31 +0300 Subject: [PATCH 0575/1124] Add compat version of get_cheapest_path_for_pathkeys routine --- src/include/compat/pg_compat.h | 19 +++++++++++++ src/pg_pathman.c | 50 ++++++++++------------------------ 2 files changed, 34 insertions(+), 35 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index cd235a98..2667ba95 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -300,6 +300,25 @@ extern List *get_all_actual_clauses(List 
*restrictinfo_list); #endif +/* + * get_cheapest_path_for_pathkeys() + */ +#if PG_VERSION_NUM >= 100000 +#define get_cheapest_path_for_pathkeys_compat(paths, pathkeys, required_outer, \ + cost_criterion, \ + require_parallel_safe) \ + get_cheapest_path_for_pathkeys((paths), (pathkeys), (required_outer), \ + (cost_criterion), \ + (require_parallel_safe)) +#elif PG_VERSION_NUM >= 90500 +#define get_cheapest_path_for_pathkeys_compat(paths, pathkeys, required_outer, \ + cost_criterion, \ + require_parallel_safe) \ + get_cheapest_path_for_pathkeys((paths), (pathkeys), (required_outer), \ + (cost_criterion)) +#endif + + /* * get_parameterized_joinrel_size() */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 50b247b6..4c185f32 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1489,31 +1489,18 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, *cheapest_total; /* Locate the right paths, if they are available. */ -#if PG_VERSION_NUM >= 100000 - cheapest_startup = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - STARTUP_COST, - true); - cheapest_total = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - TOTAL_COST, - true); -#else cheapest_startup = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - STARTUP_COST); + get_cheapest_path_for_pathkeys_compat(childrel->pathlist, + pathkeys, + NULL, + STARTUP_COST, + false); cheapest_total = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - TOTAL_COST); -#endif + get_cheapest_path_for_pathkeys_compat(childrel->pathlist, + pathkeys, + NULL, + TOTAL_COST, + false); /* * If we can't find any paths with the right order just use the @@ -2091,18 +2078,11 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * parameterization. If it has exactly the needed parameterization, we're * done. 
*/ -#if PG_VERSION_NUM >= 100000 - cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, - NIL, - required_outer, - TOTAL_COST, - false); -#else - cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, - NIL, - required_outer, - TOTAL_COST); -#endif + cheapest = get_cheapest_path_for_pathkeys_compat(rel->pathlist, + NIL, + required_outer, + TOTAL_COST, + false); Assert(cheapest != NULL); if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) return cheapest; From a084041a620b8d4f554f83b3d6801d7c458011d3 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 29 May 2017 15:56:56 +0300 Subject: [PATCH 0576/1124] Add compat version of create_merge_append_path routine and other light fixes --- src/compat/pg_compat.c | 22 +++++++++++++++++++ src/include/compat/pg_compat.h | 40 +++++++++++++++++++++++++++++++++- src/init.c | 21 ------------------ src/nodes_common.c | 8 ++----- src/pg_pathman.c | 30 ++++--------------------- 5 files changed, 67 insertions(+), 54 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index c2cbbb9f..e25a8898 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -352,6 +352,28 @@ McxtStatsInternal(MemoryContext context, int level, #endif +/* + * oid_cmp + * + * qsort comparison function for Oids; + * needed for find_inheritance_children_array() function + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +int +oid_cmp(const void *p1, const void *p2) +{ + Oid v1 = *((const Oid *) p1); + Oid v2 = *((const Oid *) p2); + + if (v1 < v2) + return -1; + if (v1 > v2) + return 1; + return 0; +} +#endif + + /* * set_dummy_rel_pathlist * Build a dummy path for a relation that's been excluded by constraints diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2667ba95..e9b64dcc 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -169,7 +169,7 @@ void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); */ #if PG_VERSION_NUM 
>= 100000 #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NULL) + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL) #elif PG_VERSION_NUM >= 90600 #ifndef PGPRO_VERSION @@ -187,6 +187,22 @@ void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); #endif /* PG_VERSION_NUM */ +/* + * create_merge_append_path() + */ +#if PG_VERSION_NUM >= 100000 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer), NIL) +#elif PG_VERSION_NUM >= 90500 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#endif + + /* * create_nestloop_path() */ @@ -418,6 +434,14 @@ void McxtStatsInternal(MemoryContext context, int level, #endif +/* + * oid_cmp() + */ +#if PG_VERSION_NUM >=90500 && PG_VERSION_NUM < 100000 +extern int oid_cmp(const void *p1, const void *p2); +#endif + + /* * parse_analyze() * @@ -512,6 +536,20 @@ extern void set_rel_consider_parallel(PlannerInfo *root, #endif +/* + * tlist_member_ignore_relabel() + * + * in compat version the type of first argument is (Expr *) + */ +#if PG_VERSION_NUM >= 100000 +#define tlist_member_ignore_relabel_compat(expr, targetlist) \ + tlist_member_ignore_relabel((expr), (targetlist)) +#elif PG_VERSION_NUM >= 90500 +#define tlist_member_ignore_relabel_compat(expr, targetlist) \ + tlist_member_ignore_relabel((Node *) (expr), (targetlist)) +#endif + + /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index d0210486..1508a021 100644 --- a/src/init.c +++ b/src/init.c @@ -82,10 +82,6 @@ static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, Datum *value); -#if PG_VERSION_NUM < 100000 -static 
int oid_cmp(const void *p1, const void *p2); -#endif - /* Validate SQL facade */ static uint32 build_sql_facade_version(char *version_cstr); @@ -1127,23 +1123,6 @@ validate_hash_constraint(const Expr *expr, return false; } -#if PG_VERSION_NUM < 100000 -/* needed for find_inheritance_children_array() function */ -static int -oid_cmp(const void *p1, const void *p2) -{ - Oid v1 = *((const Oid *) p1); - Oid v2 = *((const Oid *) p2); - - if (v1 < v2) - return -1; - if (v1 > v2) - return 1; - return 0; -} -#endif - - /* Parse cstring and build uint32 representing the version */ static uint32 build_sql_facade_version(char *version_cstr) diff --git a/src/nodes_common.c b/src/nodes_common.c index 50a0e668..18f8d944 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -7,6 +7,7 @@ * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "init.h" #include "nodes_common.h" @@ -175,13 +176,8 @@ tlist_is_var_subset(List *a, List *b) if (!IsA(te->expr, Var) && !IsA(te->expr, RelabelType)) continue; -#if PG_VERSION_NUM >= 100000 - if (!tlist_member_ignore_relabel(te->expr, a)) - return true; -#else - if (!tlist_member_ignore_relabel((Node *) te->expr, a)) + if (!tlist_member_ignore_relabel_compat(te->expr, a)) return true; -#endif } return false; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4c185f32..67709f69 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1575,33 +1575,11 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, else { /* ... 
and build the MergeAppend paths */ -#if PG_VERSION_NUM >= 100000 - add_path(rel, (Path *) create_merge_append_path(root, - rel, - startup_subpaths, - pathkeys, - NULL, - NULL)); + add_path(rel, (Path *) create_merge_append_path_compat( + root, rel, startup_subpaths, pathkeys, NULL)); if (startup_neq_total) - add_path(rel, (Path *) create_merge_append_path(root, - rel, - total_subpaths, - pathkeys, - NULL, - NULL)); -#else - add_path(rel, (Path *) create_merge_append_path(root, - rel, - startup_subpaths, - pathkeys, - NULL)); - if (startup_neq_total) - add_path(rel, (Path *) create_merge_append_path(root, - rel, - total_subpaths, - pathkeys, - NULL)); -#endif + add_path(rel, (Path *) create_merge_append_path_compat( + root, rel, total_subpaths, pathkeys, NULL)); } } } From 135064f3ba8bafc15e356c742153e188f34de349 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Mon, 29 May 2017 17:08:42 +0300 Subject: [PATCH 0577/1124] Remove unused routines from pg_compat --- src/compat/pg_compat.c | 131 --------------------------------- src/include/compat/pg_compat.h | 9 --- 2 files changed, 140 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index e25a8898..0d866844 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -37,92 +37,6 @@ */ -/* - * CatalogIndexInsert is the copy of static prototype having the same name from - * src/backend/catalog/indexing.c - */ -#if PG_VERSION_NUM >= 100000 -#include "catalog/index.h" -void -CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) -{ - int i; - int numIndexes; - RelationPtr relationDescs; - Relation heapRelation; - TupleTableSlot *slot; - IndexInfo **indexInfoArray; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - - /* HOT update does not require index inserts */ - if (HeapTupleIsHeapOnly(heapTuple)) - return; - - /* - * Get information from the state structure. Fall out if nothing to do. 
- */ - numIndexes = indstate->ri_NumIndices; - if (numIndexes == 0) - return; - relationDescs = indstate->ri_IndexRelationDescs; - indexInfoArray = indstate->ri_IndexRelationInfo; - heapRelation = indstate->ri_RelationDesc; - - /* Need a slot to hold the tuple being examined */ - slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); - - /* - * for each index, form and insert the index tuple - */ - for (i = 0; i < numIndexes; i++) - { - IndexInfo *indexInfo; - - indexInfo = indexInfoArray[i]; - - /* If the index is marked as read-only, ignore it */ - if (!indexInfo->ii_ReadyForInserts) - continue; - - /* - * Expressional and partial indexes on system catalogs are not - * supported, nor exclusion constraints, nor deferred uniqueness - */ - Assert(indexInfo->ii_Expressions == NIL); - Assert(indexInfo->ii_Predicate == NIL); - Assert(indexInfo->ii_ExclusionOps == NULL); - Assert(relationDescs[i]->rd_index->indimmediate); - - /* - * FormIndexDatum fills in its values and isnull parameters with the - * appropriate values for the column(s) of the index. - */ - FormIndexDatum(indexInfo, - slot, - NULL, /* no expression eval to do */ - values, - isnull); - - /* - * The index AM does the rest. - */ - index_insert(relationDescs[i], /* index relation */ - values, /* array of index Datums */ - isnull, /* is-null flags */ - &(heapTuple->t_self), /* tid of heap tuple */ - heapRelation, - relationDescs[i]->rd_index->indisunique ? 
- UNIQUE_CHECK_YES : UNIQUE_CHECK_NO, - indexInfo); - } - - ExecDropSingleTupleTableSlot(slot); -} -#endif - - /* * create_plain_partial_paths * Build partial access paths for parallel scan of a plain relation @@ -790,48 +704,3 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) rel->tuples = parent_rows; } - -/* - * Construct the sequence of utility statements to create a new partition - */ -List * -init_createstmts_for_partition(RangeVar *parent_rv, - RangeVar *partition_rv, - char *tablespace) -{ - TableLikeClause like_clause; - CreateStmt create_stmt; - List *result; - - /* Initialize TableLikeClause structure */ - NodeSetTag(&like_clause, T_TableLikeClause); - like_clause.relation = copyObject(parent_rv); - like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | - CREATE_TABLE_LIKE_INDEXES | - CREATE_TABLE_LIKE_STORAGE; - - /* Initialize CreateStmt structure */ - NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = copyObject(partition_rv); - create_stmt.tableElts = list_make1(copyObject(&like_clause)); - create_stmt.inhRelations = list_make1(copyObject(parent_rv)); - create_stmt.ofTypename = NULL; - create_stmt.constraints = NIL; - create_stmt.options = NIL; - create_stmt.oncommit = ONCOMMIT_NOOP; - create_stmt.tablespacename = tablespace; - create_stmt.if_not_exists = false; - -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 - create_stmt.partition_info = NULL; -#endif - -#if PG_VERSION_NUM >= 100000 - create_stmt.partbound = NULL; - create_stmt.partspec = NULL; -#endif - - result = transformCreateStmt(&create_stmt, NULL); - - return result; -} diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e9b64dcc..589aee79 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -119,15 +119,6 @@ #endif -/* - * CatalogIndexInsert() - */ -#if PG_VERSION_NUM >= 100000 -#include "catalog/indexing.h" -void CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple); -#endif 
- - /* * CatalogTupleInsert() */ From f5dc278aa83f195e6654a176750f4b8fdabdca93 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 29 May 2017 19:19:53 +0300 Subject: [PATCH 0578/1124] make finish_delayed_invalidation() more lazy --- expected/pathman_calamity.out | 6 +- src/hooks.c | 2 +- src/include/init.h | 2 + src/include/utils.h | 1 + src/init.c | 125 ++++++++++++++++++++++++---------- src/relation_info.c | 86 +++++++++++++---------- src/utils.c | 15 ++++ 7 files changed, 160 insertions(+), 77 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index dcf5ed54..21b542ae 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -867,7 +867,7 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ maintenance | 0 partition bounds cache | 0 partition dispatch cache | 1 - partition parents cache | 10 + partition parents cache | 0 (4 rows) SELECT drop_partitions('calamity.test_pathman_cache_stats'); @@ -910,9 +910,9 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ context | entries --------------------------+--------- maintenance | 0 - partition bounds cache | 10 + partition bounds cache | 0 partition dispatch cache | 1 - partition parents cache | 10 + partition parents cache | 0 (4 rows) SELECT drop_partitions('calamity.test_pathman_cache_stats'); diff --git a/src/hooks.c b/src/hooks.c index b583fa7b..6b9973a5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -746,7 +746,7 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Which means that 'relid' might be parent */ if (relid != InvalidOid) - delay_invalidation_parent_rel(relid); + delay_invalidation_vague_rel(relid); #ifdef NOT_USED elog(DEBUG2, "Invalidation message for relation %u [%u]", relid, MyProcPid); diff --git a/src/include/init.h b/src/include/init.h index 64888595..a2fb494e 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -225,6 +225,8 @@ bool read_pathman_params(Oid relid, Datum 
*values, bool *isnull); +Oid *read_parent_oids(int *nelems); + bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..30ddca2a 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -31,6 +31,7 @@ bool match_expr_to_operand(Node *expr, Node *operand); */ Oid get_pathman_schema(void); List * list_reverse(List *l); +int oid_cmp(const void *p1, const void *p2); /* * Useful functions for relations. diff --git a/src/init.c b/src/init.c index f6c365f6..db06babb 100644 --- a/src/init.c +++ b/src/init.c @@ -70,7 +70,15 @@ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); static void init_local_cache(void); static void fini_local_cache(void); -static void read_pathman_config(void); + +/* Special handlers for read_pathman_config() */ +static void add_partrel_to_array(Datum *values, bool *isnull, void *context); +static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); + +static void read_pathman_config(void (*per_row_cb)(Datum *values, + bool *isnull, + void *context), + void *context); static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, @@ -82,8 +90,6 @@ static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, Datum *value); -static int oid_cmp(const void *p1, const void *p2); - /* Validate SQL facade */ static uint32 build_sql_facade_version(char *version_cstr); @@ -202,8 +208,11 @@ load_config(void) /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ validate_sql_facade_version(get_sql_facade_version()); - init_local_cache(); /* create various hash tables (caches) */ - read_pathman_config(); /* read PATHMAN_CONFIG table & fill cache */ + /* Create various hash tables (caches) */ + init_local_cache(); + + /* Read PATHMAN_CONFIG table & fill cache */ + read_pathman_config(startup_invalidate_parent, NULL); /* Register pathman_relcache_hook(), 
currently we can't unregister it */ if (relcache_callback_needed) @@ -781,11 +790,83 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } +typedef struct +{ + Oid *array; + int nelems; + int capacity; +} read_parent_oids_cxt; + +/* + * Get a sorted array of partitioned tables' Oids. + */ +Oid * +read_parent_oids(int *nelems) +{ + read_parent_oids_cxt context = { NULL, 0, 0 }; + + read_pathman_config(add_partrel_to_array, &context); + + /* Perform sorting */ + qsort(context.array, context.nelems, sizeof(Oid), oid_cmp); + + /* Return values */ + *nelems = context.nelems; + return context.array; +} + + +/* read_pathman_config(): add parent to array of Oids */ +static void +add_partrel_to_array(Datum *values, bool *isnull, void *context) +{ + Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); + read_parent_oids_cxt *result = (read_parent_oids_cxt *) context; + + if (result->array == NULL) + { + result->capacity = PART_RELS_SIZE; + result->array = palloc(result->capacity * sizeof(Oid)); + } + + if (result->nelems >= result->capacity) + { + result->capacity = result->capacity * 2 + 1; + result->array = repalloc(result->array, result->capacity * sizeof(Oid)); + } + + /* Append current relid */ + result->array[result->nelems++] = relid; +} + +/* read_pathman_config(): create dummy cache entry for parent */ +static void +startup_invalidate_parent(Datum *values, bool *isnull, void *context) +{ + Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); + + /* Check that relation 'relid' exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("table \"%s\" contains nonexistent relation %u", + PATHMAN_CONFIG, relid), + errhint(INIT_ERROR_HINT))); + } + + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(relid, NULL); +} + /* * Go through the PATHMAN_CONFIG table and create 
PartRelationInfo entries. */ static void -read_pathman_config(void) +read_pathman_config(void (*per_row_cb)(Datum *values, + bool *isnull, + void *context), + void *context) { Relation rel; HeapScanDesc scan; @@ -811,7 +892,6 @@ read_pathman_config(void) { Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; - Oid relid; /* partitioned table */ /* Extract Datums from tuple 'htup' */ heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); @@ -821,21 +901,8 @@ read_pathman_config(void) Assert(!isnull[Anum_pathman_config_parttype - 1]); Assert(!isnull[Anum_pathman_config_expr - 1]); - /* Extract values from Datums */ - relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - - /* Check that relation 'relid' exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("table \"%s\" contains nonexistent relation %u", - PATHMAN_CONFIG, relid), - errhint(INIT_ERROR_HINT))); - } - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); + /* Execute per row callback */ + per_row_cb(values, isnull, context); } /* Clean resources */ @@ -1127,20 +1194,6 @@ validate_hash_constraint(const Expr *expr, return false; } -/* needed for find_inheritance_children_array() function */ -static int -oid_cmp(const void *p1, const void *p2) -{ - Oid v1 = *((const Oid *) p1); - Oid v2 = *((const Oid *) p2); - - if (v1 < v2) - return -1; - if (v1 > v2) - return 1; - return 0; -} - /* Parse cstring and build uint32 representing the version */ static uint32 diff --git a/src/relation_info.c b/src/relation_info.c index e824b72f..8d1e21d0 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -88,8 +88,12 @@ static bool delayed_shutdown = false; /* pathman was dropped */ list = NIL; \ } while (0) +/* Handy wrappers for Oids */ +#define bsearch_oid(key, array, array_size) \ + bsearch((const 
void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) -static bool try_perform_parent_refresh(Oid parent); + +static bool try_invalidate_parent(Oid relid, Oid *parents, int parents_count); static Oid try_syscache_parent_search(Oid partition, PartParentSearch *status); static Oid get_parent_of_partition_internal(Oid partition, PartParentSearch *status, @@ -857,6 +861,9 @@ finish_delayed_invalidation(void) /* Check that current state is transactional */ if (IsTransactionState()) { + Oid *parents = NULL; + int parents_count; + bool parents_fetched = false; ListCell *lc; /* Handle the probable 'DROP EXTENSION' case */ @@ -896,11 +903,19 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(parent))) continue; - if (!pathman_config_contains_relation(parent, NULL, NULL, NULL, NULL)) - remove_pathman_relation_info(parent); - else + /* Fetch all partitioned tables */ + if (!parents_fetched) + { + parents = read_parent_oids(&parents_count); + parents_fetched = true; + } + + /* Check if parent still exists */ + if (bsearch_oid(parent, parents, parents_count)) /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(parent, NULL); + else + remove_pathman_relation_info(parent); } /* Process all other vague cases */ @@ -912,8 +927,15 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(vague_rel))) continue; + /* Fetch all partitioned tables */ + if (!parents_fetched) + { + parents = read_parent_oids(&parents_count); + parents_fetched = true; + } + /* It might be a partitioned table or a partition */ - if (!try_perform_parent_refresh(vague_rel)) + if (!try_invalidate_parent(vague_rel, parents, parents_count)) { PartParentSearch search; Oid parent; @@ -923,21 +945,17 @@ finish_delayed_invalidation(void) switch (search) { - /* It's still parent */ + /* + * Two main cases: + * - It's *still* parent (in PATHMAN_CONFIG) + * - It *might have been* parent before (not in PATHMAN_CONFIG) + */ case 
PPS_ENTRY_PART_PARENT: - { - /* Skip if we've already refreshed this parent */ - if (!list_member_oid(fresh_rels, parent)) - try_perform_parent_refresh(parent); - } - break; - - /* It *might have been* parent before (not in PATHMAN_CONFIG) */ case PPS_ENTRY_PARENT: { /* Skip if we've already refreshed this parent */ if (!list_member_oid(fresh_rels, parent)) - try_perform_parent_refresh(parent); + try_invalidate_parent(parent, parents, parents_count); } break; @@ -954,6 +972,9 @@ finish_delayed_invalidation(void) free_invalidation_list(delayed_invalidation_parent_rels); free_invalidation_list(delayed_invalidation_vague_rels); + + if (parents) + pfree(parents); } } @@ -1113,34 +1134,25 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) } } -/* - * Try to refresh cache entry for relation 'parent'. - * - * Return true on success. - */ +/* Try to invalidate cache entry for relation 'parent' */ static bool -try_perform_parent_refresh(Oid parent) +try_invalidate_parent(Oid relid, Oid *parents, int parents_count) { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - if (pathman_config_contains_relation(parent, values, isnull, NULL, &iptr)) + /* Check if this is a partitioned table */ + if (bsearch_oid(relid, parents, parents_count)) { - bool should_update_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - - if (should_update_expr) - pathman_config_refresh_parsed_expression(parent, values, isnull, &iptr); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(relid, NULL); - /* If anything went wrong, return false (actually, it might emit ERROR) */ - refresh_pathman_relation_info(parent, - values, - true); /* allow lazy */ + /* Success */ + return true; } - /* Not a partitioned relation */ - else return false; - return true; + /* Clear remaining cache entry */ + remove_pathman_relation_info(relid); + + /* Not a partitioned relation */ + return false; } diff 
--git a/src/utils.c b/src/utils.c index 12afb632..d3577b66 100644 --- a/src/utils.c +++ b/src/utils.c @@ -179,6 +179,21 @@ list_reverse(List *l) return result; } +int +oid_cmp(const void *p1, const void *p2) +{ + Oid v1 = *((const Oid *) p1); + Oid v2 = *((const Oid *) p2); + + if (v1 < v2) + return -1; + + if (v1 > v2) + return 1; + + return 0; +} + /* From 12957de0eb1b179269745b980fe97af9fe676d6e Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 30 May 2017 16:44:30 +0300 Subject: [PATCH 0579/1124] Refactor set_rel_consider_parallel routine in pg_compat --- src/compat/pg_compat.c | 171 +++++------------------------------------ 1 file changed, 19 insertions(+), 152 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 5693358e..3e048d0d 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -325,11 +325,17 @@ set_dummy_rel_pathlist(RelOptInfo *rel) * If this relation could possibly be scanned from within a worker, then set * its consider_parallel flag. */ -#if PG_VERSION_NUM >= 100000 void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) { +#if PG_VERSION_NUM >= 100000 +#define is_parallel_safe_compat(root, exprs) is_parallel_safe((root), (exprs)) +#elif PG_VERSION_NUM >= 90500 +#define is_parallel_safe_compat(root, exprs) \ + (!has_parallel_hazard((exprs), false)) +#endif + /* * The flag has previously been initialized to false, so we can just * return if it becomes clear that we can't safely set it. @@ -340,7 +346,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, Assert(root->glob->parallelModeOK); /* This should only be called for baserels and appendrel children. */ - Assert(IS_SIMPLE_REL(rel)); + Assert(rel->reloptkind == RELOPT_BASEREL || + rel->reloptkind == RELOPT_OTHER_MEMBER_REL); /* Assorted checks based on rtekind. 
*/ switch (rte->rtekind) @@ -370,7 +377,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, if (proparallel != PROPARALLEL_SAFE) return; - if (!is_parallel_safe(root, (Node *) rte->tablesample->args)) + if (!is_parallel_safe_compat( + root, (Node *) rte->tablesample->args)) return; } @@ -423,17 +431,19 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, case RTE_FUNCTION: /* Check for parallel-restricted functions. */ - if (!is_parallel_safe(root, (Node *) rte->functions)) + if (!is_parallel_safe_compat(root, (Node *) rte->functions)) return; break; +#if PG_VERSION_NUM >= 100000 case RTE_TABLEFUNC: /* not parallel safe */ return; +#endif case RTE_VALUES: /* Check for parallel-restricted functions. */ - if (!is_parallel_safe(root, (Node *) rte->values_lists)) + if (!is_parallel_safe_compat(root, (Node *) rte->values_lists)) return; break; @@ -448,12 +458,14 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ return; +#if PG_VERSION_NUM >= 100000 case RTE_NAMEDTUPLESTORE: /* * tuplestore cannot be shared, at least without more * infrastructure to support that. */ return; +#endif } /* @@ -465,164 +477,19 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * outer join clauses work correctly. It would likely break equivalence * classes, too. */ - if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo)) - return; - - /* - * Likewise, if the relation's outputs are not parallel-safe, give up. - * (Usually, they're just Vars, but sometimes they're not.) - */ - if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs)) - return; - - /* We have a winner. */ - rel->consider_parallel = true; -} -#elif PG_VERSION_NUM >= 90600 -void -set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte) -{ - /* - * The flag has previously been initialized to false, so we can just - * return if it becomes clear that we can't safely set it. 
- */ - Assert(!rel->consider_parallel); - - /* Don't call this if parallelism is disallowed for the entire query. */ - Assert(root->glob->parallelModeOK); - - /* This should only be called for baserels and appendrel children. */ - Assert(rel->reloptkind == RELOPT_BASEREL || - rel->reloptkind == RELOPT_OTHER_MEMBER_REL); - - /* Assorted checks based on rtekind. */ - switch (rte->rtekind) - { - case RTE_RELATION: - - /* - * Currently, parallel workers can't access the leader's temporary - * tables. We could possibly relax this if the wrote all of its - * local buffers at the start of the query and made no changes - * thereafter (maybe we could allow hint bit changes), and if we - * taught the workers to read them. Writing a large number of - * temporary buffers could be expensive, though, and we don't have - * the rest of the necessary infrastructure right now anyway. So - * for now, bail out if we see a temporary table. - */ - if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) - return; - - /* - * Table sampling can be pushed down to workers if the sample - * function and its arguments are safe. - */ - if (rte->tablesample != NULL) - { - char proparallel = func_parallel(rte->tablesample->tsmhandler); - - if (proparallel != PROPARALLEL_SAFE) - return; - if (has_parallel_hazard((Node *) rte->tablesample->args, - false)) - return; - } - - /* - * Ask FDWs whether they can support performing a ForeignScan - * within a worker. Most often, the answer will be no. For - * example, if the nature of the FDW is such that it opens a TCP - * connection with a remote server, each parallel worker would end - * up with a separate connection, and these connections might not - * be appropriately coordinated between workers and the leader. 
- */ - if (rte->relkind == RELKIND_FOREIGN_TABLE) - { - Assert(rel->fdwroutine); - if (!rel->fdwroutine->IsForeignScanParallelSafe) - return; - if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) - return; - } - - /* - * There are additional considerations for appendrels, which we'll - * deal with in set_append_rel_size and set_append_rel_pathlist. - * For now, just set consider_parallel based on the rel's own - * quals and targetlist. - */ - break; - - case RTE_SUBQUERY: - - /* - * There's no intrinsic problem with scanning a subquery-in-FROM - * (as distinct from a SubPlan or InitPlan) in a parallel worker. - * If the subquery doesn't happen to have any parallel-safe paths, - * then flagging it as consider_parallel won't change anything, - * but that's true for plain tables, too. We must set - * consider_parallel based on the rel's own quals and targetlist, - * so that if a subquery path is parallel-safe but the quals and - * projection we're sticking onto it are not, we correctly mark - * the SubqueryScanPath as not parallel-safe. (Note that - * set_subquery_pathlist() might push some of these quals down - * into the subquery itself, but that doesn't change anything.) - */ - break; - - case RTE_JOIN: - /* Shouldn't happen; we're only considering baserels here. */ - Assert(false); - return; - - case RTE_FUNCTION: - /* Check for parallel-restricted functions. */ - if (has_parallel_hazard((Node *) rte->functions, false)) - return; - break; - - case RTE_VALUES: - /* Check for parallel-restricted functions. */ - if (has_parallel_hazard((Node *) rte->values_lists, false)) - return; - break; - - case RTE_CTE: - - /* - * CTE tuplestores aren't shared among parallel workers, so we - * force all CTE scans to happen in the leader. Also, populating - * the CTE would require executing a subplan that's not available - * in the worker, might be parallel-restricted, and must get - * executed only once. 
- */ - return; - } - - /* - * If there's anything in baserestrictinfo that's parallel-restricted, we - * give up on parallelizing access to this relation. We could consider - * instead postponing application of the restricted quals until we're - * above all the parallelism in the plan tree, but it's not clear that - * that would be a win in very many cases, and it might be tricky to make - * outer join clauses work correctly. It would likely break equivalence - * classes, too. - */ - if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) + if (!is_parallel_safe_compat(root, (Node *) rel->baserestrictinfo)) return; /* * Likewise, if the relation's outputs are not parallel-safe, give up. * (Usually, they're just Vars, but sometimes they're not.) */ - if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) + if (!is_parallel_safe_compat(root, (Node *) rel->reltarget->exprs)) return; /* We have a winner. */ rel->consider_parallel = true; } -#endif /* From 1d8c8cb7e0228b4da5ad04d207c4b7f4cdc2eed2 Mon Sep 17 00:00:00 2001 From: Maksim Milyutin Date: Tue, 30 May 2017 16:58:32 +0300 Subject: [PATCH 0580/1124] Light fix to previous commit --- src/compat/pg_compat.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 3e048d0d..e9792b3c 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -321,6 +321,7 @@ set_dummy_rel_pathlist(RelOptInfo *rel) #endif +#if PG_VERSION_NUM >= 90600 /* * If this relation could possibly be scanned from within a worker, then set * its consider_parallel flag. @@ -490,6 +491,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, /* We have a winner. 
*/ rel->consider_parallel = true; } +#endif /* From d765b14be48b6684e4493ebcf12f712055653477 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 31 May 2017 14:55:16 +0300 Subject: [PATCH 0581/1124] unification: change 'bound_info' to 'pbin' --- src/relation_info.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 8d1e21d0..ff2b59fb 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -465,7 +465,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Initialize bounds of partitions */ for (i = 0; i < PrelChildrenCount(prel); i++) { - PartBoundInfo *bound_info; + PartBoundInfo *pbin; /* Clear all previous allocations */ MemoryContextReset(temp_mcxt); @@ -474,7 +474,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, old_mcxt = MemoryContextSwitchTo(temp_mcxt); { /* Fetch constraint's expression tree */ - bound_info = get_bounds_of_partition(partitions[i], prel); + pbin = get_bounds_of_partition(partitions[i], prel); } MemoryContextSwitchTo(old_mcxt); @@ -482,22 +482,22 @@ fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: - prel->children[bound_info->part_idx] = bound_info->child_rel; + prel->children[pbin->part_idx] = pbin->child_rel; break; case PT_RANGE: { /* Copy child's Oid */ - prel->ranges[i].child_oid = bound_info->child_rel; + prel->ranges[i].child_oid = pbin->child_rel; /* Copy all min & max Datums to the persistent mcxt */ old_mcxt = MemoryContextSwitchTo(cache_mcxt); { - prel->ranges[i].min = CopyBound(&bound_info->range_min, + prel->ranges[i].min = CopyBound(&pbin->range_min, prel->ev_byval, prel->ev_len); - prel->ranges[i].max = CopyBound(&bound_info->range_max, + prel->ranges[i].max = CopyBound(&pbin->range_max, prel->ev_byval, prel->ev_len); } From d03fd3289fe79e2c67fc660d35a325ce15bf1506 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 31 May 2017 16:50:18 +0300 Subject: [PATCH 0582/1124] check that 
expression's columns are NOT NULL --- expected/pathman_basic.out | 22 +++++++++- expected/pathman_calamity.out | 8 ++-- expected/pathman_expressions.out | 6 +-- sql/pathman_basic.sql | 10 +++++ sql/pathman_expressions.sql | 6 +-- src/relation_info.c | 69 ++++++++++++++++---------------- 6 files changed, 75 insertions(+), 46 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 6a196215..97cbbdf8 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -9,8 +9,17 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -ERROR: partitioning key "value" must be marked NOT NULL +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +\set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); create_hash_partitions @@ -129,8 +138,17 @@ CREATE TABLE test.range_rel ( CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); -ERROR: partitioning key "dt" must be marked NOT NULL +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column 
"dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +\set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); ERROR: not enough partitions to fit all values of "dt" diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 21b542ae..5e11a029 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -280,11 +280,11 @@ ERROR: 'parttype' should not be NULL SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ ERROR: interval should be NULL for HASH partitioned table SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -ERROR: cannot find type name for attribute "expr" of relation "pg_class" +ERROR: failed to analyze partitioning expression "expr" SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ ERROR: unrecognized token: "cooked_expr" SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ -ERROR: cannot find type name for attribute "expr" of relation "pg_class" +ERROR: failed to analyze partitioning expression "EXPR" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); validate_relname @@ -304,7 +304,7 @@ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ ERROR: 'expression' should not be NULL SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ -ERROR: cannot find type name for attribute 
"valval" of relation "part_test" +ERROR: failed to analyze partitioning expression "valval" SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ ERROR: failed to analyze partitioning expression "random()" SELECT validate_expression('calamity.part_test', 'val'); /* OK */ @@ -580,7 +580,7 @@ ERROR: relation "0" does not exist SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ -ERROR: cannot find type name for attribute "v_a_l" of relation "part_test" +ERROR: failed to analyze partitioning expression "V_A_L" SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ add_to_pathman_config ----------------------- diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 764a11fb..1e5d7d47 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -109,8 +109,8 @@ SELECT create_hash_partitions('test_exprs.canary', 'val', 5); */ CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY KEY, - value INTEGER, - value2 INTEGER + value INTEGER NOT NULL, + value2 INTEGER NOT NULL ); INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; @@ -243,7 +243,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5 /* * Test RANGE */ -CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; \set VERBOSITY default diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 008e1338..364584be 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -11,8 +11,13 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES 
(1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); + +\set VERBOSITY default SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +\set VERBOSITY terse + ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; + SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; SELECT * FROM test.hash_rel; @@ -39,8 +44,13 @@ CREATE TABLE test.range_rel ( CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; + +\set VERBOSITY default SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +\set VERBOSITY terse + ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; + SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); SELECT COUNT(*) FROM test.range_rel; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 379736a5..d30656a8 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -47,8 +47,8 @@ SELECT create_hash_partitions('test_exprs.canary', 'val', 5); CREATE TABLE test_exprs.hash_rel ( id SERIAL PRIMARY KEY, - value INTEGER, - value2 INTEGER + value INTEGER NOT NULL, + value2 INTEGER NOT NULL ); INSERT INTO test_exprs.hash_rel (value, value2) SELECT val, val * 2 FROM generate_series(1, 5) val; @@ -101,7 +101,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5 * Test RANGE */ -CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP, txt TEXT); +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); INSERT INTO test_exprs.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; diff --git a/src/relation_info.c b/src/relation_info.c index ff2b59fb..a4a08a38 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -632,8 +632,7 @@ cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type_out) /* ret value #1 */ { - Node *parse_tree, - *raw_expr; + Node *parse_tree; List *query_tree_list; char *query_string, @@ -658,37 +657,8 @@ cook_partitioning_expression(const Oid relid, old_mcxt = MemoryContextSwitchTo(parse_mcxt); /* First we have to build a raw AST */ - raw_expr = parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); - - /* Check if raw_expr is NULLable */ - if (IsA(raw_expr, ColumnRef)) - { - ColumnRef *column = (ColumnRef *) raw_expr; - - if (list_length(column->fields) == 1) - { - HeapTuple htup; - bool attnotnull; - char *attname = strVal(linitial(column->fields)); - - /* check if attribute is nullable */ - htup = SearchSysCacheAttName(relid, attname); - if (HeapTupleIsValid(htup)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(htup); - attnotnull = att_tup->attnotnull; - ReleaseSysCache(htup); - } - else elog(ERROR, "cannot find type name for attribute \"%s\"" - " of relation \"%s\"", - attname, get_rel_name_or_relid(relid)); - - if (!attnotnull) - elog(ERROR, "partitioning key \"%s\" must be marked NOT NULL", - attname); - } - } + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); /* We don't need pg_pathman's magic here */ pathman_hooks_enabled = false; @@ -697,7 +667,9 @@ cook_partitioning_expression(const Oid relid, { Query *query; Node *expr; + int expr_attr; Relids expr_varnos; + Bitmapset *expr_varattnos = NULL; /* This will fail with ERROR in case of wrong expression */ query_tree_list = pg_analyze_and_rewrite(parse_tree, query_string, NULL, 0); @@ -729,12 +701,41 @@ cook_partitioning_expression(const Oid relid, /* Sanity check #5 */ expr_varnos = 
pull_varnos(expr); if (bms_num_members(expr_varnos) != 1 || - ((RangeTblEntry *) linitial(query->rtable))->relid != relid) + relid != ((RangeTblEntry *) linitial(query->rtable))->relid) { elog(ERROR, "partitioning expression should reference table \"%s\"", get_rel_name(relid)); } + + /* Sanity check #6 */ + pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); + expr_attr = -1; + while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + { + AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; + HeapTuple htup; + + htup = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (HeapTupleIsValid(htup)) + { + bool nullable; + + /* Fetch 'nullable' and free syscache tuple */ + nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; + ReleaseSysCache(htup); + + if (nullable) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" should be marked NOT NULL", + get_attname(relid, attnum)))); + } + } + + /* Free sets */ bms_free(expr_varnos); + bms_free(expr_varattnos); Assert(expr); expr_serialized = nodeToString(expr); From aa4213a26ab5d1437207861f968fe9459e7827f8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 31 May 2017 18:35:13 +0300 Subject: [PATCH 0583/1124] fix memory leak caused by invalidations, fix parents cache --- expected/pathman_calamity.out | 159 +++++++++++++++++++++++++-------- sql/pathman_calamity.sql | 29 ++++-- src/include/compat/pg_compat.h | 3 + src/include/relation_info.h | 51 ++--------- src/relation_info.c | 53 ++++++----- 5 files changed, 190 insertions(+), 105 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5e11a029..12ab1a78 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -851,9 +851,7 @@ DROP EXTENSION pg_pathman; */ CREATE SCHEMA calamity; CREATE EXTENSION pg_pathman; -/* Change this setting for code coverage */ -SET pg_pathman.enable_bounds_cache = 
false; -/* check view pathman_cache_stats */ +/* check that cache loading is lazy */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); create_range_partitions @@ -870,22 +868,54 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ partition parents cache | 0 (4 rows) -SELECT drop_partitions('calamity.test_pathman_cache_stats'); -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_1 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_2 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_3 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_4 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_5 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_6 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_7 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_8 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_9 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_10 - drop_partitions ------------------ - 10 +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 0 + partition parents cache | 0 +(4 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 (1 row) +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN 
+----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ context | entries --------------------------+--------- @@ -895,10 +925,9 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ partition parents cache | 0 (4 rows) -DROP TABLE calamity.test_pathman_cache_stats; /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; -/* check view pathman_cache_stats (one more time) */ +/* check view pathman_cache_stats (bounds cache enabled) */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); create_range_partitions @@ -906,31 +935,92 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on 
test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ context | entries --------------------------+--------- maintenance | 0 - partition bounds cache | 0 + partition bounds cache | 10 partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition dispatch cache | 0 partition parents cache | 0 (4 rows) -SELECT drop_partitions('calamity.test_pathman_cache_stats'); -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_1 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_2 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_3 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_4 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_5 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_6 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_7 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_8 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_9 -NOTICE: 0 rows copied from calamity.test_pathman_cache_stats_10 - drop_partitions ------------------ - 10 +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 (1 row) +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition dispatch cache | 1 + partition parents cache | 10 +(4 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +--------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition dispatch cache | 1 + partition parents cache | 0 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ context | entries --------------------------+--------- @@ -940,7 +1030,6 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ partition parents cache | 0 (4 rows) -DROP TABLE calamity.test_pathman_cache_stats; DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; /* diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 10dcc15b..f8c98255 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -372,27 +372,44 @@ DROP EXTENSION pg_pathman; CREATE SCHEMA calamity; CREATE EXTENSION pg_pathman; +/* check that 
cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; -/* check view pathman_cache_stats */ +/* check view pathman_cache_stats (bounds cache disabled) */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ -SELECT drop_partitions('calamity.test_pathman_cache_stats'); +DROP TABLE calamity.test_pathman_cache_stats CASCADE; SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ -DROP TABLE calamity.test_pathman_cache_stats; /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; -/* check view pathman_cache_stats (one more time) */ +/* check view pathman_cache_stats (bounds cache enabled) */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS 
OFF) SELECT * FROM calamity.test_pathman_cache_stats; +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ -SELECT drop_partitions('calamity.test_pathman_cache_stats'); +DROP TABLE calamity.test_pathman_cache_stats CASCADE; SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ -DROP TABLE calamity.test_pathman_cache_stats; DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e731268e..0ffb4cf1 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -25,6 +25,9 @@ #if PG_VERSION_NUM < 90600 #define ALLOCSET_DEFAULT_SIZES \ ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE + +#define ALLOCSET_SMALL_SIZES \ + ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE #endif diff --git a/src/include/relation_info.h b/src/include/relation_info.h index f9963a94..0edf96ee 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -18,6 +18,7 @@ #include "fmgr.h" #include "nodes/bitmapset.h" #include "nodes/nodes.h" +#include "nodes/memnodes.h" #include "nodes/primnodes.h" #include "nodes/value.h" #include "port/atomics.h" @@ -162,6 +163,8 @@ typedef struct Oid cmp_proc, /* comparison fuction for 'ev_type' */ hash_proc; /* hash function for 'ev_type' */ + + MemoryContext mcxt; /* memory context holding this struct */ } PartRelationInfo; #define PART_EXPR_VARNO ( 1 ) @@ -361,19 +364,13 @@ void shout_if_prel_is_invalid(const Oid parent_oid, * Useful functions & macros for freeing memory. 
*/ -#define FreeIfNotNull(ptr) \ - do { \ - if (ptr) \ - { \ - pfree((void *) ptr); \ - ptr = NULL; \ - } \ - } while(0) - +/* Remove all references to this parent from parents cache */ static inline void -FreeChildrenArray(PartRelationInfo *prel) +ForgetParent(PartRelationInfo *prel) { - uint32 i; + uint32 i; + + AssertArg(MemoryContextIsValid(prel->mcxt)); /* Remove relevant PartParentInfos */ if (prel->children) @@ -390,38 +387,6 @@ FreeChildrenArray(PartRelationInfo *prel) if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) forget_parent_of_partition(child, NULL); } - - pfree(prel->children); - prel->children = NULL; - } -} - -static inline void -FreeRangesArray(PartRelationInfo *prel) -{ - uint32 i; - - /* Remove RangeEntries array */ - if (prel->ranges) - { - /* Remove persistent entries if not byVal */ - if (!prel->ev_byval) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - Oid child = prel->ranges[i].child_oid; - - /* Skip if Oid is invalid (e.g. initialization error) */ - if (!OidIsValid(child)) - continue; - - FreeBound(&prel->ranges[i].min, prel->ev_byval); - FreeBound(&prel->ranges[i].max, prel->ev_byval); - } - } - - pfree(prel->ranges); - prel->ranges = NULL; } } diff --git a/src/relation_info.c b/src/relation_info.c index a4a08a38..3de86a5a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -180,8 +180,13 @@ refresh_pathman_relation_info(Oid relid, /* Fetch cooked partitioning expression */ expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); - /* Expression and attname should be saved in cache context */ - old_mcxt = MemoryContextSwitchTo(PathmanRelationCacheContext); + /* Create a new memory context to store expression tree etc */ + prel->mcxt = AllocSetContextCreate(PathmanRelationCacheContext, + CppAsString(refresh_pathman_relation_info), + ALLOCSET_SMALL_SIZES); + + /* Switch to persistent memory context */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); /* Build partitioning expression tree 
*/ prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); @@ -261,11 +266,15 @@ refresh_pathman_relation_info(Oid relid, } PG_CATCH(); { - /* Free remaining resources */ - FreeChildrenArray(prel); - FreeRangesArray(prel); - FreeIfNotNull(prel->expr_cstr); - FreeIfNotNull(prel->expr); + /* Remove this parent from parents cache */ + ForgetParent(prel); + + /* Delete unused 'prel_mcxt' */ + MemoryContextDelete(prel->mcxt); + + prel->children = NULL; + prel->ranges = NULL; + prel->mcxt = NULL; /* Rethrow ERROR further */ PG_RE_THROW(); @@ -314,22 +323,25 @@ invalidate_pathman_relation_info(Oid relid, bool *found) relid, action, &prel_found); + /* Handle valid PartRelationInfo */ if ((action == HASH_FIND || (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) { - FreeChildrenArray(prel); - FreeRangesArray(prel); - FreeIfNotNull(prel->expr_cstr); + /* Remove this parent from parents cache */ + ForgetParent(prel); - prel->valid = false; /* now cache entry is invalid */ + /* Drop cached bounds etc */ + MemoryContextDelete(prel->mcxt); } - /* Handle invalid PartRelationInfo */ - else if (prel) + + /* Set important default values */ + if (prel) { - prel->children = NULL; - prel->ranges = NULL; + prel->children = NULL; + prel->ranges = NULL; + prel->mcxt = NULL; - prel->valid = false; /* now cache entry is invalid */ + prel->valid = false; /* now cache entry is invalid */ } /* Set 'found' if necessary */ @@ -444,15 +456,14 @@ fill_prel_with_partitions(PartRelationInfo *prel, ) uint32 i; - MemoryContext cache_mcxt = PathmanRelationCacheContext, - temp_mcxt, /* reference temporary mcxt */ + MemoryContext temp_mcxt, /* reference temporary mcxt */ old_mcxt; /* reference current mcxt */ AssertTemporaryContext(); /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ - prel->children = AllocZeroArray(PT_ANY, cache_mcxt, parts_count, Oid); - prel->ranges = AllocZeroArray(PT_RANGE, cache_mcxt, parts_count, RangeEntry); + prel->children = 
AllocZeroArray(PT_ANY, prel->mcxt, parts_count, Oid); + prel->ranges = AllocZeroArray(PT_RANGE, prel->mcxt, parts_count, RangeEntry); /* Set number of children */ PrelChildrenCount(prel) = parts_count; @@ -491,7 +502,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, prel->ranges[i].child_oid = pbin->child_rel; /* Copy all min & max Datums to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(cache_mcxt); + old_mcxt = MemoryContextSwitchTo(prel->mcxt); { prel->ranges[i].min = CopyBound(&pbin->range_min, prel->ev_byval, From 09d9331f3698f730e69bab2586a14b800f044f25 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 31 May 2017 19:57:20 +0300 Subject: [PATCH 0584/1124] fix PartitionFilter (due to latest changes) --- src/partition_filter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/partition_filter.c b/src/partition_filter.c index 74053a46..df3e351d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -560,6 +560,9 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) } expr = PrelExpressionForRelid(prel, parent_varno); + /* HACK: protect expression from 'prel' invalidation */ + expr = copyObject(expr); + /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); From 0963406aeb82ce0579e5f674e031ee3a9607c82a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Jun 2017 15:02:30 +0300 Subject: [PATCH 0585/1124] make PrelExpressionForRelid() more memory-safe --- src/include/relation_info.h | 8 ++------ src/include/runtimeappend.h | 3 +++ src/nodes_common.c | 14 ++++++++------ src/partition_filter.c | 3 --- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 0edf96ee..cbc16b6e 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -261,15 +261,11 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) 
static inline Node * PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) { - Node *expr; - /* TODO: implement some kind of cache */ + Node *expr = copyObject(prel->expr); + if (rel_index != PART_EXPR_VARNO) - { - expr = copyObject(prel->expr); ChangeVarNodes(expr, PART_EXPR_VARNO, rel_index, 0); - } - else expr = prel->expr; return expr; } diff --git a/src/include/runtimeappend.h b/src/include/runtimeappend.h index a1f934c4..ee25c337 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtimeappend.h @@ -41,6 +41,9 @@ typedef struct /* Refined clauses for partition pruning */ List *canon_custom_exprs; + /* Copy of partitioning expression (protect from invalidations) */ + Node *prel_expr; + /* All available plans \ plan states */ HTAB *children_table; HASHCTL children_table_config; diff --git a/src/nodes_common.c b/src/nodes_common.c index ddd15ec7..7bfeff71 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -635,10 +635,16 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + const PartRelationInfo *prel; node->ss.ps.ps_TupFromTlist = false; + prel = get_pathman_relation_info(scan_state->relid); + + /* Prepare expression according to set_set_customscan_references() */ + scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); + /* Prepare custom expression according to set_set_customscan_references() */ scan_state->canon_custom_exprs = canonicalize_custom_exprs(scan_state->custom_exprs); @@ -709,18 +715,14 @@ rescan_append_common(CustomScanState *node) WalkerContext wcxt; Oid *parts; int nparts; - Node *prel_expr; prel = get_pathman_relation_info(scan_state->relid); Assert(prel); - /* Prepare expression according to set_set_customscan_references() */ - prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); - /* First we 
select all available partitions... */ ranges = list_make1_irange_full(prel, IR_COMPLETE); - InitWalkerContext(&wcxt, prel_expr, prel, econtext); + InitWalkerContext(&wcxt, scan_state->prel_expr, prel, econtext); foreach (lc, scan_state->canon_custom_exprs) { WrapperNode *wrap; diff --git a/src/partition_filter.c b/src/partition_filter.c index df3e351d..74053a46 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -560,9 +560,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) } expr = PrelExpressionForRelid(prel, parent_varno); - /* HACK: protect expression from 'prel' invalidation */ - expr = copyObject(expr); - /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); From 7e465d2ada4353ae54797639009f0a6f7e8bdd58 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 1 Jun 2017 17:20:50 +0300 Subject: [PATCH 0586/1124] remove duplicate code in prepare_rri_fdw_for_insert() --- src/partition_filter.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 74053a46..2fe5985f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -845,6 +845,7 @@ prepare_rri_fdw_for_insert(EState *estate, break; case PF_FDW_INSERT_POSTGRES: + case PF_FDW_INSERT_ANY_FDW: { ForeignDataWrapper *fdw; ForeignServer *fserver; @@ -852,23 +853,21 @@ prepare_rri_fdw_for_insert(EState *estate, /* Check if it's PostgreSQL FDW */ fserver = GetForeignServer(GetForeignTable(partid)->serverid); fdw = GetForeignDataWrapper(fserver->fdwid); - if (strcmp("postgres_fdw", fdw->fdwname) != 0) - elog(ERROR, "FDWs other than postgres_fdw are restricted"); - } - break; - - case PF_FDW_INSERT_ANY_FDW: - { - ForeignDataWrapper *fdw; - ForeignServer *fserver; - fserver = GetForeignServer(GetForeignTable(partid)->serverid); - fdw = 
GetForeignDataWrapper(fserver->fdwid); + /* Show message if not postgres_fdw */ if (strcmp("postgres_fdw", fdw->fdwname) != 0) - elog(WARNING, "unrestricted FDW mode may lead to \"%s\" crashes", - fdw->fdwname); + switch (pg_pathman_insert_into_fdw) + { + case PF_FDW_INSERT_POSTGRES: + elog(ERROR, + "FDWs other than postgres_fdw are restricted"); + + case PF_FDW_INSERT_ANY_FDW: + elog(WARNING, + "unrestricted FDW mode may lead to crashes"); + } } - break; /* do nothing */ + break; default: elog(ERROR, "Mode is not implemented yet"); From 24bc7d8f9e7c9fdd6edd13d7744ac5406d385a74 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Jun 2017 14:15:26 +0300 Subject: [PATCH 0587/1124] refactoring & bugfixes in scan_result_parts_storage() & select_partition_for_insert(), more tests --- hash.sql | 8 +-- init.sql | 14 ++--- range.sql | 16 +++--- src/include/partition_creation.h | 4 +- src/include/xact_handling.h | 8 +-- src/partition_creation.c | 22 +++++++- src/partition_filter.c | 66 +++++++++++++++------- src/pl_funcs.c | 16 +++--- src/pl_range_funcs.c | 17 ++---- src/relation_info.c | 4 +- src/utility_stmt_hooking.c | 5 +- src/xact_handling.c | 45 ++------------- tests/python/partitioning_test.py | 94 +++++++++++++++++++++++++++++-- 13 files changed, 201 insertions(+), 118 deletions(-) diff --git a/hash.sql b/hash.sql index b510cfb9..6bfd77a5 100644 --- a/hash.sql +++ b/hash.sql @@ -75,15 +75,15 @@ BEGIN IF lock_parent THEN /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); + PERFORM @extschema@.prevent_data_modification(parent_relid); ELSE /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); END IF; /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(old_partition); - PERFORM 
@extschema@.prevent_relation_modification(new_partition); + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class diff --git a/init.sql b/init.sql index 1109c45d..181a81a7 100644 --- a/init.sql +++ b/init.sql @@ -438,10 +438,10 @@ BEGIN IF partition_data = true THEN /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); + PERFORM @extschema@.prevent_data_modification(parent_relid); ELSE /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); END IF; /* Ignore temporary tables */ @@ -601,7 +601,7 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); + PERFORM @extschema@.prevent_data_modification(parent_relid); IF NOT EXISTS (SELECT FROM @extschema@.pathman_config WHERE partrel = parent_relid) THEN @@ -916,17 +916,17 @@ LANGUAGE C; * Lock partitioned relation to restrict concurrent * modification of partitioning scheme. */ -CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( +CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' +RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' LANGUAGE C STRICT; /* * Lock relation to restrict concurrent modification of data. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( +CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' +RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index 36b5029d..92dfd888 100644 --- a/range.sql +++ b/range.sql @@ -443,10 +443,10 @@ BEGIN PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition_relid); + PERFORM @extschema@.prevent_data_modification(partition_relid); part_expr_type = @extschema@.get_partition_key_type(parent_relid); part_expr := @extschema@.get_partition_key(parent_relid); @@ -536,7 +536,7 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -640,7 +640,7 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -744,7 +744,7 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); IF start_value >= end_value THEN RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; @@ -798,7 +798,7 @@ BEGIN END IF; /* Acquire lock on parent */ - PERFORM 
@extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN EXECUTE format('INSERT INTO %s SELECT * FROM %s', @@ -849,7 +849,7 @@ BEGIN PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_part_modification(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class @@ -926,7 +926,7 @@ BEGIN PERFORM @extschema@.validate_relname(partition_relid); /* Acquire lock on parent */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); + PERFORM @extschema@.prevent_data_modification(parent_relid); part_type := @extschema@.get_partition_type(parent_relid); diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 42454ca9..4a93bbfe 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -76,7 +76,9 @@ Node * build_raw_hash_check_tree(Node *raw_expression, Oid relid, Oid value_type); -void drop_check_constraint(Oid relid); +/* Add & drop pg_pathman's check constraint */ +void drop_pathman_check_constraint(Oid relid); +void add_pathman_check_constraint(Oid relid, Constraint *constraint); /* Update triggers */ diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index db7f37d8..30b19eec 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -20,11 +20,7 @@ /* * Transaction locks. */ -LockAcquireResult xact_lock_partitioned_rel(Oid relid, bool nowait); -void xact_unlock_partitioned_rel(Oid relid); - -LockAcquireResult xact_lock_rel_exclusive(Oid relid, bool nowait); -void xact_unlock_rel_exclusive(Oid relid); +LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); /* * Utility checks. 
@@ -35,7 +31,7 @@ bool xact_is_transaction_stmt(Node *stmt); bool xact_is_set_transaction_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); -void prevent_relation_modification_internal(Oid relid); +void prevent_data_modification_internal(Oid relid); #endif /* XACT_HANDLING_H */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 57a84426..f9eb7e81 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -375,7 +375,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, else if (nparts == 1) { /* Unlock the parent (we're not going to spawn) */ - xact_unlock_partitioned_rel(relid); + UnlockRelationOid(relid, ShareUpdateExclusiveLock); /* Simply return the suitable partition */ partid = parts[0]; @@ -1125,7 +1125,7 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Drop pg_pathman's check constraint by 'relid' */ void -drop_check_constraint(Oid relid) +drop_pathman_check_constraint(Oid relid) { char *constr_name; AlterTableStmt *stmt; @@ -1146,9 +1146,25 @@ drop_check_constraint(Oid relid) stmt->cmds = list_make1(cmd); - AlterTable(relid, ShareUpdateExclusiveLock, stmt); + /* See function AlterTableGetLockLevel() */ + AlterTable(relid, AccessExclusiveLock, stmt); } +/* Add pg_pathman's check constraint using 'relid' */ +void +add_pathman_check_constraint(Oid relid, Constraint *constraint) +{ + Relation part_rel = heap_open(relid, AccessExclusiveLock); + + AddRelationNewConstraints(part_rel, NIL, + list_make1(constraint), + false, true, true); + + heap_close(part_rel, NoLock); +} + + + /* Build RANGE check constraint expression tree */ Node * build_raw_range_check_tree(Node *raw_expression, diff --git a/src/partition_filter.c b/src/partition_filter.c index 2fe5985f..7dd83468 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -253,6 +253,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) LockRelationOid(partid, 
parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) { + /* Don't forget to drop invalid hash table entry */ + hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_REMOVE, NULL); + UnlockRelationOid(partid, parts_storage->head_open_lock_mode); return NULL; } @@ -419,34 +424,50 @@ select_partition_for_insert(Datum value, Oid value_type, { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; + Oid parent_relid = PrelParentRelid(prel); Oid selected_partid = InvalidOid; Oid *parts; int nparts; - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - else if (nparts == 0) + do { - selected_partid = create_partitions_for_value(PrelParentRelid(prel), - value, prel->ev_type); + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); - } - else selected_partid = parts[0]; + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + else if (nparts == 0) + { + selected_partid = create_partitions_for_value(parent_relid, + value, prel->ev_type); - /* Replace parent table with a suitable partition */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder = scan_result_parts_storage(selected_partid, parts_storage); - MemoryContextSwitchTo(old_mcxt); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(parent_relid, NULL); + } + else selected_partid = parts[0]; - /* Could not find suitable partition */ - if (rri_holder == NULL) - elog(ERROR, ERR_PART_ATTR_NO_PART, - datum_to_cstring(value, prel->ev_type)); + /* Replace parent table with a suitable partition */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + rri_holder = 
scan_result_parts_storage(selected_partid, parts_storage); + MemoryContextSwitchTo(old_mcxt); + + /* This partition has been dropped, repeat with a new 'prel' */ + if (rri_holder == NULL) + { + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(parent_relid, NULL); + + /* Get a fresh PartRelationInfo */ + prel = get_pathman_relation_info(parent_relid); + + /* Paranoid check (all partitions have vanished) */ + if (!prel) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)); + } + } + /* Loop until we get some result */ + while (rri_holder == NULL); return rri_holder; } @@ -629,7 +650,10 @@ partition_filter_exec(CustomScanState *node) if (itemIsDone != ExprSingleResult) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); - /* Search for a matching partition */ + /* + * Search for a matching partition. + * WARNING: 'prel' might change after this call! + */ rri_holder = select_partition_for_insert(value, prel->ev_type, prel, &state->result_parts, estate); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 58a78210..24812a54 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -64,8 +64,8 @@ PG_FUNCTION_INFO_V1( is_tuple_convertible ); PG_FUNCTION_INFO_V1( add_to_pathman_config ); PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); -PG_FUNCTION_INFO_V1( lock_partitioned_relation ); -PG_FUNCTION_INFO_V1( prevent_relation_modification ); +PG_FUNCTION_INFO_V1( prevent_part_modification ); +PG_FUNCTION_INFO_V1( prevent_data_modification ); PG_FUNCTION_INFO_V1( validate_part_callback_pl ); PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); @@ -774,8 +774,8 @@ add_to_pathman_config(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); - /* Protect relation from concurrent modification */ - xact_lock_rel_exclusive(relid, true); + /* Protect data + definition from concurrent modification */ + LockRelationOid(relid, 
AccessExclusiveLock); /* Check that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) @@ -1000,12 +1000,12 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) * Acquire appropriate lock on a partitioned relation. */ Datum -lock_partitioned_relation(PG_FUNCTION_ARGS) +prevent_part_modification(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); /* Lock partitioned relation till transaction's end */ - xact_lock_partitioned_rel(relid, false); + LockRelationOid(relid, ShareUpdateExclusiveLock); PG_RETURN_VOID(); } @@ -1014,9 +1014,9 @@ lock_partitioned_relation(PG_FUNCTION_ARGS) * Lock relation exclusively & check for current isolation level. */ Datum -prevent_relation_modification(PG_FUNCTION_ARGS) +prevent_data_modification(PG_FUNCTION_ARGS) { - prevent_relation_modification_internal(PG_GETARG_OID(0)); + prevent_data_modification_internal(PG_GETARG_OID(0)); PG_RETURN_VOID(); } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 54ed56c9..e3b6ba99 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -670,15 +670,15 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) ranges = PrelGetRangesArray(prel); /* Lock parent till transaction's end */ - xact_lock_partitioned_rel(parent, false); + LockRelationOid(parent, ShareUpdateExclusiveLock); /* Process partitions */ for (i = 0; i < nparts; i++) { int j; - /* Lock partition in ACCESS EXCLUSIVE mode */ - prevent_relation_modification_internal(parts[0]); + /* Prevent modification of partitions */ + LockRelationOid(parts[0], AccessExclusiveLock); /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) @@ -1072,10 +1072,9 @@ modify_range_constraint(Oid partition_relid, { Node *expr; Constraint *constraint; - Relation partition_rel; /* Drop old constraint */ - drop_check_constraint(partition_relid); + drop_pathman_check_constraint(partition_relid); /* Parse expression */ expr = parse_partitioning_expression(partition_relid, 
expression, NULL, NULL); @@ -1087,12 +1086,8 @@ modify_range_constraint(Oid partition_relid, upper, expression_type); - /* Open the relation and add new check constraint */ - partition_rel = heap_open(partition_relid, AccessExclusiveLock); - AddRelationNewConstraints(partition_rel, NIL, - list_make1(constraint), - false, true, true); - heap_close(partition_rel, NoLock); + /* Add new constraint */ + add_pathman_check_constraint(partition_relid, constraint); } /* diff --git a/src/relation_info.c b/src/relation_info.c index 3de86a5a..460b7898 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -407,7 +407,7 @@ get_pathman_relation_info_after_lock(Oid relid, LockAcquireResult acquire_result; /* Restrict concurrent partition creation (it's dangerous) */ - acquire_result = xact_lock_partitioned_rel(relid, false); + acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); /* Invalidate cache entry (see AcceptInvalidationMessages()) */ invalidate_pathman_relation_info(relid, NULL); @@ -418,7 +418,7 @@ get_pathman_relation_info_after_lock(Oid relid, prel = get_pathman_relation_info(relid); if (!prel && unlock_if_not_found) - xact_unlock_partitioned_rel(relid); + UnlockRelationOid(relid, ShareUpdateExclusiveLock); return prel; } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 43bec993..11f75a94 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -650,7 +650,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (itemIsDone != ExprSingleResult) elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); - /* Search for a matching partition */ + /* + * Search for a matching partition. + * WARNING: 'prel' might change after this call! 
+ */ rri_holder = select_partition_for_insert(value, prel->ev_type, prel, &parts_storage, estate); diff --git a/src/xact_handling.c b/src/xact_handling.c index 2c49067e..48efac09 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -48,39 +48,12 @@ LockAcquireOid(Oid relid, LOCKMODE lockmode, bool sessionLock, bool dontWait) /* - * Lock certain partitioned relation to disable concurrent access. + * Acquire lock and return LockAcquireResult. */ LockAcquireResult -xact_lock_partitioned_rel(Oid relid, bool nowait) +xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait) { - return LockAcquireOid(relid, ShareUpdateExclusiveLock, false, nowait); -} - -/* - * Unlock partitioned relation. - */ -void -xact_unlock_partitioned_rel(Oid relid) -{ - UnlockRelationOid(relid, ShareUpdateExclusiveLock); -} - -/* - * Lock relation exclusively (SELECTs are possible). - */ -LockAcquireResult -xact_lock_rel_exclusive(Oid relid, bool nowait) -{ - return LockAcquireOid(relid, ExclusiveLock, false, nowait); -} - -/* - * Unlock relation (exclusive lock). - */ -void -xact_unlock_rel_exclusive(Oid relid) -{ - UnlockRelationOid(relid, ExclusiveLock); + return LockAcquireOid(relid, lockmode, false, nowait); } /* @@ -220,7 +193,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) * Lock relation exclusively & check for current isolation level. */ void -prevent_relation_modification_internal(Oid relid) +prevent_data_modification_internal(Oid relid) { /* * Check that isolation level is READ COMMITTED. @@ -232,13 +205,5 @@ prevent_relation_modification_internal(Oid relid) (errmsg("Cannot perform blocking partitioning operation"), errdetail("Expected READ COMMITTED isolation level"))); - /* - * Check if table is being modified - * concurrently in a separate transaction. 
- */ - if (!xact_lock_rel_exclusive(relid, true)) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Table \"%s\" is being modified concurrently", - get_rel_name_or_relid(relid)))); + LockRelationOid(relid, AccessExclusiveLock); } diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cda00c62..a8a52d11 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -186,7 +186,7 @@ def test_replication(self): def test_locks(self): """Test that a session trying to create new partitions waits for other - sessions if they doing the same""" + sessions if they are doing the same""" import threading import time @@ -201,11 +201,10 @@ def set(self, value): def get(self): return self.flag - # There is one flag for each thread which shows if thread have done - # its work + # There is one flag for each thread which shows if thread have done its work flags = [Flag(False) for i in range(3)] - # All threads synchronizes though this lock + # All threads synchronize though this lock lock = threading.Lock() # Define thread function @@ -678,20 +677,34 @@ def test_conc_part_creation_insert(self): # Thread for connection #2 (it has to wait) def con2_thread(): con2.execute('insert into ins_test values(51)') + con2.commit() # Step 1: lock partitioned table in con1 con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache con1.execute('lock table ins_test in share update exclusive mode') # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache t = threading.Thread(target=con2_thread) t.start() - # Step 3: try inserting new value in con1 (success, unlock) + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new 
value in con1 (success, unlock) con1.execute('insert into ins_test values(52)') con1.commit() - # Step 4: wait for con2 + # Step 5: wait for con2 t.join() rows = con1.execute(""" @@ -715,6 +728,75 @@ def con2_thread(): node.stop() node.cleanup() + def test_conc_part_merge_insert(self): + """Test concurrent merge_range_partitions() + INSERT""" + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + con0.begin() + con0.execute('create table ins_test(val int not null)') + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 
20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + # Stop instance and finish work + node.stop() + node.cleanup() + def test_pg_dump(self): """ Test using dump and restore of partitioned table through pg_dump and pg_restore tools. From c3cedda33e241c62690c057ac05ac0ceca16b048 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Jun 2017 14:40:47 +0300 Subject: [PATCH 0588/1124] small fixes here and there --- src/partition_creation.c | 15 +++++++-------- src/partition_filter.c | 2 +- src/pl_funcs.c | 18 ++++-------------- 3 files changed, 12 insertions(+), 23 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index f9eb7e81..54c680a0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -432,12 +432,12 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, } } else - elog(ERROR, "pg_pathman's config does not contain relation \"%s\"", + elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(relid)); } PG_CATCH(); { - ErrorData *edata; + ErrorData *error; /* Simply rethrow ERROR if we're in backend */ if (!is_background_worker) @@ -445,16 +445,15 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); + error = CopyErrorData(); FlushErrorState(); /* Produce log message if we're in BGW */ - ereport(LOG, - (errmsg(CppAsString(create_partitions_for_value_internal) ": %s [%u]", - edata->message, MyProcPid), - (edata->detail) ? 
errdetail("%s", edata->detail) : 0)); + error->elevel = LOG; + error->message = psprintf(CppAsString(create_partitions_for_value_internal) + ": %s [%u]", error->message, MyProcPid); - FreeErrorData(edata); + ReThrowError(error); /* Reset 'partid' in case of error */ partid = InvalidOid; diff --git a/src/partition_filter.c b/src/partition_filter.c index 7dd83468..edabceed 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -439,7 +439,7 @@ select_partition_for_insert(Datum value, Oid value_type, else if (nparts == 0) { selected_partid = create_partitions_for_value(parent_relid, - value, prel->ev_type); + value, value_type); /* get_pathman_relation_info() will refresh this entry */ invalidate_pathman_relation_info(parent_relid, NULL); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 24812a54..4c009c1e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -765,7 +765,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Datum expr_datum; PathmanInitState init_state; - MemoryContext old_mcxt = CurrentMemoryContext; if (!PG_ARGISNULL(0)) { @@ -887,20 +886,11 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } PG_CATCH(); { - ErrorData *edata; - - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); - FlushErrorState(); - /* We have to restore all changed flags */ restore_pathman_init_state(&init_state); - /* Show error message */ - elog(ERROR, "%s", edata->message); - - FreeErrorData(edata); + /* Rethrow ERROR */ + PG_RE_THROW(); } PG_END_TRY(); } @@ -1262,11 +1252,11 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - target_relid = create_partitions_for_value(PrelParentRelid(prel), + target_relid = create_partitions_for_value(parent_relid, value, value_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + invalidate_pathman_relation_info(parent_relid, NULL); } else 
target_relid = parts[0]; From 90c48a57119e89243cdb29f6c4e24d76f09eb459 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Jun 2017 15:04:12 +0300 Subject: [PATCH 0589/1124] slightly improve Makefiles --- Makefile | 2 +- tests/cmocka/Makefile | 1 + travis/pg-travis-test.sh | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 72c1c57b..e0ef3f74 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ src/compat/rowmarks_fix.o $(WIN32RES) -PG_CPPFLAGS = -I$(CURDIR)/src/include +override PG_CPPFLAGS += -I$(CURDIR)/src/include EXTENSION = pg_pathman diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 65f967e6..d46ad869 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -7,6 +7,7 @@ CFLAGS += -I$(CURDIR)/../../src/include CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) +CFLAGS += $(PG_CPPFLAGS) LDFLAGS = -lcmocka TEST_BIN = rangeset_tests diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 284285f2..5c0ec44e 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -126,7 +126,7 @@ LD_LIBRARY_PATH=/usr/local/lib export LD_LIBRARY_PATH # run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" cmocka_tests || status=$? +make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? 
# remove useless gcov files rm -f tests/cmocka/*.gcno From 7d1bf40b169a3675ee20e99830e2e3dabc2bd77a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Jun 2017 19:31:13 +0300 Subject: [PATCH 0590/1124] light refactoring, enable composite partitioning key (record(...)::TYPE) --- expected/pathman_expressions.out | 104 +++++++++++++++++++++++++++ sql/pathman_expressions.sql | 32 +++++++++ src/include/pathman.h | 2 +- src/partition_creation.c | 75 +++++++++++++++----- src/pg_pathman.c | 116 ++++++++++++++++++++++++------- 5 files changed, 284 insertions(+), 45 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 1e5d7d47..7de37f78 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -95,6 +95,110 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < A DROP TABLE test_exprs.canon CASCADE; NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1, ''a'')'::test_exprs.composite, + '(10, ''a'')'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10, ''a'')'::test_exprs.composite, + '(20, ''a'')'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20, ''a'')'::test_exprs.composite, + '(30, ''a'')'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30, ''a'')'::test_exprs.composite, + 
'(40, ''a'')'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, 
b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_2 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_4 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) +(9 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects /* We use this rel to check 'pathman_hooks_enabled' */ CREATE TABLE test_exprs.canary(val INT4 NOT NULL); CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index d30656a8..b3529cfc 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -34,6 +34,38 @@ DROP TABLE test_exprs.canon CASCADE; +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); +SELECT add_range_partition('test_exprs.composite', + '(1, ''a'')'::test_exprs.composite, + '(10, ''a'')'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(10, ''a'')'::test_exprs.composite, + '(20, ''a'')'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(20, ''a'')'::test_exprs.composite, + '(30, ''a'')'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(30, ''a'')'::test_exprs.composite, + '(40, ''a'')'::test_exprs.composite); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; 
+EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); +DROP TABLE test_exprs.composite CASCADE; + + /* We use this rel to check 'pathman_hooks_enabled' */ CREATE TABLE test_exprs.canary(val INT4 NOT NULL); CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); diff --git a/src/include/pathman.h b/src/include/pathman.h index c1a45939..37d1b481 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -157,7 +157,7 @@ typedef struct } while (0) /* Check that WalkerContext contains ExprContext (plan execution stage) */ -#define WcxtHasExprContext(wcxt) ( (wcxt)->econtext ) +#define WcxtHasExprContext(wcxt) ( (wcxt)->econtext != NULL ) /* Examine expression in order to select partitions */ WrapperNode *walk_expr_tree(Expr *expr, const WalkerContext *context); diff --git a/src/partition_creation.c b/src/partition_creation.c index 54c680a0..401ad7f0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1171,6 +1171,29 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) + +#define BuildCmpExpr(node, opname, expr, c) \ + do { \ + (node)->name = list_make1(makeString(opname)); \ + (node)->kind = AEXPR_OP; \ + (node)->lexpr = (Node *) (expr); \ + (node)->rexpr = (Node *) (c); \ + (node)->location = -1; \ + } while (0) + +#define CopyTypeCastExpr(node, src, argument) \ + do { \ + memcpy((node), (src), sizeof(TypeCast)); \ + (node)->arg = (Node *) (argument); \ + (node)->typeName = (TypeName *) copyObject((node)->typeName); \ + } while (0) + BoolExpr *and_oper = makeNode(BoolExpr); A_Expr *left_arg = makeNode(A_Expr), *right_arg = makeNode(A_Expr); @@ -1184,16 
+1207,22 @@ build_raw_range_check_tree(Node *raw_expression, /* Left comparison (VAR >= start_value) */ if (!IsInfinite(start_value)) { - /* Left boundary */ - left_const->val = make_string_value_struct( - datum_to_cstring(BoundGetValue(start_value), value_type)); - left_const->location = -1; + /* Build left boundary */ + BuildConstExpr(left_const, BoundGetValue(start_value), value_type); + + /* Build ">=" clause */ + BuildCmpExpr(left_arg, ">=", raw_expression, left_const); - left_arg->name = list_make1(makeString(">=")); - left_arg->kind = AEXPR_OP; - left_arg->lexpr = raw_expression; - left_arg->rexpr = (Node *) left_const; - left_arg->location = -1; + /* Cast const to expression's type (e.g. composite key, row type) */ + if (IsA(raw_expression, TypeCast)) + { + TypeCast *cast = makeNode(TypeCast); + + /* Copy cast to expression's type */ + CopyTypeCastExpr(cast, raw_expression, left_const); + + left_arg->rexpr = (Node *) cast; + } and_oper->args = lappend(and_oper->args, left_arg); } @@ -1201,16 +1230,22 @@ build_raw_range_check_tree(Node *raw_expression, /* Right comparision (VAR < end_value) */ if (!IsInfinite(end_value)) { - /* Right boundary */ - right_const->val = make_string_value_struct( - datum_to_cstring(BoundGetValue(end_value), value_type)); - right_const->location = -1; + /* Build right boundary */ + BuildConstExpr(right_const, BoundGetValue(end_value), value_type); + + /* Build "<" clause */ + BuildCmpExpr(right_arg, "<", raw_expression, right_const); - right_arg->name = list_make1(makeString("<")); - right_arg->kind = AEXPR_OP; - right_arg->lexpr = raw_expression; - right_arg->rexpr = (Node *) right_const; - right_arg->location = -1; + /* Cast const to expression's type (e.g. 
composite key, row type) */ + if (IsA(raw_expression, TypeCast)) + { + TypeCast *cast = makeNode(TypeCast); + + /* Copy cast to expression's type */ + CopyTypeCastExpr(cast, raw_expression, right_const); + + right_arg->rexpr = (Node *) cast; + } and_oper->args = lappend(and_oper->args, right_arg); } @@ -1220,6 +1255,10 @@ build_raw_range_check_tree(Node *raw_expression, elog(ERROR, "cannot create partition with range (-inf, +inf)"); return (Node *) and_oper; + +#undef BuildConstExpr +#undef BuildCmpExpr +#undef CopyTypeCastExpr } /* Build complete RANGE check constraint */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7c265640..c3be23a3 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -79,9 +79,6 @@ static bool is_key_op_param(const OpExpr *expr, const WalkerContext *context, Node **param_ptr); -static Const *extract_const(Param *param, - const WalkerContext *context); - static Datum array_find_min_max(Datum *values, bool *isnull, int length, @@ -110,16 +107,97 @@ static void generate_mergeappend_paths(PlannerInfo *root, PathKey *pathkeyDesc); -/* We can transform Param into Const provided that 'econtext' is available */ -#define IsConstValue(node, wcxt) \ - ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) +/* Can we transform this node into a Const? */ +static bool +IsConstValue(Node *node, const WalkerContext *context) +{ + switch (nodeTag(node)) + { + case T_Const: + return true; -#define ExtractConst(node, wcxt) \ - ( \ - IsA((node), Param) ? 
\ - extract_const((Param *) (node), (wcxt)) : \ - ((Const *) (node)) \ - ) + case T_Param: + return WcxtHasExprContext(context); + + case T_RowExpr: + { + RowExpr *row = (RowExpr *) node; + ListCell *lc; + + /* Can't do anything about RECORD of wrong type */ + if (row->row_typeid != context->prel->ev_type) + return false; + + /* Check that args are const values */ + foreach (lc, row->args) + if (!IsConstValue((Node *) lfirst(lc), context)) + return false; + } + return true; + + default: + return false; + } +} + +/* Extract a Const from node that has been checked by IsConstValue() */ +static Const * +ExtractConst(Node *node, const WalkerContext *context) +{ + ExprState *estate; + + Datum value; + bool isnull; + + Oid typid, + collid; + int typmod; + + /* Fast path for Consts */ + if (IsA(node, Const)) + return (Const *) node; + + /* Evaluate expression */ + estate = ExecInitExpr((Expr *) node, NULL); + value = ExecEvalExpr(estate, context->econtext, &isnull, NULL); + + switch (nodeTag(node)) + { + case T_Param: + { + Param *param = (Param *) node; + + typid = param->paramtype; + typmod = param->paramtypmod; + collid = param->paramcollid; + } + break; + + case T_RowExpr: + { + RowExpr *row = (RowExpr *) node; + + typid = row->row_typeid; + typmod = - 1; + collid = InvalidOid; + } + break; + + default: + { + /* Keep compiler happy */ + typid = InvalidOid; + typmod = - 1; + collid = InvalidOid; + + elog(ERROR, "error in function " CppAsString(ExtractConst)); + } + break; + } + + return makeConst(typid, typmod, collid, get_typlen(typid), + value, isnull, get_typbyval(typid)); +} /* Selectivity estimator for common 'paramsel' */ static inline double @@ -1287,20 +1365,6 @@ is_key_op_param(const OpExpr *expr, return false; } -/* Extract (evaluate) Const from Param node */ -static Const * -extract_const(Param *param, - const WalkerContext *context) -{ - ExprState *estate = ExecInitExpr((Expr *) param, NULL); - bool isnull; - Datum value = ExecEvalExpr(estate, 
context->econtext, &isnull, NULL); - - return makeConst(param->paramtype, param->paramtypmod, - param->paramcollid, get_typlen(param->paramtype), - value, isnull, get_typbyval(param->paramtype)); -} - /* Find Max or Min value of array */ static Datum array_find_min_max(Datum *values, From c856cd15021046b816e6a5c6db5f35e2acad6b57 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 2 Jun 2017 19:48:02 +0300 Subject: [PATCH 0591/1124] remove unnecessary code, fix tests --- expected/pathman_expressions.out | 16 ++++++++-------- sql/pathman_expressions.sql | 16 ++++++++-------- src/pg_pathman.c | 10 +--------- 3 files changed, 17 insertions(+), 25 deletions(-) diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 7de37f78..bdef76a3 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -109,32 +109,32 @@ SELECT add_to_pathman_config('test_exprs.composite', (1 row) SELECT add_range_partition('test_exprs.composite', - '(1, ''a'')'::test_exprs.composite, - '(10, ''a'')'::test_exprs.composite); + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); add_range_partition ------------------------ test_exprs.composite_1 (1 row) SELECT add_range_partition('test_exprs.composite', - '(10, ''a'')'::test_exprs.composite, - '(20, ''a'')'::test_exprs.composite); + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); add_range_partition ------------------------ test_exprs.composite_2 (1 row) SELECT add_range_partition('test_exprs.composite', - '(20, ''a'')'::test_exprs.composite, - '(30, ''a'')'::test_exprs.composite); + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); add_range_partition ------------------------ test_exprs.composite_3 (1 row) SELECT add_range_partition('test_exprs.composite', - '(30, ''a'')'::test_exprs.composite, - '(40, ''a'')'::test_exprs.composite); + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); add_range_partition 
------------------------ test_exprs.composite_4 diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index b3529cfc..c01e971f 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -43,17 +43,17 @@ SELECT add_to_pathman_config('test_exprs.composite', '(a, b)::test_exprs.composite', NULL); SELECT add_range_partition('test_exprs.composite', - '(1, ''a'')'::test_exprs.composite, - '(10, ''a'')'::test_exprs.composite); + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); SELECT add_range_partition('test_exprs.composite', - '(10, ''a'')'::test_exprs.composite, - '(20, ''a'')'::test_exprs.composite); + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); SELECT add_range_partition('test_exprs.composite', - '(20, ''a'')'::test_exprs.composite, - '(30, ''a'')'::test_exprs.composite); + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); SELECT add_range_partition('test_exprs.composite', - '(30, ''a'')'::test_exprs.composite, - '(40, ''a'')'::test_exprs.composite); + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); SELECT expr FROM pathman_config; /* check expression */ INSERT INTO test_exprs.composite VALUES(2, 'a'); INSERT INTO test_exprs.composite VALUES(11, 'a'); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index c3be23a3..71064125 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -184,15 +184,7 @@ ExtractConst(Node *node, const WalkerContext *context) break; default: - { - /* Keep compiler happy */ - typid = InvalidOid; - typmod = - 1; - collid = InvalidOid; - - elog(ERROR, "error in function " CppAsString(ExtractConst)); - } - break; + elog(ERROR, "error in function " CppAsString(ExtractConst));; } return makeConst(typid, typmod, collid, get_typlen(typid), From 53e7c7798680e36fcaedde81144a8a6d5a887a1b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 13:54:16 +0300 Subject: [PATCH 0592/1124] refactoring, do not emit update-related ERROR on 
SET & ALTER EXTENSION pg_pathman (issue #95) --- src/hooks.c | 31 +++++++++++++++++++++++++------ src/include/init.h | 12 ++++++------ src/include/pathman.h | 8 ++++++++ src/include/xact_handling.h | 3 ++- src/init.c | 12 ++++++------ src/partition_creation.c | 12 ++++++------ src/pg_pathman.c | 4 ++-- src/relation_info.c | 2 +- src/xact_handling.c | 30 ++++++++++++++++++++++-------- 9 files changed, 78 insertions(+), 36 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 6b9973a5..70feabf5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -613,16 +613,35 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (!pathman_hooks_enabled) return; - /* We shouldn't do anything on BEGIN or SET ISOLATION LEVEL stmts */ - if (query->commandType == CMD_UTILITY && - (xact_is_transaction_stmt(query->utilityStmt) || - xact_is_set_transaction_stmt(query->utilityStmt))) - return; - /* Finish delayed invalidation jobs */ if (IsPathmanReady()) finish_delayed_invalidation(); + /* + * We shouldn't proceed on: + * BEGIN + * SET [TRANSACTION] + */ + if (query->commandType == CMD_UTILITY && + (xact_is_transaction_stmt(query->utilityStmt) || + xact_is_set_stmt(query->utilityStmt))) + return; + + /* + * We should also disable pg_pathman on: + * ALTER EXTENSION pg_pathman + */ + if (query->commandType == CMD_UTILITY && + xact_is_alter_pathman_stmt(query->utilityStmt)) + { + /* Disable pg_pathman to perform a painless update */ + (void) set_config_option(PATHMAN_ENABLE, "off", + PGC_SUSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + return; + } + /* Load config if pg_pathman exists & it's still necessary */ if (IsPathmanEnabled() && !IsPathmanInitialized() && diff --git a/src/include/init.h b/src/include/init.h index a2fb494e..24a08c5a 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -143,14 +143,14 @@ simpify_mcxt_name(MemoryContext mcxt) /* Default column values for PATHMAN_CONFIG_PARAMS */ -#define DEFAULT_ENABLE_PARENT false -#define 
DEFAULT_AUTO true -#define DEFAULT_INIT_CALLBACK InvalidOid -#define DEFAULT_SPAWN_USING_BGW false +#define DEFAULT_PATHMAN_ENABLE_PARENT false +#define DEFAULT_PATHMAN_AUTO true +#define DEFAULT_PATHMAN_INIT_CALLBACK InvalidOid +#define DEFAULT_PATHMAN_SPAWN_USING_BGW false /* Other default values (for GUCs etc) */ -#define DEFAULT_PATHMAN_ENABLE true -#define DEFAULT_OVERRIDE_COPY true +#define DEFAULT_PATHMAN_ENABLE true +#define DEFAULT_PATHMAN_OVERRIDE_COPY true /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ diff --git a/src/include/pathman.h b/src/include/pathman.h index 37d1b481..93fb090a 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -40,6 +40,14 @@ #endif +/* + * Main GUC variables. + */ +#define PATHMAN_ENABLE "pg_pathman.enable" +#define PATHMAN_ENABLE_AUTO_PARTITION "pg_pathman.enable_auto_partition" +#define PATHMAN_OVERRIDE_COPY "pg_pathman.override_copy" + + /* * Definitions for the "pathman_config" table. */ diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index 30b19eec..27939304 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -28,7 +28,8 @@ LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); -bool xact_is_set_transaction_stmt(Node *stmt); +bool xact_is_set_stmt(Node *stmt); +bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); void prevent_data_modification_internal(Oid relid); diff --git a/src/init.c b/src/init.c index db06babb..1f5e470c 100644 --- a/src/init.c +++ b/src/init.c @@ -150,8 +150,8 @@ void init_main_pathman_toggles(void) { /* Main toggle, load_config() will enable it */ - DefineCustomBoolVariable("pg_pathman.enable", - "Enables pg_pathman's optimizations during the planner stage", + DefineCustomBoolVariable(PATHMAN_ENABLE, + 
"Enables pg_pathman's optimizations during planning stage", NULL, &pathman_init_state.pg_pathman_enable, DEFAULT_PATHMAN_ENABLE, @@ -162,11 +162,11 @@ init_main_pathman_toggles(void) NULL); /* Global toggle for automatic partition creation */ - DefineCustomBoolVariable("pg_pathman.enable_auto_partition", + DefineCustomBoolVariable(PATHMAN_ENABLE_AUTO_PARTITION, "Enables automatic partition creation", NULL, &pathman_init_state.auto_partition, - DEFAULT_AUTO, + DEFAULT_PATHMAN_AUTO, PGC_SUSET, 0, NULL, @@ -174,11 +174,11 @@ init_main_pathman_toggles(void) NULL); /* Global toggle for COPY stmt handling */ - DefineCustomBoolVariable("pg_pathman.override_copy", + DefineCustomBoolVariable(PATHMAN_OVERRIDE_COPY, "Override COPY statement handling", NULL, &pathman_init_state.override_copy, - DEFAULT_OVERRIDE_COPY, + DEFAULT_PATHMAN_OVERRIDE_COPY, PGC_SUSET, 0, NULL, diff --git a/src/partition_creation.c b/src/partition_creation.c index 401ad7f0..e1bc2829 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -134,7 +134,7 @@ create_single_range_partition_internal(Oid parent_relid, /* Cook args for init_callback */ MakeInitCallbackRangeParams(&callback_params, - DEFAULT_INIT_CALLBACK, + DEFAULT_PATHMAN_INIT_CALLBACK, parent_relid, partition_relid, *start_value, *end_value, value_type); @@ -193,7 +193,7 @@ create_single_hash_partition_internal(Oid parent_relid, /* Cook args for init_callback */ MakeInitCallbackHashParams(&callback_params, - DEFAULT_INIT_CALLBACK, + DEFAULT_PATHMAN_INIT_CALLBACK, parent_relid, partition_relid); /* Add constraint & execute init_callback */ @@ -263,8 +263,8 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL)) { /* Take default values */ - bool spawn_using_bgw = DEFAULT_SPAWN_USING_BGW, - enable_auto = DEFAULT_AUTO; + bool spawn_using_bgw = DEFAULT_PATHMAN_SPAWN_USING_BGW, + enable_auto = DEFAULT_PATHMAN_AUTO; /* Values to be extracted 
from PATHMAN_CONFIG_PARAMS */ Datum values[Natts_pathman_config_params]; @@ -835,7 +835,7 @@ create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) guc_level = NewGUCNestLevel(); /* ... and set client_min_messages = warning */ - (void) set_config_option("client_min_messages", "WARNING", + (void) set_config_option(CppAsString(client_min_messages), "WARNING", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); @@ -1683,7 +1683,7 @@ validate_part_callback(Oid procid, bool emit_error) Form_pg_proc functup; bool is_ok = true; - if (procid == DEFAULT_INIT_CALLBACK) + if (procid == DEFAULT_PATHMAN_INIT_CALLBACK) return true; tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(procid)); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 71064125..8b3fc5b3 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -232,8 +232,8 @@ _PG_init(void) /* Assign pg_pathman's initial state */ temp_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; - temp_init_state.auto_partition = DEFAULT_AUTO; - temp_init_state.override_copy = DEFAULT_OVERRIDE_COPY; + temp_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; + temp_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; temp_init_state.initialization_needed = true; /* ofc it's needed! */ /* Apply initial state */ diff --git a/src/relation_info.c b/src/relation_info.c index 460b7898..e8e079b6 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -302,7 +302,7 @@ refresh_pathman_relation_info(Oid relid, /* Else set default values if they cannot be found */ else { - prel->enable_parent = DEFAULT_ENABLE_PARENT; + prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; } /* We've successfully built a cache entry */ diff --git a/src/xact_handling.c b/src/xact_handling.c index 48efac09..0d4ea5b0 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -111,22 +111,36 @@ xact_is_transaction_stmt(Node *stmt) } /* - * Check if 'stmt' is SET TRANSACTION statement. 
+ * Check if 'stmt' is SET [TRANSACTION] statement. */ bool -xact_is_set_transaction_stmt(Node *stmt) +xact_is_set_stmt(Node *stmt) { + /* Check that SET TRANSACTION is implemented via VariableSetStmt */ + Assert(VAR_SET_MULTI > 0); + if (!stmt) return false; if (IsA(stmt, VariableSetStmt)) - { - VariableSetStmt *var_set_stmt = (VariableSetStmt *) stmt; + return true; - /* special case for SET TRANSACTION ... */ - if (var_set_stmt->kind == VAR_SET_MULTI) - return true; - } + return false; +} + +/* + * Check if 'stmt' is ALTER EXTENSION pg_pathman. + */ +bool +xact_is_alter_pathman_stmt(Node *stmt) +{ + if (!stmt) + return false; + + if (IsA(stmt, AlterExtensionStmt) && + 0 == strcmp(((AlterExtensionStmt *) stmt)->extname, + "pg_pathman")) + return true; return false; } From 4c166c14d930a612ae258c4513dce996a419b882 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 14:50:10 +0300 Subject: [PATCH 0593/1124] clean pg_compat.h --- src/include/compat/pg_compat.h | 36 ++++++++++++++-------------------- src/partition_filter.c | 2 +- src/pg_pathman.c | 3 ++- src/pl_funcs.c | 2 +- src/utility_stmt_hooking.c | 2 +- 5 files changed, 20 insertions(+), 25 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 194d1acf..8dcc339a 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -273,25 +273,22 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* - * ExecEvalExpr - * - * 'errmsg' specifies error string when result of ExecEvalExpr doesn't return - * a single value + * ExecEvalExpr() + * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. 
*/ #if PG_VERSION_NUM >= 100000 #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 #include "partition_filter.h" -extern Datum exprResult; -extern ExprDoneCond isDone; -static inline void -dummy_handler() { } -static inline void -not_signle_result_handler() -{ - elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); -} + +/* Variables for ExecEvalExprCompat() */ +extern Datum exprResult; +extern ExprDoneCond isDone; + +/* Error handlers */ +static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); } + #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ( \ exprResult = ExecEvalExpr((expr), (econtext), (isNull), &isDone), \ @@ -481,11 +478,11 @@ extern int oid_cmp(const void *p1, const void *p2); completionTag) \ do { \ PlannedStmt *stmt = makeNode(PlannedStmt); \ - stmt->commandType = CMD_UTILITY; \ - stmt->canSetTag = true; \ - stmt->utilityStmt = (parsetree); \ - stmt->stmt_location = -1; \ - stmt->stmt_len = 0; \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ ProcessUtility(stmt, (queryString), (context), (params), NULL, \ (dest), (completionTag)); \ } while (0) @@ -550,9 +547,6 @@ extern void set_rel_consider_parallel(PlannerInfo *root, */ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); -List *init_createstmts_for_partition(RangeVar *parent_rv, - RangeVar *partition_rv, - char *tablespace); #endif /* PG_COMPAT_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index d0cdae04..f0edf76d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -642,7 +642,7 @@ partition_filter_exec(CustomScanState *node) tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; value = ExecEvalExprCompat(state->expr_state, econtext, &isnull, - not_signle_result_handler); + 
mult_result_handler); econtext->ecxt_scantuple = tmp_slot; if (isnull) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 952e9e3d..0121cc20 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -159,7 +159,8 @@ ExtractConst(Node *node, const WalkerContext *context) /* Evaluate expression */ estate = ExecInitExpr((Expr *) node, NULL); - value = ExecEvalExpr(estate, context->econtext, &isnull, NULL); + value = ExecEvalExprCompat(estate, context->econtext, &isnull, + mult_result_handler); switch (nodeTag(node)) { diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 686ea03b..f2ca6164 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1233,7 +1233,7 @@ pathman_update_trigger_func(PG_FUNCTION_ARGS) new_tuple, &value_type); value = ExecEvalExprCompat(expr_state, econtext, &isnull, - not_signle_result_handler); + mult_result_handler); MemoryContextSwitchTo(old_mcxt); if (isnull) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 119a6e07..0f1710f1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -649,7 +649,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; value = ExecEvalExprCompat(expr_state, econtext, &isnull, - not_signle_result_handler); + mult_result_handler); econtext->ecxt_scantuple = tmp_slot; if (isnull) From 3a05cfa20ae83c6cb36c51334624b64aa05afc83 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 17:12:30 +0300 Subject: [PATCH 0594/1124] fix ExtractConst() for PostgreSQL 10 --- src/pg_pathman.c | 44 ++++++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0121cc20..1580bb22 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -144,23 +144,22 @@ IsConstValue(Node *node, const WalkerContext *context) static Const * ExtractConst(Node *node, const WalkerContext *context) { - ExprState *estate; + ExprState *estate; + 
ExprContext *econtext = context->econtext; - Datum value; - bool isnull; + Datum value; + bool isnull; - Oid typid, - collid; - int typmod; + Oid typid, + collid; + int typmod; /* Fast path for Consts */ if (IsA(node, Const)) return (Const *) node; - /* Evaluate expression */ - estate = ExecInitExpr((Expr *) node, NULL); - value = ExecEvalExprCompat(estate, context->econtext, &isnull, - mult_result_handler); + /* Just a paranoid check */ + Assert(IsConstValue(node, context)); switch (nodeTag(node)) { @@ -171,6 +170,9 @@ ExtractConst(Node *node, const WalkerContext *context) typid = param->paramtype; typmod = param->paramtypmod; collid = param->paramcollid; + + /* It must be provided */ + Assert(WcxtHasExprContext(context)); } break; @@ -179,15 +181,33 @@ ExtractConst(Node *node, const WalkerContext *context) RowExpr *row = (RowExpr *) node; typid = row->row_typeid; - typmod = - 1; + typmod = -1; collid = InvalidOid; + +#if PG_VERSION_NUM >= 100000 + /* If there's no context - create it! 
*/ + if (!WcxtHasExprContext(context)) + econtext = CreateStandaloneExprContext(); +#endif } break; default: - elog(ERROR, "error in function " CppAsString(ExtractConst));; + elog(ERROR, "error in function " CppAsString(ExtractConst)); } + /* Evaluate expression */ + estate = ExecInitExpr((Expr *) node, NULL); + value = ExecEvalExprCompat(estate, econtext, &isnull, + mult_result_handler); + +#if PG_VERSION_NUM >= 100000 + /* Free temp econtext if needed */ + if (econtext && !WcxtHasExprContext(context)) + FreeExprContext(econtext, true); +#endif + + /* Finally return Const */ return makeConst(typid, typmod, collid, get_typlen(typid), value, isnull, get_typbyval(typid)); } From b38f5ff1824ee8dab96605a8089242c90420272f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 17:38:03 +0300 Subject: [PATCH 0595/1124] fix formatting --- src/hooks.c | 32 +++++++++++++++++--------------- src/nodes_common.c | 8 +++----- src/partition_creation.c | 4 ++-- src/utility_stmt_hooking.c | 7 +++++-- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index dd15db45..92314c7b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -213,8 +213,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, have_dangerous_phv(root, outer->parent->relids, required_inner))) return; - initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, - extra); + initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, extra); pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); @@ -771,6 +770,7 @@ pathman_relcache_hook(Datum arg, Oid relid) /* * Utility function invoker hook. + * NOTE: 'first_arg' is (PlannedStmt *) in PG 10, or (Node *) in PG <= 9.6. 
*/ void #if PG_VERSION_NUM >= 100000 @@ -781,9 +781,9 @@ pathman_process_utility_hook(PlannedStmt *first_arg, QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag) { - Node *parsetree = first_arg->utilityStmt; - int stmt_location = first_arg->stmt_location, - stmt_len = first_arg->stmt_len; + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; #else pathman_process_utility_hook(Node *first_arg, const char *queryString, @@ -792,9 +792,9 @@ pathman_process_utility_hook(Node *first_arg, DestReceiver *dest, char *completionTag) { - Node *parsetree = first_arg; - int stmt_location = -1, - stmt_len = 0; + Node *parsetree = first_arg; + int stmt_location = -1, + stmt_len = 0; #endif if (IsPathmanReady()) @@ -809,8 +809,8 @@ pathman_process_utility_hook(Node *first_arg, uint64 processed; /* Handle our COPY case (and show a special cmd name) */ - PathmanDoCopy((CopyStmt *) parsetree, queryString, stmt_location, - stmt_len, &processed); + PathmanDoCopy((CopyStmt *) parsetree, queryString, + stmt_location, stmt_len, &processed); if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "PATHMAN COPY " UINT64_FORMAT, processed); @@ -845,9 +845,11 @@ pathman_process_utility_hook(Node *first_arg, } } - /* 'first_arg' is PlannedStmt in pg10 or Node parsetree in pg9.6 and lower */ - call_process_utility_compat( - (process_utility_hook_next) ? process_utility_hook_next : - standard_ProcessUtility, - first_arg, queryString, context, params, queryEnv, dest, completionTag); + /* Finally call process_utility_hook_next or standard_ProcessUtility */ + call_process_utility_compat((process_utility_hook_next ? 
+ process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, completionTag); } diff --git a/src/nodes_common.c b/src/nodes_common.c index bae293b5..7688bb07 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -674,13 +674,11 @@ exec_append_common(CustomScanState *node, return scan_state->slot; /* - * Assuming that current projection doesn't involve SRF - * - * Any SFR functions are evaluated in the specialized parent node ProjectSet + * Assuming that current projection doesn't involve SRF. + * NOTE: Any SFR functions are evaluated in ProjectSet node. */ ResetExprContext(node->ss.ps.ps_ExprContext); - node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = - scan_state->slot; + node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; result = ExecProject(node->ss.ps.ps_ProjInfo); return result; diff --git a/src/partition_creation.c b/src/partition_creation.c index 4c0046bd..f73861b6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -762,8 +762,8 @@ create_single_partition_internal(Oid parent_relid, create_stmt.partition_info = NULL; #endif #if PG_VERSION_NUM >= 100000 - create_stmt.partbound = NULL; - create_stmt.partspec = NULL; + create_stmt.partbound = NULL; + create_stmt.partspec = NULL; #endif /* Obtain the sequence of Stmts to create partition and link it to parent */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 0f1710f1..5fe4ef91 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -325,8 +325,11 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) * NOTE: based on DoCopy() (see copy.c). 
*/ void -PathmanDoCopy(const CopyStmt *stmt, const char *queryString, int stmt_location, - int stmt_len, uint64 *processed) +PathmanDoCopy(const CopyStmt *stmt, + const char *queryString, + int stmt_location, + int stmt_len, + uint64 *processed) { CopyState cstate; bool is_from = stmt->is_from; From 018428265817263de9f89d1d2093f2a2b30ec58d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 19:04:25 +0300 Subject: [PATCH 0596/1124] drop function create_partitions_from_range() --- README.md | 16 --- expected/pathman_basic.out | 104 +--------------- ...ergejoin_0.out => pathman_mergejoin_1.out} | 0 range.sql | 115 ------------------ sql/pathman_basic.sql | 28 ----- 5 files changed, 1 insertion(+), 262 deletions(-) rename expected/{pathman_mergejoin_0.out => pathman_mergejoin_1.out} (100%) diff --git a/README.md b/README.md index e9880b31..9f11ae34 100644 --- a/README.md +++ b/README.md @@ -122,22 +122,6 @@ create_range_partitions(relation REGCLASS, ``` Performs RANGE partitioning for `relation` by partitioning key `attribute`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on attribute values). Partition creation callback is invoked for each partition if set beforehand. -```plpgsql -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) - -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -``` -Performs RANGE-partitioning from specified range for `relation` by partitioning key `attribute`. 
Partition creation callback is invoked for each partition if set beforehand. ### Data migration diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fd50f06b..231786bd 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -404,50 +404,6 @@ SELECT count(*) FROM test.insert_into_select_copy; DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; NOTICE: drop cascades to 6 other objects -/* Test INSERT hooking with DATE type */ -CREATE TABLE test.insert_date_test(val DATE NOT NULL); -SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', - date '20161001', date '20170101', interval '1 month'); - create_partitions_from_range ------------------------------- - 4 -(1 row) - -INSERT INTO test.insert_date_test VALUES ('20161201'); /* just insert the date */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - count -------- - 4 -(1 row) - -INSERT INTO test.insert_date_test VALUES ('20170311'); /* append new partitions */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - count -------- - 6 -(1 row) - -INSERT INTO test.insert_date_test VALUES ('20160812'); /* prepend new partitions */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - count -------- - 8 -(1 row) - -SELECT min(val) FROM test.insert_date_test; /* check first date */ - min ------------- - 08-12-2016 -(1 row) - -SELECT max(val) FROM test.insert_date_test; /* check last date */ - max ------------- - 03-11-2017 -(1 row) - -DROP TABLE test.insert_date_test CASCADE; -NOTICE: drop cascades to 9 other objects SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; VACUUM; @@ -1527,23 +1483,6 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); {12-31-2014,01-02-2015} (1 row) -SELECT 
pathman.drop_partitions('test."RangeRel"'); -NOTICE: 0 rows copied from test."RangeRel_1" -NOTICE: 1 rows copied from test."RangeRel_2" -NOTICE: 1 rows copied from test."RangeRel_3" -NOTICE: 0 rows copied from test."RangeRel_4" -NOTICE: 1 rows copied from test."RangeRel_6" - drop_partitions ------------------ - 5 -(1 row) - -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01'::DATE, '2015-01-05'::DATE, '1 day'::INTERVAL); - create_partitions_from_range ------------------------------- - 5 -(1 row) - DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; @@ -1562,21 +1501,6 @@ SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); 3 (1 row) -SELECT pathman.drop_partitions('test."RangeRel"'); -NOTICE: 0 rows copied from test."RangeRel_1" -NOTICE: 0 rows copied from test."RangeRel_2" -NOTICE: 0 rows copied from test."RangeRel_3" - drop_partitions ------------------ - 3 -(1 row) - -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100); - create_partitions_from_range ------------------------------- - 3 -(1 row) - DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman; @@ -1739,32 +1663,6 @@ NOTICE: 0 rows copied from test.range_rel_15 14 (1 row) -SELECT create_partitions_from_range('test.range_rel', 'id', 1, 1000, 100); - create_partitions_from_range ------------------------------- - 10 -(1 row) - -SELECT drop_partitions('test.range_rel', TRUE); - drop_partitions ------------------ - 10 -(1 row) - -SELECT create_partitions_from_range('test.range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); - create_partitions_from_range ------------------------------- - 12 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-12-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on 
range_rel_12 - Filter: (dt = 'Tue Dec 15 00:00:00 2015'::timestamp without time zone) -(3 rows) - /* Test NOT operator */ CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); SELECT create_hash_partitions('bool_test', 'a', 3); @@ -1943,6 +1841,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 42 other objects +NOTICE: drop cascades to 29 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin_0.out b/expected/pathman_mergejoin_1.out similarity index 100% rename from expected/pathman_mergejoin_0.out rename to expected/pathman_mergejoin_1.out diff --git a/range.sql b/range.sql index 92dfd888..44c6697f 100644 --- a/range.sql +++ b/range.sql @@ -301,121 +301,6 @@ END $$ LANGUAGE plpgsql; -/* - * Creates RANGE partitions for specified range - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - expression TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS $$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.prepare_for_partitioning(parent_relid, - expression, - partition_data); - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - expression, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_naming_sequence(parent_relid); - - /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* 
Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range based on datetime expression - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - expression TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS $$ -DECLARE - part_count INTEGER := 0; - -BEGIN - PERFORM @extschema@.prepare_for_partitioning(parent_relid, - expression, - partition_data); - - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - expression, - start_value, - end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_naming_sequence(parent_relid); - - /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT); - - WHILE start_value <= end_value - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_tablespace(parent_relid); - - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - /* * Split RANGE partition diff --git 
a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 2f0725fb..808292ed 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -145,26 +145,6 @@ SELECT count(*) FROM test.insert_into_select_copy; DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; -/* Test INSERT hooking with DATE type */ -CREATE TABLE test.insert_date_test(val DATE NOT NULL); -SELECT pathman.create_partitions_from_range('test.insert_date_test', 'val', - date '20161001', date '20170101', interval '1 month'); - -INSERT INTO test.insert_date_test VALUES ('20161201'); /* just insert the date */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - -INSERT INTO test.insert_date_test VALUES ('20170311'); /* append new partitions */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - -INSERT INTO test.insert_date_test VALUES ('20160812'); /* prepend new partitions */ -SELECT count(*) FROM pathman.pathman_partition_list WHERE parent = 'test.insert_date_test'::REGCLASS; - -SELECT min(val) FROM test.insert_date_test; /* check first date */ -SELECT max(val) FROM test.insert_date_test; /* check last date */ - -DROP TABLE test.insert_date_test CASCADE; - - SET pg_pathman.enable_runtimeappend = OFF; SET pg_pathman.enable_runtimemergeappend = OFF; @@ -474,8 +454,6 @@ SELECT pathman.append_range_partition('test."RangeRel"'); SELECT pathman.prepend_range_partition('test."RangeRel"'); SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); -SELECT pathman.drop_partitions('test."RangeRel"'); -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01'::DATE, '2015-01-05'::DATE, '1 day'::INTERVAL); DROP TABLE test."RangeRel" CASCADE; SELECT * FROM pathman.pathman_config; CREATE TABLE test."RangeRel" ( @@ -483,8 +461,6 
@@ CREATE TABLE test."RangeRel" ( dt TIMESTAMP NOT NULL, txt TEXT); SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); -SELECT pathman.drop_partitions('test."RangeRel"'); -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100); DROP TABLE test."RangeRel" CASCADE; DROP EXTENSION pg_pathman; @@ -533,10 +509,6 @@ DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); -SELECT create_partitions_from_range('test.range_rel', 'id', 1, 1000, 100); -SELECT drop_partitions('test.range_rel', TRUE); -SELECT create_partitions_from_range('test.range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-12-15'; /* Test NOT operator */ CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); From 9e8bfc7eff23c56e9c0eb4e5f9d70e33e77f8ecf Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 19:30:13 +0300 Subject: [PATCH 0597/1124] check that table is partitioned by RANGE in create_single_range_partition() --- expected/pathman_calamity.out | 13 ++++++++++++- sql/pathman_calamity.sql | 8 +++++++- src/pl_range_funcs.c | 12 ++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 12ab1a78..729fd979 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -102,8 +102,19 @@ SELECT count(*) FROM calamity.part_test; DELETE FROM calamity.part_test; /* test function create_single_range_partition() */ -SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE 
+SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; /* test function create_range_partitions_internal() */ SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ ERROR: 'parent_relid' should not be NULL diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index f8c98255..79e85987 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -37,7 +37,13 @@ DELETE FROM calamity.part_test; /* test function create_single_range_partition() */ -SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ + +SELECT add_to_pathman_config('calamity.part_test', 'val'); +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; + /* test function create_range_partitions_internal() */ SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 33bf5b21..91452ba9 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -100,6 +100,9 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) RangeVar *partition_name_rv; char *tablespace; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + /* Handle 'parent_relid' */ if (!PG_ARGISNULL(0)) @@ -109,6 +112,15 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); + /* Check that table is 
partitioned by RANGE */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL) || + DatumGetPartType(values[Anum_pathman_config_parttype - 1]) != PT_RANGE) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + } + bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 1); start = PG_ARGISNULL(1) ? From 9c4dd4917b03c75ca719def80883847efc226e5a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 5 Jun 2017 23:52:54 +0300 Subject: [PATCH 0598/1124] formatting, fix outdated comments --- src/include/init.h | 8 ++++---- src/partition_creation.c | 12 ++++-------- src/utility_stmt_hooking.c | 2 +- src/utils.c | 2 +- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 24a08c5a..c1a1041c 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -135,10 +135,10 @@ simpify_mcxt_name(MemoryContext mcxt) */ #define DisablePathman() \ do { \ - pathman_init_state.pg_pathman_enable = false; \ - pathman_init_state.auto_partition = false; \ - pathman_init_state.override_copy = false; \ - pathman_init_state.initialization_needed = true; \ + pathman_init_state.pg_pathman_enable = false; \ + pathman_init_state.auto_partition = false; \ + pathman_init_state.override_copy = false; \ + pathman_init_state.initialization_needed = true; \ } while (0) diff --git a/src/partition_creation.c b/src/partition_creation.c index f73861b6..62f3a6b7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -348,14 +348,14 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - Oid base_bound_type; /* base type of prel->atttype */ + Oid base_bound_type; /* base type of prel->ev_type */ Oid base_value_type; /* base type of 
value_type */ /* Fetch PartRelationInfo by 'relid' */ prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); shout_if_prel_is_invalid(relid, prel, PT_RANGE); - /* Fetch base types of prel->atttype & value_type */ + /* Fetch base types of prel->ev_type & value_type */ base_bound_type = getBaseType(prel->ev_type); base_value_type = getBaseType(value_type); @@ -467,11 +467,7 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, /* * Append\prepend partitions if there's no partition to store 'value'. - * - * Used by create_partitions_for_value_internal(). - * - * NB: 'value' type is not needed since we've already taken - * it into account while searching for the 'cmp_proc'. + * NOTE: Used by create_partitions_for_value_internal(). */ static Oid spawn_partitions_val(Oid parent_relid, /* parent's Oid */ @@ -479,7 +475,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ const Bound *range_bound_max, /* parent's MAX boundary */ Oid range_bound_type, /* type of boundary's value */ Datum interval_binary, /* interval in binary form */ - Oid interval_type, /* INTERVALOID or prel->atttype */ + Oid interval_type, /* INTERVALOID or prel->ev_type */ Datum value, /* value to be INSERTed */ Oid value_type, /* type of value */ Oid collid) /* collation id */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 5fe4ef91..f05aae27 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -231,7 +231,7 @@ is_pathman_related_alter_column_type(Node *parsetree, if (!bms_is_member(adjusted_attnum, prel->expr_atts)) continue; - /* Return 'prel->attnum' */ + /* Return 'attr_number_out' if asked to */ if (attr_number_out) *attr_number_out = attnum; /* Success! 
*/ diff --git a/src/utils.c b/src/utils.c index 1ff2e460..6f9e53cd 100644 --- a/src/utils.c +++ b/src/utils.c @@ -452,7 +452,7 @@ extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ elog(ERROR, "cannot find input function for type %u", part_atttype); /* - * Convert interval from CSTRING to 'prel->atttype'. + * Convert interval from CSTRING to 'prel->ev_type'. * * Note: We pass 3 arguments in case * 'typein_proc' also takes Oid & typmod. From 8bb4874ccb1430df1ac65c09c2b43cd2ed31af26 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 13:56:56 +0300 Subject: [PATCH 0599/1124] restrict system attributes in partitioning expression --- expected/pathman_calamity.out | 16 ++++++++-------- expected/pathman_expressions.out | 12 +++++++++++- sql/pathman_calamity.sql | 16 ++++++++-------- sql/pathman_expressions.sql | 5 ++++- src/relation_info.c | 5 +++++ 5 files changed, 36 insertions(+), 18 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 729fd979..14eca51d 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -280,21 +280,21 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: relation "1" does not exist -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: 'partrel' should not be NULL -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_interval_value('pg_class', 'oid', NULL, '1 
mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ ERROR: 'parttype' should not be NULL -SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ ERROR: failed to analyze partitioning expression "expr" -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ ERROR: unrecognized token: "cooked_expr" -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ ERROR: failed to analyze partitioning expression "EXPR" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index bdef76a3..948fdd5e 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -235,7 +235,17 @@ SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM -/* Try using multiple queries */ +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT 
public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +/* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', 4); diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 79e85987..881cebbd 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -132,14 +132,14 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'oid', NULL, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'oid', 1, 'HASH', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, 
NULL, 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index c01e971f..1c7f4dbe 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -94,7 +94,10 @@ SELECT COUNT(*) FROM test_exprs.hash_rel; /* Try using constant expression */ SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); -/* Try using multiple queries */ +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); + +/* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', 4); diff --git a/src/relation_info.c b/src/relation_info.c index cdfcf0b2..12965f16 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -731,6 +731,11 @@ cook_partitioning_expression(const Oid relid, AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; HeapTuple htup; + /* Check that there's no system attributes in expression */ + if (attnum < InvalidAttrNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("system attributes are not supported"))); + htup = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); From 37bad35b646b34ea262ba07d2122d7c13235db52 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 6 Jun 2017 15:54:44 +0300 Subject: [PATCH 0600/1124] migration script to 1.4 --- pg_pathman--1.3--1.4.sql | 1362 ++++++++++++++++++++++++++++++-------- 1 file changed, 1084 insertions(+), 278 deletions(-) diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index d4e9a80b..470a2d82 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql @@ -8,33 +8,29 @@ * ------------------------------------------------------------------------ */ -DROP FUNCTION 
@extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT) CASCADE; -CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( - atttype OID, - parttype INTEGER, - range_interval TEXT) -RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' -LANGUAGE C; -DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); +/* ------------------------------------------------------------------------ + * Alter config tables + * ----------------------------------------------------------------------*/ +ALTER TABLE @extschema@.pathman_config RENAME COLUMN attname TO expr; +ALTER TABLE @extschema@.pathman_config ADD COLUMN cooked_expr TEXT; -ALTER TABLE @extschema@.pathman_config ADD COLUMN expression_p TEXT DEFAULT '--not set--'; -ALTER TABLE @extschema@.pathman_config ADD COLUMN atttype OID DEFAULT 1; -ALTER TABLE @extschema@.pathman_config ADD COLUMN upd_expr BOOL DEFAULT FALSE; +DROP TRIGGER pathman_config_params_trigger; -/* update constraint */ -ALTER TABLE @extschema@.pathman_config - ADD CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(atttype, - parttype, - range_interval)); +CREATE TRIGGER pathman_config_params_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); -/* mark 'expression_p' and 'atttype' to update on next start */ -UPDATE @extschema@.pathman_config SET upd_expr = TRUE; +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); -/* we've changed the format of constraint names, and we need rename them */ -CREATE OR REPLACE FUNCTION @extschema@.update_constraints() -RETURNS BOOLEAN AS -$$ +ALTER TABLE @extschema@.pathman_config +ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(atttype, + parttype, + range_interval)); + +DO $$ DECLARE v_rec RECORD; BEGIN @@ -48,80 +44,461 @@ BEGIN RETURN TRUE; END -$$ -LANGUAGE 
plpgsql; - -SELECT @extschema@.update_constraints(); +$$ LANGUAGE plpgsql; -/* we don't need this function anymore */ -DROP FUNCTION @extschema@.update_constraints(); +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT); +DROP FUNCTION @extschema@.show_partition_list(); +DROP FUNCTION @extschema@._partition_data_concurrent(REGCLASS, ANYELEMENT, ANYELEMENT, INT, BIGINT); +DROP FUNCTION @extschema@.disable_pathman_for(REGCLASS); DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( +DROP FUNCTION @extschema@.validate_relations_equality(OID, OID); +DROP FUNCTION @extschema@.drop_partitions(REGCLASS, BOOLEAN); +DROP FUNCTION @extschema@.on_create_partitions(REGCLASS); +DROP FUNCTION @extschema@.on_update_partitions(REGCLASS); +DROP FUNCTION @extschema@.on_remove_partitions(REGCLASS); +DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, INT2); +DROP FUNCTION @extschema@.add_to_pathman_config(REGCLASS, TEXT, TEXT); +DROP FUNCTION @extschema@.lock_partitioned_relation(REGCLASS); +DROP FUNCTION @extschema@.prevent_relation_modification(REGCLASS); +DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN, TEXT[], TEXT[]); +DROP FUNCTION @extschema@.create_hash_update_trigger(REGCLASS); +DROP FUNCTION @extschema@.get_type_hash_func(REGTYPE); +DROP FUNCTION @extschema@.build_hash_condition(REGTYPE, TEXT, INT4, INT4); +DROP FUNCTION @extschema@.create_or_replace_sequence(REGCLASS, OUT TEXT); +DROP FUNCTION @extschema@.check_boundaries(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, INTERVAL, INTEGER, BOOLEAN); +DROP 
FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, ANYELEMENT, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTERVAL, BOOLEAN); +DROP FUNCTION @extschema@.create_range_update_trigger(REGCLASS); +DROP FUNCTION @extschema@.build_range_condition(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.find_or_create_range_partition(REGCLASS, ANYELEMENT); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION @extschema@.build_sequence_name(REGCLASS) STRICT; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT, + cooked_expr TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + expr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8, + entries INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( relation REGCLASS, - expression TEXT) -RETURNS BOOLEAN AS -$$ + p_min ANYELEMENT DEFAULT NULL::text, + p_max ANYELEMENT DEFAULT NULL::text, + p_limit INT DEFAULT NULL, + OUT p_total BIGINT) +AS $$ +DECLARE + part_expr TEXT; 
+ v_limit_clause TEXT := ''; + v_where_clause TEXT := ''; + ctids TID[]; + +BEGIN + part_expr := @extschema@.get_partition_key(relation); + + p_total := 0; + + /* Format LIMIT clause if needed */ + IF NOT p_limit IS NULL THEN + v_limit_clause := format('LIMIT %s', p_limit); + END IF; + + /* Format WHERE clause if needed */ + IF NOT p_min IS NULL THEN + v_where_clause := format('%1$s >= $1', part_expr); + END IF; + + IF NOT p_max IS NULL THEN + IF NOT p_min IS NULL THEN + v_where_clause := v_where_clause || ' AND '; + END IF; + v_where_clause := v_where_clause || format('%1$s < $2', part_expr); + END IF; + + IF v_where_clause != '' THEN + v_where_clause := 'WHERE ' || v_where_clause; + END IF; + + /* Lock rows and copy data */ + RAISE NOTICE 'Copying data to partitions...'; + EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, v_limit_clause) + USING p_min, p_max + INTO ctids; + + EXECUTE format('WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) + USING ctids; + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + + +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ + PERFORM @extschema@.drop_triggers(parent_relid); +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + expression TEXT, + partition_data BOOLEAN) +RETURNS VOID AS $$ DECLARE - v_rec RECORD; + constr_name TEXT; 
is_referenced BOOLEAN; rel_persistence CHAR; BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_expression(parent_relid, expression); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = relation INTO rel_persistence; + WHERE oid = parent_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'temporary table "%" cannot be partitioned', - relation::TEXT; + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', parent_relid; END IF; IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = relation) THEN - RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint - WHERE confrelid = relation::REGCLASS::OID) + FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint + WHERE confrelid = parent_relid::REGCLASS::OID) LOOP is_referenced := TRUE; - RAISE WARNING 'foreign key "%" references relation "%"', - v_rec.conname, relation; + RAISE WARNING 'foreign key "%" references table "%"', constr_name, parent_relid; END LOOP; IF is_referenced THEN - RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + RAISE EXCEPTION 'table "%" is referenced from other tables', parent_relid; END IF; - RETURN FALSE; END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); 
+ + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + +CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + triggername TEXT; + relation OID; + +BEGIN + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Drop trigger for each partition if exists */ + FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid + FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) + LOOP + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + relation::REGCLASS); + END LOOP; + + /* Drop trigger on parent */ + IF EXISTS (SELECT * FROM pg_catalog.pg_trigger + WHERE tgname = triggername AND tgrelid = parent_relid) + THEN + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + parent_relid::TEXT); + END IF; +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', 
parent_relid::TEXT; + END IF; + + /* First, drop all triggers */ + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + conid OID; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition_relid::TEXT, + pg_catalog.pg_get_constraintdef(conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION 
@extschema@.get_partition_key( + relid REGCLASS) +RETURNS TEXT AS $$ -LANGUAGE plpgsql; + SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.pathman_update_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_update_trigger_func' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_update_triggers' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( + parent_relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.validate_expression( + relid REGCLASS, + expression TEXT) +RETURNS VOID AS 'pg_pathman', 'validate_expression' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( + type_oid REGTYPE, + opname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( + relation1 REGCLASS, + relation2 REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' +LANGUAGE C STRICT; -DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, INT2); -DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, TEXT); CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; -DROP FUNCTION @extschema@.add_to_pathman_config(REGCLASS, TEXT, 
TEXT); CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL, - refresh_part_info BOOL DEFAULT TRUE, - parttype INT4 DEFAULT 0 -) + expression TEXT, + range_interval TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; -DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INT4, BOOLEAN, - TEXT[], TEXT[]); + +CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' +LANGUAGE C STRICT; + CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, @@ -130,24 +507,15 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - expression := lower(expression); - PERFORM @extschema@.common_relation_checks(parent_relid, expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression); /* Create 
partitions */ PERFORM @extschema@.create_hash_partitions_internal(parent_relid, @@ -156,9 +524,6 @@ BEGIN partition_names, tablespaces); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Copy data */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -172,75 +537,141 @@ END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; -DROP FUNCTION @extschema@.build_hash_condition(REGTYPE, TEXT, INT4, INT4); -DROP FUNCTION @extschema@.check_boundaries(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure 
as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partition_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; + + CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, 
end_value ANYELEMENT) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ DECLARE - v_min start_value%TYPE; - v_max start_value%TYPE; - v_count BIGINT; + min_value start_value%TYPE; + max_value start_value%TYPE; + rows_count BIGINT; BEGIN /* Get min and max values */ EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', expression, parent_relid::TEXT) - INTO v_count, v_min, v_max; + INTO rows_count, min_value, max_value; /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + IF rows_count > 0 AND (min_value IS NULL OR max_value IS NULL) THEN RAISE EXCEPTION 'expression "%" returns NULL values', expression; END IF; /* Check lower boundary */ - IF start_value > v_min THEN - RAISE EXCEPTION 'start value is less than min value of "%"', expression; + IF start_value > min_value THEN + RAISE EXCEPTION 'start value is greater than min value of "%"', expression; END IF; /* Check upper boundary */ - IF end_value <= v_max THEN + IF end_value <= max_value THEN RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; END IF; END $$ LANGUAGE plpgsql; -DROP FUNCTION @extschema@.prepare_for_partitioning(REGCLASS, TEXT, BOOLEAN); -CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( - parent_relid REGCLASS, - expression TEXT, - partition_data BOOLEAN) -RETURNS VOID AS -$$ -BEGIN - PERFORM @extschema@.validate_relname(parent_relid); - - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - expression := lower(expression); - PERFORM @extschema@.common_relation_checks(parent_relid, expression); -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified relation based on datetime attribute - */ -DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, - 
INTERVAL, INTEGER, BOOLEAN); - CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, @@ -248,20 +679,20 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE - v_rows_count BIGINT; - v_atttype REGTYPE; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; + rows_count BIGINT; + value_type REGTYPE; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; BEGIN - expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; @@ -270,21 +701,21 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) - INTO v_rows_count, v_max; + INTO rows_count, max_value; - IF v_rows_count = 0 THEN + IF rows_count = 0 THEN RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; - v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); /* * In case when user doesn't want to automatically create partitions @@ -299,22 +730,22 @@ BEGIN END LOOP; /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', - parent_relid, - start_value, - end_value, - v_atttype::TEXT) + EXECUTE + format('SELECT 
@extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', + parent_relid, + start_value, + end_value, + value_type::TEXT) USING - expression; + expression; END IF; + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + p_interval::TEXT); IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( @@ -326,9 +757,6 @@ BEGIN NULL); END IF; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -341,11 +769,6 @@ BEGIN END $$ LANGUAGE plpgsql; -/* - * Creates RANGE partitions for specified relation based on numerical expression - */ -DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, - ANYELEMENT, INTEGER, BOOLEAN); CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, @@ -354,19 +777,19 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE - v_rows_count BIGINT; - v_max start_value%TYPE; - v_cur_value start_value%TYPE := start_value; + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; BEGIN - expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + 
partition_data); IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; @@ -375,20 +798,20 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) - INTO v_rows_count, v_max; + INTO rows_count, max_value; - IF v_rows_count = 0 THEN + IF rows_count = 0 THEN RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; - IF v_max IS NULL THEN + IF max_value IS NULL THEN RAISE EXCEPTION 'expression "%" can return NULL values', expression; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; @@ -412,25 +835,23 @@ BEGIN end_value); END IF; + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + /* Insert new entry to pathman config */ PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + p_interval::TEXT); IF p_count != 0 THEN part_count := @extschema@.create_range_partitions_internal( parent_relid, - @extschema@.generate_range_bounds(start_value, p_interval, p_count), + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), NULL, NULL); END IF; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -443,11 +864,6 @@ BEGIN END $$ LANGUAGE plpgsql; -/* - * Creates RANGE partitions for specified relation based on bounds array - */ -DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYARRAY, - TEXT[], 
TEXT[], BOOLEAN); CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, @@ -456,10 +872,10 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE - part_count INTEGER; + part_count INTEGER := 0; + BEGIN IF array_ndims(bounds) > 1 THEN RAISE EXCEPTION 'Bounds array must be a one dimensional array'; @@ -469,8 +885,9 @@ BEGIN RAISE EXCEPTION 'Bounds array must have at least two values'; END IF; - expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, @@ -478,12 +895,11 @@ BEGIN bounds[0], bounds[array_length(bounds, 1) - 1]); - /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL, false, 2); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); /* Create partitions */ part_count := @extschema@.create_range_partitions_internal(parent_relid, @@ -491,9 +907,6 @@ BEGIN partition_names, tablespaces); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ IF partition_data = true THEN PERFORM @extschema@.set_enable_parent(parent_relid, false); @@ -507,138 +920,515 @@ END $$ LANGUAGE plpgsql; -/* - * Creates RANGE partitions for specified range - */ -DROP FUNCTION 
@extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, - ANYELEMENT, ANYELEMENT, BOOLEAN); -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + part_expr TEXT; + part_expr_type REGTYPE; + check_name TEXT; + check_cond TEXT; + new_partition TEXT; + +BEGIN + parent_relid = @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(partition_relid); + + part_expr_type = @extschema@.get_partition_key_type(parent_relid); + part_expr := @extschema@.get_partition_key(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING partition_relid + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + new_partition := @extschema@.create_single_range_partition(parent_relid, + split_value, + p_range[2], + 
partition_name, + tablespace); + + /* Copy data */ + check_cond := @extschema@.build_range_condition(new_partition::regclass, + part_expr, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition_relid::TEXT, + check_cond, + new_partition); + + /* Alter original partition */ + check_cond := @extschema@.build_range_condition(partition_relid::regclass, + part_expr, p_range[1], split_value); + check_name := @extschema@.build_check_constraint_name(partition_relid); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + check_name, + check_cond); +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, - expression TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - part_count INTEGER := 0; + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; BEGIN - expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + PERFORM @extschema@.validate_relname(parent_relid); - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - expression, - start_value, - end_value); + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); - /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + part_expr_type := @extschema@.get_partition_key_type(parent_relid); - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM 
@extschema@.get_plain_schema_and_relname(parent_relid); + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; - WHILE start_value <= end_value - LOOP - PERFORM @extschema@.create_single_range_partition( - parent_relid, - start_value, - start_value + p_interval, - tablespace := @extschema@.get_tablespace(parent_relid)); + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + part_expr_type := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF 
p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; - RETURN part_count; /* number of created partitions */ + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; END $$ LANGUAGE plpgsql; -/* - * Creates RANGE partitions for specified range based on datetime expression - */ -DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, - ANYELEMENT, ANYELEMENT, INTERVAL, BOOLEAN); -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + 
part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + part_expr_type := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[1] IS NULL THEN + RAISE EXCEPTION 'Cannot prepend partition because first partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); + ELSE + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( parent_relid REGCLASS, - expression TEXT, start_value ANYELEMENT, end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT TRUE) -RETURNS INTEGER AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - part_count INTEGER := 0; + part_name TEXT; BEGIN - expression := lower(expression); - PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, - partition_data); + PERFORM 
@extschema@.validate_relname(parent_relid); - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - expression, - start_value, - end_value); + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); - /* Insert new entry to pathman config */ - PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, - p_interval::TEXT, false); + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(parent_relid) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + /* check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; - WHILE start_value <= end_value - LOOP - EXECUTE - format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', - @extschema@.get_base_type(pg_typeof(start_value))::TEXT) - USING - parent_relid, - start_value, - start_value + p_interval, - @extschema@.get_tablespace(parent_relid); + /* Create new partition */ + part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); - start_value := start_value + p_interval; - part_count := part_count + 1; - END LOOP; + RETURN part_name; +END +$$ LANGUAGE plpgsql; - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.set_enable_parent(parent_relid, false); - PERFORM @extschema@.partition_data(parent_relid); +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type 
INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - PERFORM @extschema@.set_enable_parent(parent_relid, true); + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); END IF; - RETURN part_count; /* number of created partitions */ + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + part_expr := @extschema@.get_partition_key(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + 
end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* If update trigger is enabled then create one for this partition */ + if @extschema@.has_update_trigger(parent_relid) THEN + PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); + END IF; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid)); + + /* Remove update trigger */ + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + @extschema@.build_update_trigger_name(parent_relid), + partition_relid::TEXT); + + RETURN partition_relid; END $$ LANGUAGE plpgsql; -DROP FUNCTION @extschema@.build_range_condition(REGCLASS, TEXT, - ANYELEMENT, 
ANYELEMENT); + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( + parent_relid REGCLASS, + bounds ANYARRAY, + partition_names TEXT[], + tablespaces TEXT[]) +RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' +LANGUAGE C; + CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, @@ -647,3 +1437,19 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( end_value ANYELEMENT) RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C STRICT; From f0404735c2b235e1c89c2b4c7e2d61fe0c595df9 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 6 Jun 2017 16:34:45 +0300 Subject: [PATCH 0601/1124] fix migration script --- Makefile | 3 +- pg_pathman--1.3--1.4.sql | 71 +++++++++++++++++++++++----------------- 2 files changed, 43 insertions(+), 31 deletions(-) diff --git a/Makefile b/Makefile index e0ef3f74..8b8fa036 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,8 @@ DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql \ - pg_pathman--1.2--1.3.sql + pg_pathman--1.2--1.3.sql \ + pg_pathman--1.3--1.4.sql PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index 470a2d82..7bfeec48 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql @@ -15,20 +15,31 @@ ALTER TABLE @extschema@.pathman_config RENAME COLUMN attname TO expr; ALTER TABLE @extschema@.pathman_config ADD COLUMN cooked_expr TEXT; -DROP TRIGGER pathman_config_params_trigger; +DROP 
TRIGGER pathman_config_params_trigger ON @extschema@.pathman_config_params; CREATE TRIGGER pathman_config_params_trigger AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); -CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats -AS SELECT * FROM @extschema@.show_cache_stats(); + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT) CASCADE; + +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT, + cooked_expr TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check - CHECK (@extschema@.validate_interval_value(atttype, + CHECK (@extschema@.validate_interval_value(partrel, + expr, parttype, - range_interval)); + range_interval, + cooked_expr)); DO $$ DECLARE @@ -41,18 +52,35 @@ BEGIN EXECUTE format('ALTER TABLE %s RENAME CONSTRAINT %s TO %s', v_rec.t, v_rec.conname, v_rec.new_conname); END LOOP; - - RETURN TRUE; END $$ LANGUAGE plpgsql; +DROP VIEW pathman_partition_list; + +DROP FUNCTION @extschema@.show_partition_list(); + +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + expr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + + /* ------------------------------------------------------------------------ * Drop irrelevant objects * ----------------------------------------------------------------------*/ -DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT); -DROP FUNCTION 
@extschema@.show_partition_list(); -DROP FUNCTION @extschema@._partition_data_concurrent(REGCLASS, ANYELEMENT, ANYELEMENT, INT, BIGINT); +DROP FUNCTION @extschema@._partition_data_concurrent(REGCLASS, ANYELEMENT, ANYELEMENT, INT, OUT BIGINT); DROP FUNCTION @extschema@.disable_pathman_for(REGCLASS); DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); DROP FUNCTION @extschema@.validate_relations_equality(OID, OID); @@ -89,26 +117,6 @@ ALTER FUNCTION @extschema@.build_sequence_name(REGCLASS) STRICT; /* ------------------------------------------------------------------------ * (Re)create functions * ----------------------------------------------------------------------*/ -CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( - partrel REGCLASS, - expr TEXT, - parttype INTEGER, - range_interval TEXT, - cooked_expr TEXT) -RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() -RETURNS TABLE ( - parent REGCLASS, - partition REGCLASS, - parttype INT4, - expr TEXT, - range_min TEXT, - range_max TEXT) -AS 'pg_pathman', 'show_partition_list_internal' -LANGUAGE C STRICT; - CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() RETURNS TABLE ( context TEXT, @@ -118,6 +126,9 @@ RETURNS TABLE ( AS 'pg_pathman', 'show_cache_stats_internal' LANGUAGE C STRICT; +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); + CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( relation REGCLASS, From 7d39ce5c6fbcef5944b15ed67cdd5283231d1d91 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 6 Jun 2017 17:58:40 +0300 Subject: [PATCH 0602/1124] other fixes to migration script --- pg_pathman--1.3--1.4.sql | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index 7bfeec48..9e57a7b2 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql 
@@ -89,6 +89,7 @@ DROP FUNCTION @extschema@.on_create_partitions(REGCLASS); DROP FUNCTION @extschema@.on_update_partitions(REGCLASS); DROP FUNCTION @extschema@.on_remove_partitions(REGCLASS); DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, TEXT); DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, INT2); DROP FUNCTION @extschema@.add_to_pathman_config(REGCLASS, TEXT, TEXT); DROP FUNCTION @extschema@.lock_partitioned_relation(REGCLASS); @@ -257,6 +258,31 @@ END $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS $$ +DECLARE + obj RECORD; + pg_class_oid OID; + relids REGCLASS[]; + +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ LANGUAGE plpgsql; + + CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( parent_relid REGCLASS) RETURNS TEXT AS $$ From 1cbe237b9ae9c66eaf6bb3ab4f9936d90f747ae4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 18:18:07 +0300 Subject: [PATCH 0603/1124] add pathman update checker utilities --- tests/update/README.md | 11 +++++++++++ tests/update/dump_pathman_objects | 31 +++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 tests/update/README.md create mode 100755 tests/update/dump_pathman_objects diff --git a/tests/update/README.md b/tests/update/README.md new file mode 100644 index 00000000..f31f4116 --- /dev/null +++ 
b/tests/update/README.md @@ -0,0 +1,11 @@ +## pg_pathman's update checker + +It's necessary to check that `ALTER EXTENSION pg_pathman UPDATE` produces an SQL frontend that is exactly the same as a fresh install. + +Usage: + +```bash +PG_CONFIG=... ./dump_pathman_objects %DBNAME% + +diff file_1 file_2 +``` diff --git a/tests/update/dump_pathman_objects b/tests/update/dump_pathman_objects new file mode 100755 index 00000000..fff1ed17 --- /dev/null +++ b/tests/update/dump_pathman_objects @@ -0,0 +1,31 @@ +#!/usr/bin/bash + + +rndstr=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo '') +bindir=$($PG_CONFIG --bindir) +dbname=$1 +flname=pathman_objects_$rndstr.txt + +# show file name +echo $flname + +$bindir/psql $dbname << EOF + +\o $flname + +SELECT pg_get_functiondef(objid) +FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid +WHERE refclassid = 'pg_catalog.pg_extension'::REGCLASS AND + refobjid = (SELECT oid + FROM pg_catalog.pg_extension + WHERE extname = 'pg_pathman') AND + deptype = 'e' +ORDER BY objid::regprocedure::TEXT ASC; + +\d+ pathman_config +\d+ pathman_config_params +\d+ pathman_partition_list +\d+ pathman_cache_stats +\d+ pathman_concurrent_part_tasks + +EOF From 15e74e32615b857a99d2099d1dc8fba39f517ba4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 18:53:58 +0300 Subject: [PATCH 0604/1124] add quick tip to update script --- pg_pathman--1.3--1.4.sql | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql index 9e57a7b2..f60634fa 100644 --- a/pg_pathman--1.3--1.4.sql +++ b/pg_pathman--1.3--1.4.sql @@ -1490,3 +1490,15 @@ CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C STRICT; + + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * 
----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; From 56c61e5e990d214aa1fa8873ee70e17480dd421d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 19:16:11 +0300 Subject: [PATCH 0605/1124] update README.md --- README.md | 50 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 9f11ae34..444438cf 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,12 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. -The extension is compatible with PostgreSQL 9.5+. +The extension is compatible with: + * PostgreSQL 9.5, 9.6, 10; + * Postgres Pro Standard 9.5, 9.6; + * Postgres Pro Enterprise; + +By the way, we have a growing Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki); ## Overview **Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT. For example: @@ -41,6 +46,7 @@ More interesting features are yet to come. Stay tuned! ## Feature highlights * HASH and RANGE partitioning schemes; + * Partitioning by expression and composite key; * Both automatic and manual partition management; * Support for integer, floating point, date and other types, including domains; * Effective query planning for partitioned tables (JOINs, subselects etc); @@ -55,9 +61,11 @@ More interesting features are yet to come. Stay tuned! * Various GUC toggles and configurable settings. 
## Roadmap + + * Multi-level partitioning (ver 1.5); + * Improved referential integrity + foreign keys on partitioned tables (ver 1.5); - * Implement LIST partitioning scheme; - * Optimize hash join (both tables are partitioned by join key). +Take a look at [this page](https://github.com/postgrespro/pg_pathman/wiki/Roadmap); ## Installation guide To install `pg_pathman`, execute this in the module's directory: @@ -97,30 +105,30 @@ SET pg_pathman.enable = t; ### Partition creation ```plpgsql create_hash_partitions(relation REGCLASS, - attribute TEXT, + expr TEXT, partitions_count INTEGER, partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) ``` -Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). +Performs HASH partitioning for `relation` by partitioning expression `expr`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). 
```plpgsql create_range_partitions(relation REGCLASS, - attribute TEXT, + expr TEXT, start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL partition_data BOOLEAN DEFAULT TRUE) create_range_partitions(relation REGCLASS, - attribute TEXT, + expr TEXT, start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) ``` -Performs RANGE partitioning for `relation` by partitioning key `attribute`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on attribute values). Partition creation callback is invoked for each partition if set beforehand. +Performs RANGE partitioning for `relation` by partitioning expression `expr`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on expression's values). Partition creation callback is invoked for each partition if set beforehand. ### Data migration @@ -141,7 +149,7 @@ Stops a background worker performing a concurrent partitioning task. Note: worke ```plpgsql create_hash_update_trigger(parent REGCLASS) ``` -Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. It's useful in cases when the key attribute might change. +Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. 
It's useful in cases when the partitioning expression's value might change. ```plpgsql create_range_update_trigger(parent REGCLASS) ``` @@ -281,9 +289,10 @@ When INSERTing new data beyond the partitioning range, use SpawnPartitionsWorker ```plpgsql CREATE TABLE IF NOT EXISTS pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, + expr TEXT NOT NULL, parttype INTEGER NOT NULL, - range_interval TEXT); + range_interval TEXT, + cooked_expr TEXT); ``` This table stores a list of partitioned tables. @@ -325,7 +334,7 @@ RETURNS TABLE ( parent REGCLASS, partition REGCLASS, parttype INT4, - partattr TEXT, + expr TEXT, range_min TEXT, range_max TEXT) AS 'pg_pathman', 'show_partition_list_internal' @@ -336,6 +345,23 @@ AS SELECT * FROM show_partition_list(); ``` This view lists all existing partitions, as well as their parents and range boundaries (NULL for HASH partitions). +### `pathman_cache_stats` --- per-backend memory consumption +```plpgsql +-- helper SRF function +CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8, + entries INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); +``` +Shows memory consumption of various caches. 
+ ## Custom plan nodes `pg_pathman` provides a couple of [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI) which aim to reduce execution time, namely: From 2e792065004c670bf4327b93eaff01d72feab95a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 19:19:01 +0300 Subject: [PATCH 0606/1124] fix formatting in README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 444438cf..d19d4fd4 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ The extension is compatible with: * Postgres Pro Standard 9.5, 9.6; * Postgres Pro Enterprise; -By the way, we have a growing Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki); +By the way, we have a growing Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). ## Overview **Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT. For example: @@ -292,7 +292,7 @@ CREATE TABLE IF NOT EXISTS pathman_config ( expr TEXT NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT, - cooked_expr TEXT); + cooked_expr TEXT); ``` This table stores a list of partitioned tables. @@ -345,7 +345,7 @@ AS SELECT * FROM show_partition_list(); ``` This view lists all existing partitions, as well as their parents and range boundaries (NULL for HASH partitions). 
-### `pathman_cache_stats` --- per-backend memory consumption +#### `pathman_cache_stats` --- per-backend memory consumption ```plpgsql -- helper SRF function CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() From 99954c5cb7ad9619a93b8873bcd5f6651517ad68 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 19:25:59 +0300 Subject: [PATCH 0607/1124] update META.json --- META.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/META.json b/META.json index 74ec39e6..d4c01616 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.3.2", + "version": "1.4.0", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -22,9 +22,9 @@ "generated_by": "Ildar Musin", "provides": { "pg_pathman": { - "file": "pg_pathman--1.3.sql", + "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.3.2", + "version": "1.4.0", "abstract": "Partitioning tool" } }, From 8e6b5639e10c2d858ae1ba2ba33a04b65b1cb72f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 6 Jun 2017 19:44:59 +0300 Subject: [PATCH 0608/1124] update README.md --- README.md | 48 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index d19d4fd4..d53ad374 100644 --- a/README.md +++ b/README.md @@ -114,21 +114,39 @@ create_hash_partitions(relation REGCLASS, Performs HASH partitioning for `relation` by partitioning expression `expr`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. 
See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql -create_range_partitions(relation REGCLASS, - expr TEXT, - start_value ANYELEMENT, - p_interval ANYELEMENT, - p_count INTEGER DEFAULT NULL - partition_data BOOLEAN DEFAULT TRUE) - -create_range_partitions(relation REGCLASS, - expr TEXT, - start_value ANYELEMENT, - p_interval INTERVAL, - p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT TRUE) -``` -Performs RANGE partitioning for `relation` by partitioning expression `expr`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on expression's values). Partition creation callback is invoked for each partition if set beforehand. 
+create_range_partitions(relation REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL + partition_data BOOLEAN DEFAULT TRUE) + +create_range_partitions(relation REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) + +create_range_partitions(relation REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +``` +Performs RANGE partitioning for `relation` by partitioning expression `expr`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on expression's values). The `bounds` array can be built using `generate_range_bounds()`. Partition creation callback is invoked for each partition if set beforehand. + +```plpgsql +generate_range_bounds(p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) + +generate_range_bounds(p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +``` +Builds `bounds` array for `create_range_partitions()`. 
### Data migration From 4d9822c6bc1233924cdcd930dd12f75016d22f12 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 7 Jun 2017 15:08:24 +0300 Subject: [PATCH 0609/1124] support for Postgres Pro 10, remove dead code --- src/include/compat/pg_compat.h | 11 ++++++++++- src/partition_creation.c | 3 --- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 8dcc339a..33a28339 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -161,14 +161,23 @@ * create_append_path() */ #if PG_VERSION_NUM >= 100000 + +#ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ + false, NIL) + +#endif /* PGPRO_VERSION */ + #elif PG_VERSION_NUM >= 90600 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) -#else /* ifdef PGPRO_VERSION */ +#else #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), \ false, NIL, (parallel_workers)) diff --git a/src/partition_creation.c b/src/partition_creation.c index 62f3a6b7..412b3f36 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -754,9 +754,6 @@ create_single_partition_internal(Oid parent_relid, create_stmt.oncommit = ONCOMMIT_NOOP; create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 - create_stmt.partition_info = NULL; -#endif #if PG_VERSION_NUM >= 100000 create_stmt.partbound = NULL; create_stmt.partspec = NULL; From 
863172dd27a7f82c5bf97b4c0e851112640bc32b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 7 Jun 2017 17:28:35 +0300 Subject: [PATCH 0610/1124] return commutator from IsKeyOpParam() in case of PARAM OP EXPR --- expected/pathman_basic.out | 70 ++++++++++++++++++++++ sql/pathman_basic.sql | 8 +++ src/pg_pathman.c | 120 +++++++++++++++++++------------------ 3 files changed, 141 insertions(+), 57 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 231786bd..69c1458d 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -441,6 +441,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN ------------------------------ @@ -451,6 +459,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ----------------------------------- @@ -501,6 +526,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN 
+-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- @@ -565,6 +600,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN ------------------------------ @@ -575,6 +618,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- @@ -645,6 +705,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN 
+------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 808292ed..297c4097 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -158,14 +158,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < 
'2015-02-15') OR (dt > '2015-03-15'); @@ -179,8 +183,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; @@ -189,6 +196,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1580bb22..7715c531 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -39,8 +39,8 @@ PG_MODULE_MAGIC; -Oid pathman_config_relid = InvalidOid, - pathman_config_params_relid = InvalidOid; +Oid pathman_config_relid = InvalidOid, + pathman_config_params_relid = InvalidOid; /* pg module functions */ @@ -75,10 +75,6 @@ static void handle_opexpr(const OpExpr *expr, const WalkerContext 
*context, WrapperNode *result); -static bool is_key_op_param(const OpExpr *expr, - const WalkerContext *context, - Node **param_ptr); - static Datum array_find_min_max(Datum *values, bool *isnull, int length, @@ -212,6 +208,47 @@ ExtractConst(Node *node, const WalkerContext *context) value, isnull, get_typbyval(typid)); } +/* + * Checks if expression is a KEY OP PARAM or PARAM OP KEY, + * where KEY is partitioning expression and PARAM is whatever. + * + * Returns: + * operator's Oid if KEY is a partitioning expr, + * otherwise InvalidOid. + */ +static Oid +IsKeyOpParam(const OpExpr *expr, + const WalkerContext *context, + Node **param_ptr) /* ret value #1 */ +{ + Node *left = linitial(expr->args), + *right = lsecond(expr->args); + + /* Check number of arguments */ + if (list_length(expr->args) != 2) + return InvalidOid; + + /* KEY OP PARAM */ + if (match_expr_to_operand(context->prel_expr, left)) + { + *param_ptr = right; + + /* return the same operator */ + return expr->opno; + } + + /* PARAM OP KEY */ + if (match_expr_to_operand(context->prel_expr, right)) + { + *param_ptr = left; + + /* commute to (KEY OP PARAM) */ + return get_commutator(expr->opno); + } + + return InvalidOid; +} + /* Selectivity estimator for common 'paramsel' */ static inline double estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) @@ -1315,37 +1352,35 @@ handle_opexpr(const OpExpr *expr, { Node *param; const PartRelationInfo *prel = context->prel; + Oid opid; /* operator's Oid */ /* Save expression */ result->orig = (const Node *) expr; - if (list_length(expr->args) == 2) + /* Is it KEY OP PARAM or PARAM OP KEY? */ + if (OidIsValid(opid = IsKeyOpParam(expr, context, ¶m))) { - /* Is it KEY OP PARAM or PARAM OP KEY? 
*/ - if (is_key_op_param(expr, context, ¶m)) - { - TypeCacheEntry *tce; - int strategy; + TypeCacheEntry *tce; + int strategy; - tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(opid, tce->btree_opf); - if (IsConstValue(param, context)) - { - handle_const(ExtractConst(param, context), - expr->inputcollid, - strategy, context, result); + if (IsConstValue(param, context)) + { + handle_const(ExtractConst(param, context), + expr->inputcollid, + strategy, context, result); - return; /* done, exit */ - } - /* TODO: estimate selectivity for param if it's Var */ - else if (IsA(param, Param) || IsA(param, Var)) - { - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + return; /* done, exit */ + } + /* TODO: estimate selectivity for param if it's Var */ + else if (IsA(param, Param) || IsA(param, Var)) + { + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); - return; /* done, exit */ - } + return; /* done, exit */ } } @@ -1354,35 +1389,6 @@ handle_opexpr(const OpExpr *expr, } -/* - * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where - * KEY is partitioning expression and PARAM is whatever. - * - * NOTE: returns false if partition key is not in expression. 
- */ -static bool -is_key_op_param(const OpExpr *expr, - const WalkerContext *context, - Node **param_ptr) /* ret value #1 */ -{ - Node *left = linitial(expr->args), - *right = lsecond(expr->args); - - if (match_expr_to_operand(context->prel_expr, left)) - { - *param_ptr = right; - return true; - } - - if (match_expr_to_operand(context->prel_expr, right)) - { - *param_ptr = left; - return true; - } - - return false; -} - /* Find Max or Min value of array */ static Datum array_find_min_max(Datum *values, From dc26b4b1e48f23d7863ec301661aba7a57f29868 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 7 Jun 2017 17:31:47 +0300 Subject: [PATCH 0611/1124] bump lib version to 1.4.1 --- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 14eca51d..9ae638b5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10400 + 10401 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index c1a1041c..262d48a0 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010400 +#define CURRENT_LIB_VERSION 0x010401 void *pathman_cache_search_relid(HTAB *cache_table, From b67cc6fa0ee8f7a85d9ba51343b1c75463c10223 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 8 Jun 2017 16:26:53 +0300 Subject: [PATCH 0612/1124] don't use snapshots in pathman_relcache_hook() --- src/hooks.c | 56 ++++++++++++--------------------------------- src/relation_info.c | 8 +++---- 2 files changed, 18 insertions(+), 46 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 
92314c7b..b4f1c5d5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -699,8 +699,7 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { - PartParentSearch search; - Oid partitioned_table; + Oid parent_relid; /* Hooks can be disabled */ if (!pathman_hooks_enabled) @@ -721,50 +720,23 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_bounds_of_partition(relid); /* Invalidate PartParentInfo cache if needed */ - partitioned_table = forget_parent_of_partition(relid, &search); + parent_relid = forget_parent_of_partition(relid, NULL); - switch (search) + /* It *might have been a partition*, invalidate parent */ + if (OidIsValid(parent_relid)) { - /* It is (or was) a valid partition */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - elog(DEBUG2, "Invalidation message for partition %u [%u]", - relid, MyProcPid); - - delay_invalidation_parent_rel(partitioned_table); - } - break; - - /* Both syscache and pathman's cache say it isn't a partition */ - case PPS_ENTRY_NOT_FOUND: - { - Assert(partitioned_table == InvalidOid); - - /* Which means that 'relid' might be parent */ - if (relid != InvalidOid) - delay_invalidation_vague_rel(relid); -#ifdef NOT_USED - elog(DEBUG2, "Invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif - } - break; + delay_invalidation_parent_rel(parent_relid); - /* We can't say anything (state is not transactional) */ - case PPS_NOT_SURE: - { - elog(DEBUG2, "Invalidation message for vague relation %u [%u]", - relid, MyProcPid); - - delay_invalidation_vague_rel(relid); - } - break; + elog(DEBUG2, "Invalidation message for partition %u [%u]", + relid, MyProcPid); + } + /* We can't say, perform full invalidation procedure */ + else + { + delay_invalidation_vague_rel(relid); - default: - elog(ERROR, "Not implemented yet (%s)", - CppAsString(pathman_relcache_hook)); - break; + elog(DEBUG2, "Invalidation message for vague relation %u [%u]", + relid, MyProcPid); } } diff --git a/src/relation_info.c 
b/src/relation_info.c index 12965f16..cb33c29d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -94,7 +94,7 @@ static bool delayed_shutdown = false; /* pathman was dropped */ static bool try_invalidate_parent(Oid relid, Oid *parents, int parents_count); -static Oid try_syscache_parent_search(Oid partition, PartParentSearch *status); +static Oid try_catalog_parent_search(Oid partition, PartParentSearch *status); static Oid get_parent_of_partition_internal(Oid partition, PartParentSearch *status, HASHACTION action); @@ -1089,16 +1089,16 @@ get_parent_of_partition_internal(Oid partition, } /* Try fetching parent from syscache if 'status' is provided */ else if (status) - parent = try_syscache_parent_search(partition, status); + parent = try_catalog_parent_search(partition, status); else parent = InvalidOid; /* we don't have to set status */ return parent; } -/* Try to find parent of a partition using syscache & PATHMAN_CONFIG */ +/* Try to find parent of a partition using catalog & PATHMAN_CONFIG */ static Oid -try_syscache_parent_search(Oid partition, PartParentSearch *status) +try_catalog_parent_search(Oid partition, PartParentSearch *status) { if (!IsTransactionState()) { From 86e280cbb6c7872746baf55808e33ace561ee3cc Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 8 Jun 2017 16:32:01 +0300 Subject: [PATCH 0613/1124] fix build on MSVC (Victor Wagner) --- src/include/compat/rowmarks_fix.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 4e441388..cce47f83 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -26,7 +26,7 @@ */ #if defined(ENABLE_PGPRO_PATCHES) && \ defined(ENABLE_ROWMARKS_FIX) && \ - defined(NATIVE_EXPAND_RTE_HOOK /* dependency */ ) + defined(NATIVE_EXPAND_RTE_HOOK) /* dependency */ #define NATIVE_PARTITIONING_ROWMARKS #endif From 7e2254024215b25fd2b64d8e63f723393b3f68e9 Mon Sep 17 00:00:00 2001 From: 
Dmitry Ivanov Date: Thu, 8 Jun 2017 16:41:42 +0300 Subject: [PATCH 0614/1124] add test case for DROP INDEX CONCURRENTLY --- expected/pathman_calamity.out | 19 +++++++++++++++++++ sql/pathman_calamity.sql | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9ae638b5..5d572c9c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1043,6 +1043,25 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; +/* + * ----------------------------------------------------------- + * Special tests (drop index concurrently to test snapshots) + * ----------------------------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +CREATE TABLE calamity.drop_index (val INT4 NOT NULL); +CREATE INDEX ON calamity.drop_index (val); +SELECT create_hash_partitions('calamity.drop_index', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +DROP INDEX CONCURRENTLY calamity.drop_index_0_val_idx; +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 3 other objects +DROP EXTENSION pg_pathman; /* * ------------------------------------------ * Special tests (uninitialized pg_pathman) diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..738a4e98 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -422,6 +422,25 @@ DROP EXTENSION pg_pathman; +/* + * ----------------------------------------------------------- + * Special tests (drop index concurrently to test snapshots) + * ----------------------------------------------------------- + */ + +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; + +CREATE TABLE calamity.drop_index (val INT4 NOT NULL); +CREATE INDEX ON calamity.drop_index (val); +SELECT create_hash_partitions('calamity.drop_index', 'val', 2); +DROP INDEX CONCURRENTLY 
calamity.drop_index_0_val_idx; + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; + + + /* * ------------------------------------------ * Special tests (uninitialized pg_pathman) From 59bd7f5f09e974e2f4c42894defbeaf8e1e4ec5d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 8 Jun 2017 17:27:47 +0300 Subject: [PATCH 0615/1124] move DROP INDEX CONCURRENTLY test to a proper place --- expected/pathman_calamity.out | 19 ------------------- expected/pathman_utility_stmt.out | 15 +++++++++++++++ sql/pathman_calamity.sql | 19 ------------------- sql/pathman_utility_stmt.sql | 19 +++++++++++++++++++ 4 files changed, 34 insertions(+), 38 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5d572c9c..9ae638b5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1043,25 +1043,6 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; -/* - * ----------------------------------------------------------- - * Special tests (drop index concurrently to test snapshots) - * ----------------------------------------------------------- - */ -CREATE SCHEMA calamity; -CREATE EXTENSION pg_pathman; -CREATE TABLE calamity.drop_index (val INT4 NOT NULL); -CREATE INDEX ON calamity.drop_index (val); -SELECT create_hash_partitions('calamity.drop_index', 'val', 2); - create_hash_partitions ------------------------- - 2 -(1 row) - -DROP INDEX CONCURRENTLY calamity.drop_index_0_val_idx; -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 3 other objects -DROP EXTENSION pg_pathman; /* * ------------------------------------------ * Special tests (uninitialized pg_pathman) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 19bad191..b8d8ad31 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -366,4 +366,19 @@ WHERE r.conrelid = 'rename.plain_test'::regclass 
AND r.contype = 'c'; DROP SCHEMA rename CASCADE; NOTICE: drop cascades to 7 other objects +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; +CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; +DROP SCHEMA drop_index CASCADE; +NOTICE: drop cascades to 3 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 738a4e98..881cebbd 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -422,25 +422,6 @@ DROP EXTENSION pg_pathman; -/* - * ----------------------------------------------------------- - * Special tests (drop index concurrently to test snapshots) - * ----------------------------------------------------------- - */ - -CREATE SCHEMA calamity; -CREATE EXTENSION pg_pathman; - -CREATE TABLE calamity.drop_index (val INT4 NOT NULL); -CREATE INDEX ON calamity.drop_index (val); -SELECT create_hash_partitions('calamity.drop_index', 'val', 2); -DROP INDEX CONCURRENTLY calamity.drop_index_0_val_idx; - -DROP SCHEMA calamity CASCADE; -DROP EXTENSION pg_pathman; - - - /* * ------------------------------------------ * Special tests (uninitialized pg_pathman) diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 7dc9dd2f..a0d4ae0e 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -2,6 +2,8 @@ CREATE EXTENSION pg_pathman; + + /* * Test COPY */ @@ -159,6 +161,7 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; DROP SCHEMA copy_stmt_hooking CASCADE; + /* * Test auto check constraint renaming */ @@ -215,4 +218,20 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; DROP SCHEMA rename CASCADE; + + +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; + 
+CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; + +DROP SCHEMA drop_index CASCADE; + + + DROP EXTENSION pg_pathman; From db001cbcee766a57413572c1b8fece94434f569b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 9 Jun 2017 14:09:07 +0300 Subject: [PATCH 0616/1124] fix RuntimeAppend + dropped columns --- expected/pathman_runtime_nodes.out | 31 ++++++++++++++++++++++++++++++ sql/pathman_runtime_nodes.sql | 9 +++++++++ src/nodes_common.c | 27 ++++++++++++++++++++++++-- 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index ef9aaa93..d49343b9 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -403,6 +403,37 @@ where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < t (1 row) +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! 
*/ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects set enable_hashjoin = off; set enable_mergejoin = off; select from test.runtime_test_4 diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index e5cf17a5..b54c7571 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -312,6 +312,15 @@ join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; select count(*) = 0 from pathman.pathman_partition_list where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! 
*/ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; +drop table test.dropped_cols cascade; + set enable_hashjoin = off; set enable_mergejoin = off; diff --git a/src/nodes_common.c b/src/nodes_common.c index 7688bb07..7a4b71fe 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -141,18 +141,41 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) foreach (lc1, pulled_vars) { - Var *tlist_var = (Var *) lfirst(lc1); + Var *tlist_var = (Var *) lfirst(lc1); + bool found_column = false; + AttrNumber attnum; - AttrNumber attnum = 0; + /* Skip system attributes */ + if (tlist_var->varattno < InvalidAttrNumber) + continue; + + attnum = 0; foreach (lc2, appinfo->translated_vars) { Var *translated_var = (Var *) lfirst(lc2); + /* Don't forget to inc 'attunum'! */ attnum++; + /* Skip dropped columns */ + if (!translated_var) + continue; + + /* Find this column in list of parent table columns */ if (translated_var->varattno == tlist_var->varattno) + { tlist_var->varattno = attnum; + found_column = true; /* successful mapping */ + } } + + /* Raise ERROR if mapping failed */ + if (!found_column) + elog(ERROR, + "table \"%s\" has no attribute %d of partition \"%s\"", + get_rel_name_or_relid(appinfo->parent_relid), + tlist_var->varoattno, + get_rel_name_or_relid(appinfo->child_relid)); } ChangeVarNodes((Node *) temp_tlist, From 11e184e47b218b61aec1e68b2b6eea265ba69773 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 9 Jun 2017 18:51:45 +0300 Subject: [PATCH 0617/1124] bump dev version to 1.5 --- Makefile | 2 +- expected/pathman_calamity.out | 2 +- pg_pathman.control | 2 +- src/include/init.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 8b8fa036..4295350e 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ override PG_CPPFLAGS += -I$(CURDIR)/src/include EXTENSION = pg_pathman -EXTVERSION = 1.4 +EXTVERSION = 
1.5 DATA_built = pg_pathman--$(EXTVERSION).sql diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9ae638b5..251ec31c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10401 + 10500 (1 row) set client_min_messages = NOTICE; diff --git a/pg_pathman.control b/pg_pathman.control index 0d6af5d3..138b26c6 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.4' +default_version = '1.5' module_pathname = '$libdir/pg_pathman' diff --git a/src/include/init.h b/src/include/init.h index 262d48a0..3f1790ce 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -154,10 +154,10 @@ simpify_mcxt_name(MemoryContext mcxt) /* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010400 +#define LOWEST_COMPATIBLE_FRONT 0x010500 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010401 +#define CURRENT_LIB_VERSION 0x010500 void *pathman_cache_search_relid(HTAB *cache_table, From 62e49520f87011c2b778083aa3c1d50ff9e69f74 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 9 Jun 2017 18:53:41 +0300 Subject: [PATCH 0618/1124] version 1.4.1 in META.json --- META.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/META.json b/META.json index d4c01616..267f5f80 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.0", + "version": "1.4.1", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": 
"1.4.0", + "version": "1.4.1", "abstract": "Partitioning tool" } }, From 07a9c885eba3962043a1b6a1b7c74d00c01ffb50 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 16 Jun 2017 19:12:45 +0300 Subject: [PATCH 0619/1124] Copy unlogged attribute from parent relation --- src/partition_creation.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 412b3f36..9b599af2 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -671,6 +671,8 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { + Relation parentrel; + /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -680,7 +682,8 @@ create_single_partition_internal(Oid parent_relid, *parent_nsp_name; /* Elements of the "CREATE TABLE" query tree */ - RangeVar *parent_rv; + RangeVar *parent_rv, + *newrel_rv = copyObject(partition_rv); TableLikeClause like_clause; CreateStmt create_stmt; List *create_stmts; @@ -730,7 +733,10 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - Assert(partition_rv); + /* Copy attributes */ + parentrel = heap_open(parent_relid, NoLock); + newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; + heap_close(parentrel, NoLock); /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) @@ -745,7 +751,7 @@ create_single_partition_internal(Oid parent_relid, /* Initialize CreateStmt structure */ NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = copyObject(partition_rv); + create_stmt.relation = newrel_rv; create_stmt.tableElts = list_make1(copyObject(&like_clause)); create_stmt.inhRelations = list_make1(copyObject(parent_rv)); create_stmt.ofTypename = NULL; From 96376f191117ef2edb26a335859b34106c37d67a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 19 Jun 2017 15:51:46 +0300 Subject: [PATCH 
0620/1124] Copy WITH options to partitions --- expected/pathman_basic.out | 28 +++++++++++++++++++ sql/pathman_basic.sql | 13 +++++++++ src/partition_creation.c | 55 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 69c1458d..4e3f53b0 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1339,6 +1339,34 @@ NOTICE: 1000 rows copied from test.num_range_rel_3 DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 297c4097..6a209433 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -390,6 +390,19 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + 
SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; +DROP TABLE test.range_rel CASCADE; + /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, diff --git a/src/partition_creation.c b/src/partition_creation.c index 9b599af2..669295c5 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,6 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static void copy_relation_attributes(Oid partition_relid, Datum reloptions); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -671,6 +672,7 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { + HeapTuple tuple = NULL; Relation parentrel; /* Value to be returned */ @@ -693,6 +695,7 @@ create_single_partition_internal(Oid parent_relid, Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ + Datum reloptions = (Datum) 0; /* Lock parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -736,6 +739,19 @@ create_single_partition_internal(Oid parent_relid, /* Copy attributes */ parentrel = heap_open(parent_relid, NoLock); newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; + if (parentrel->rd_options) + { + bool isNull; + + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + reloptions = SysCacheGetAttr(RELOID, 
tuple, Anum_pg_class_reloptions, + &isNull); + if (isNull) + reloptions = (Datum) 0; + } heap_close(parentrel, NoLock); /* If no 'tablespace' is provided, get parent's tablespace */ @@ -787,6 +803,10 @@ create_single_partition_internal(Oid parent_relid, partition_relid = create_table_using_stmt((CreateStmt *) cur_stmt, child_relowner).objectId; + /* Copy attributes to partition */ + if (reloptions) + copy_relation_attributes(partition_relid, reloptions); + /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -823,6 +843,9 @@ create_single_partition_internal(Oid parent_relid, if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); + if (tuple != NULL) + ReleaseSysCache(tuple); + return partition_relid; } @@ -1114,6 +1137,38 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) FunctionCallInvoke(©_fkeys_proc_fcinfo); } +/* Copy attributes to partition. Updates partition's tuple in pg_class */ +static void +copy_relation_attributes(Oid partition_relid, Datum reloptions) +{ + Relation classRel; + HeapTuple tuple, + newtuple; + Datum new_val[Natts_pg_class]; + bool new_null[Natts_pg_class], + new_repl[Natts_pg_class]; + + classRel = heap_open(RelationRelationId, RowExclusiveLock); + tuple = SearchSysCacheCopy1(RELOID, + ObjectIdGetDatum(partition_relid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", + partition_relid); + + /* Fill in relpartbound value */ + memset(new_val, 0, sizeof(new_val)); + memset(new_null, false, sizeof(new_null)); + memset(new_repl, false, sizeof(new_repl)); + new_val[Anum_pg_class_reloptions - 1] = reloptions; + new_null[Anum_pg_class_reloptions - 1] = false; + new_repl[Anum_pg_class_reloptions - 1] = true; + newtuple = heap_modify_tuple(tuple, RelationGetDescr(classRel), + new_val, new_null, new_repl); + CatalogTupleUpdate(classRel, &newtuple->t_self, newtuple); + heap_freetuple(newtuple); + heap_close(classRel, RowExclusiveLock); 
+} + /* * ----------------------------- From be634f0b05837260a4f822f90607895d70fe5016 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 19 Jun 2017 17:12:30 +0300 Subject: [PATCH 0621/1124] improved support for rowmarks (select for share; update; delete; etc) --- expected/pathman_rowmarks.out | 142 ++++++++++++++++++++++++++ sql/pathman_rowmarks.sql | 34 +++++++ src/compat/rowmarks_fix.c | 162 ++++-------------------------- src/hooks.c | 10 -- src/include/compat/rowmarks_fix.h | 13 ++- src/pg_pathman.c | 57 ++++++----- src/planner_tree_modification.c | 1 - 7 files changed, 234 insertions(+), 185 deletions(-) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index e66c41d9..3a0dac8f 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -168,6 +168,148 @@ FOR SHARE; 6 (1 row) +/* Check updates (plan) */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(8 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------------- + Update on second + -> Hash Join + Hash Cond: (second.id = first_0.id) + -> Seq Scan on second + -> Hash + -> HashAggregate + Group Key: first_0.id + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN 
+--------------------------------------------- + Update on second + -> Hash Semi Join + Hash Cond: (second.id = first_0.id) + -> Seq Scan on second + -> Hash + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(8 rows) + +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(8 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------------- + Delete on second + -> Hash Join + Hash Cond: (second.id = first_0.id) + -> Seq Scan on second + -> Hash + -> HashAggregate + Group Key: first_0.id + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id 
IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +--------------------------------------------- + Delete on second + -> Hash Semi Join + Hash Cond: (second.id = first_0.id) + -> Seq Scan on second + -> Hash + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + DROP SCHEMA rowmarks CASCADE; NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table rowmarks.first diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 72e40b8e..0da9eb00 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -1,6 +1,8 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; + + CREATE TABLE rowmarks.first(id int NOT NULL); CREATE TABLE rowmarks.second(id int NOT NULL); @@ -56,6 +58,38 @@ WHERE id = (SELECT id FROM rowmarks.second FOR UPDATE) FOR SHARE; +/* Check updates (plan) */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + +/* Check deletes (plan) */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + + DROP SCHEMA rowmarks CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 21259e66..879371f6 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -14,165 +14,39 @@ #include "access/sysattr.h" #include "catalog/pg_type.h" -#include "nodes/relation.h" #include "nodes/nodeFuncs.h" +#include "optimizer/planmain.h" #include "utils/builtins.h" #include "utils/rel.h" #ifndef NATIVE_PARTITIONING_ROWMARKS -/* Special column name for rowmarks */ -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - -static void lock_rows_visitor(Plan *plan, void *context); -static List *get_tableoids_list(List *tlist); - - -/* Final rowmark processing for partitioned tables */ void -postprocess_lock_rows(List *rtable, Plan *plan) -{ - plan_tree_walker(plan, lock_rows_visitor, rtable); -} - -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. 
- */ -void -rowmark_add_tableoids(Query *parse) -{ - ListCell *lc; - - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } -} - -/* - * Extract target entries with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) +append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) { - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if 
(!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); + Var *var; + char resname[32]; + TargetEntry *tle; - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - finished_tes = lappend(finished_tes, te); - } - } + tle = makeTargetEntry((Expr *) var, + list_length(root->processed_tlist) + 1, + pstrdup(resname), + true); - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); + root->processed_tlist = lappend(root->processed_tlist, tle); - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } + add_vars_to_targetlist(root, list_make1(var), bms_make_singleton(0), true); } #endif /* NATIVE_PARTITIONING_ROWMARKS */ diff --git a/src/hooks.c b/src/hooks.c index b4f1c5d5..c3623418 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -276,13 +276,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, root->parse->resultRelation 
== rti) return; -/* It's better to exit, since RowMarks might be broken (hook aims to fix them) */ -#ifndef NATIVE_EXPAND_RTE_HOOK - if (root->parse->commandType != CMD_SELECT && - root->parse->commandType != CMD_INSERT) - return; -#endif - /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) return; @@ -545,9 +538,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); - /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index cce47f83..c03e0155 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -18,6 +18,7 @@ #include "postgres.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" +#include "nodes/relation.h" /* @@ -31,12 +32,14 @@ #endif -#ifdef NATIVE_PARTITIONING_ROWMARKS -#define postprocess_lock_rows(rtable, plan) ( (void) true ) -#define rowmark_add_tableoids(parse) ( (void) true ) +#ifndef NATIVE_PARTITIONING_ROWMARKS + +void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); + #else -void postprocess_lock_rows(List *rtable, Plan *plan); -void rowmark_add_tableoids(Query *parse); + +#define append_tle_for_rowmark(root, rc) ( (void) true ) + #endif /* NATIVE_PARTITIONING_ROWMARKS */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7715c531..1ceba04c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -11,6 +11,7 @@ #include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" +#include "compat/rowmarks_fix.h" #include "init.h" #include "hooks.h" @@ -410,6 +411,37 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, root->total_table_pages += (double) child_rel->pages; + /* Create 
rowmarks required for child rels */ + parent_rowmark = get_plan_rowmark(root->rowMarks, parent_rti); + if (parent_rowmark) + { + child_rowmark = makeNode(PlanRowMark); + + child_rowmark->rti = childRTindex; + child_rowmark->prti = parent_rti; + child_rowmark->rowmarkId = parent_rowmark->rowmarkId; + /* Reselect rowmark type, because relkind might not match parent */ + child_rowmark->markType = select_rowmark_type(child_rte, + parent_rowmark->strength); + child_rowmark->allMarkTypes = (1 << child_rowmark->markType); + child_rowmark->strength = parent_rowmark->strength; + child_rowmark->waitPolicy = parent_rowmark->waitPolicy; + child_rowmark->isParent = false; + + root->rowMarks = lappend(root->rowMarks, child_rowmark); + + /* Adjust tlist for RowMarks (see planner.c) */ + if (!parent_rowmark->isParent && !root->parse->setOperations) + { + append_tle_for_rowmark(root, parent_rowmark); + } + + /* Include child's rowmark type in parent's allMarkTypes */ + parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; + parent_rowmark->isParent = true; + } + + /* Build an AppendRelInfo for this child */ appinfo = makeNode(AppendRelInfo); appinfo->parent_relid = parent_rti; @@ -519,31 +551,6 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Close child relations, but keep locks */ heap_close(child_relation, NoLock); - - /* Create rowmarks required for child rels */ - parent_rowmark = get_plan_rowmark(root->rowMarks, parent_rti); - if (parent_rowmark) - { - child_rowmark = makeNode(PlanRowMark); - - child_rowmark->rti = childRTindex; - child_rowmark->prti = parent_rti; - child_rowmark->rowmarkId = parent_rowmark->rowmarkId; - /* Reselect rowmark type, because relkind might not match parent */ - child_rowmark->markType = select_rowmark_type(child_rte, - parent_rowmark->strength); - child_rowmark->allMarkTypes = (1 << child_rowmark->markType); - child_rowmark->strength = parent_rowmark->strength; - child_rowmark->waitPolicy = 
parent_rowmark->waitPolicy; - child_rowmark->isParent = false; - - root->rowMarks = lappend(root->rowMarks, child_rowmark); - - /* Include child's rowmark type in parent's allMarkTypes */ - parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; - parent_rowmark->isParent = true; - } - return childRTindex; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 02f20f51..e90bf029 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -158,7 +158,6 @@ pathman_transform_query_walker(Node *node, void *context) assign_query_id(query); /* Apply Query tree modifiers */ - rowmark_add_tableoids(query); disable_standard_inheritance(query); handle_modification_query(query, (ParamListInfo) context); From 725da00d1aed101573819d92276f59a688abbd01 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 19 Jun 2017 17:27:47 +0300 Subject: [PATCH 0622/1124] make regression tests more stable --- expected/pathman_rowmarks.out | 104 ++++++++++++++++++---------------- sql/pathman_rowmarks.sql | 8 +++ 2 files changed, 64 insertions(+), 48 deletions(-) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 3a0dac8f..a22d8168 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -169,6 +169,8 @@ FOR SHARE; (1 row) /* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); @@ -187,38 +189,38 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------- Update on 
second - -> Hash Join - Hash Cond: (second.id = first_0.id) - -> Seq Scan on second - -> Hash - -> HashAggregate - Group Key: first_0.id - -> Append - -> Seq Scan on first_0 - Filter: (id < 1) - -> Seq Scan on first_1 - Filter: (id < 1) - -> Seq Scan on first_2 - Filter: (id < 1) - -> Seq Scan on first_3 - Filter: (id < 1) - -> Seq Scan on first_4 - Filter: (id < 1) + -> Nested Loop + Join Filter: (second.id = first_0.id) + -> HashAggregate + Group Key: first_0.id + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) + -> Materialize + -> Seq Scan on second (18 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +----------------------------------------------- Update on second - -> Hash Semi Join - Hash Cond: (second.id = first_0.id) + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) -> Seq Scan on second - -> Hash + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id = 1) @@ -242,6 +244,8 @@ RETURNING *, tableoid::regclass; Filter: (id = 1) (8 rows) +SET enable_hashjoin = t; +SET enable_mergejoin = t; /* Check updates (execution) */ UPDATE rowmarks.second SET id = 1 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) @@ -253,6 +257,8 @@ RETURNING *, tableoid::regclass; (2 rows) /* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); @@ -271,38 +277,38 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); EXPLAIN 
(COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------- Delete on second - -> Hash Join - Hash Cond: (second.id = first_0.id) - -> Seq Scan on second - -> Hash - -> HashAggregate - Group Key: first_0.id - -> Append - -> Seq Scan on first_0 - Filter: (id < 1) - -> Seq Scan on first_1 - Filter: (id < 1) - -> Seq Scan on first_2 - Filter: (id < 1) - -> Seq Scan on first_3 - Filter: (id < 1) - -> Seq Scan on first_4 - Filter: (id < 1) + -> Nested Loop + Join Filter: (second.id = first_0.id) + -> HashAggregate + Group Key: first_0.id + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) + -> Materialize + -> Seq Scan on second (18 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +----------------------------------------------- Delete on second - -> Hash Semi Join - Hash Cond: (second.id = first_0.id) + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) -> Seq Scan on second - -> Hash + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id = 1) @@ -310,6 +316,8 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = Filter: (id = 2) (10 rows) +SET enable_hashjoin = t; +SET enable_mergejoin = t; DROP SCHEMA rowmarks CASCADE; NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table rowmarks.first diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 0da9eb00..b60c185a 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -59,6 +59,8 @@ WHERE id = (SELECT id FROM 
rowmarks.second FOR SHARE; /* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); @@ -72,6 +74,8 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; +SET enable_hashjoin = t; +SET enable_mergejoin = t; /* Check updates (execution) */ UPDATE rowmarks.second SET id = 1 @@ -79,6 +83,8 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = RETURNING *, tableoid::regclass; /* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); @@ -88,6 +94,8 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +SET enable_hashjoin = t; +SET enable_mergejoin = t; From ea8054a76842795cc15324c4de9a2b02bea88af9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 19 Jun 2017 18:41:03 +0300 Subject: [PATCH 0623/1124] add compatibility code for 9.5 --- Makefile | 4 +- expected/pathman_rowmarks.out | 5 + expected/pathman_rowmarks_1.out | 306 +++++++++++++++++++++ sql/pathman_rowmarks.sql | 6 + src/compat/expand_rte_hook.c | 59 ---- src/compat/rowmarks_fix.c | 163 ++++++++++- src/hooks.c | 11 +- src/include/compat/debug_compat_features.h | 4 - src/include/compat/expand_rte_hook.h | 37 --- src/include/compat/rowmarks_fix.h | 26 +- src/pg_pathman.c | 4 - src/planner_tree_modification.c | 2 +- 12 files changed, 502 
insertions(+), 125 deletions(-) create mode 100644 expected/pathman_rowmarks_1.out delete mode 100644 src/compat/expand_rte_hook.c delete mode 100644 src/include/compat/expand_rte_hook.h diff --git a/Makefile b/Makefile index 8b8fa036..5a2d01f3 100644 --- a/Makefile +++ b/Makefile @@ -7,8 +7,8 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ + $(WIN32RES) override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index a22d8168..3e37c57f 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -1,3 +1,8 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE rowmarks.first(id int NOT NULL); diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out new file mode 100644 index 00000000..1ae02cf2 --- /dev/null +++ b/expected/pathman_rowmarks_1.out @@ -0,0 +1,306 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + 
create_hash_partitions +------------------------ + 5 +(1 row) + +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? +---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first 
+WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +-------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Seq Scan on first + Filter: (id = 1) +(7 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Update on second + -> Nested Loop + Join Filter: (second.id = first.id) + -> HashAggregate + Group Key: first.id + -> Seq Scan on first + Filter: (id < 1) + -> Materialize + -> Seq Scan on second +(9 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: 
(second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) +(7 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +-------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Seq Scan on first + Filter: (id = 1) +(7 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+---------- +(0 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +-------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Materialize + -> Seq Scan on first + Filter: (id = 1) +(7 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Delete on second + -> Nested Loop + Join Filter: (second.id = first.id) + -> HashAggregate + Group Key: first.id + -> Seq Scan on first + Filter: (id < 1) + -> Materialize + -> Seq Scan on second +(9 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq 
Scan on second + -> Materialize + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) +(7 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP SCHEMA rowmarks CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table rowmarks.first +drop cascades to table rowmarks.second +drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index b60c185a..dac456d7 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ + CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; diff --git a/src/compat/expand_rte_hook.c b/src/compat/expand_rte_hook.c deleted file mode 100644 index 94c866b3..00000000 --- a/src/compat/expand_rte_hook.c +++ /dev/null @@ -1,59 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * expand_rte_hook.c - * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' - * NOTE: this hook exists in PostgresPro - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "compat/expand_rte_hook.h" -#include "relation_info.h" -#include "init.h" - -#include "postgres.h" -#include "optimizer/prep.h" - - -#ifdef NATIVE_EXPAND_RTE_HOOK - -static expand_inherited_rtentry_hook_type expand_inherited_rtentry_hook_next = NULL; - -static void pathman_expand_inherited_rtentry_hook(PlannerInfo *root, - RangeTblEntry *rte, - Index rti); - - -/* Initialize 'expand_inherited_rtentry_hook' */ -void -init_expand_rte_hook(void) -{ - expand_inherited_rtentry_hook_next = expand_inherited_rtentry_hook; - 
expand_inherited_rtentry_hook = pathman_expand_inherited_rtentry_hook; -} - - -/* Fix parent's RowMark (makes 'rowmarks_fix' pointless) */ -static void -pathman_expand_inherited_rtentry_hook(PlannerInfo *root, - RangeTblEntry *rte, - Index rti) -{ - PlanRowMark *oldrc; - - if (!IsPathmanReady()) - return; - - /* Check that table is partitioned by pg_pathman */ - if (!get_pathman_relation_info(rte->relid)) - return; - - /* HACK: fix rowmark for parent (for preprocess_targetlist() etc) */ - oldrc = get_plan_rowmark(root->rowMarks, rti); - if (oldrc) - oldrc->isParent = true; -} - -#endif /* NATIVE_EXPAND_RTE_HOOK */ diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 879371f6..66257d9d 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -20,9 +20,10 @@ #include "utils/rel.h" -#ifndef NATIVE_PARTITIONING_ROWMARKS +#if PG_VERSION_NUM >= 90600 +/* Add missing "tableoid" column for partitioned table */ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) { @@ -49,4 +50,162 @@ append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) add_vars_to_targetlist(root, list_make1(var), bms_make_singleton(0), true); } -#endif /* NATIVE_PARTITIONING_ROWMARKS */ + +#else + + +/* Special column name for rowmarks */ +#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) +#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) + + +static void lock_rows_visitor(Plan *plan, void *context); +static List *get_tableoids_list(List *tlist); + + +/* Final rowmark processing for partitioned tables */ +void +postprocess_lock_rows(List *rtable, Plan *plan) +{ + plan_tree_walker(plan, lock_rows_visitor, rtable); +} + +/* + * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions + * + * This is necessary since preprocess_targetlist() heavily + * depends on the 'inh' flag which we have to unset. + * + * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' + * relnames into 'tableoid:rowmarkId'. 
+ */ +void +rowmark_add_tableoids(Query *parse) +{ + ListCell *lc; + + /* Generate 'tableoid' for partitioned table rowmark */ + foreach (lc, parse->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(lc); + Oid parent = getrelid(rc->rti, parse->rtable); + Var *var; + TargetEntry *tle; + char resname[64]; + + /* Check that table is partitioned */ + if (!get_pathman_relation_info(parent)) + continue; + + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); + + /* Use parent's Oid as TABLEOID_STR's key (%u) */ + snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); + + tle = makeTargetEntry((Expr *) var, + list_length(parse->targetList) + 1, + pstrdup(resname), + true); + + /* There's no problem here since new attribute is junk */ + parse->targetList = lappend(parse->targetList, tle); + } +} + +/* + * Extract target entries with resnames beginning with TABLEOID_STR + * and var->varoattno == TableOidAttributeNumber + */ +static List * +get_tableoids_list(List *tlist) +{ + List *result = NIL; + ListCell *lc; + + foreach (lc, tlist) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *var = (Var *) te->expr; + + if (!IsA(var, Var)) + continue; + + /* Check that column name begins with TABLEOID_STR & it's tableoid */ + if (var->varoattno == TableOidAttributeNumber && + (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && + 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) + { + result = lappend(result, te); + } + } + + return result; +} + +/* + * Find 'TABLEOID_STR%u' attributes that were manually + * created for partitioned tables and replace Oids + * (used for '%u') with expected rc->rowmarkIds + */ +static void +lock_rows_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + LockRows *lock_rows = (LockRows *) plan; + Plan *lock_child = outerPlan(plan); + List *tableoids; + ListCell *lc; + + if (!IsA(lock_rows, LockRows)) + return; + + Assert(rtable && 
IsA(rtable, List) && lock_child); + + /* Select tableoid attributes that must be renamed */ + tableoids = get_tableoids_list(lock_child->targetlist); + if (!tableoids) + return; /* this LockRows has nothing to do with partitioned table */ + + foreach (lc, lock_rows->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + Oid parent_oid = getrelid(rc->rti, rtable); + ListCell *mark_lc; + List *finished_tes = NIL; /* postprocessed target entries */ + + foreach (mark_lc, tableoids) + { + TargetEntry *te = (TargetEntry *) lfirst(mark_lc); + const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); + Datum cur_oid_datum; + + cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); + + if (DatumGetObjectId(cur_oid_datum) == parent_oid) + { + char resname[64]; + + /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); + te->resname = pstrdup(resname); + + finished_tes = lappend(finished_tes, te); + } + } + + /* Remove target entries that have been processed in this step */ + foreach (mark_lc, finished_tes) + tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); + + if (list_length(tableoids) == 0) + break; /* nothing to do */ + } +} + + +#endif diff --git a/src/hooks.c b/src/hooks.c index c3623418..fbd72231 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -10,7 +10,6 @@ * ------------------------------------------------------------------------ */ -#include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" #include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" @@ -276,6 +275,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, root->parse->resultRelation == rti) return; +#ifdef LEGACY_ROWMARKS_95 + /* It's better to exit, since RowMarks might be broken */ + if (root->parse->commandType != CMD_SELECT && + root->parse->commandType != CMD_INSERT) + return; +#endif + /* Skip if this table is not allowed to act as parent (e.g. 
FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) return; @@ -538,6 +544,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { + /* Give rowmark-related attributes correct names */ + ExecuteForPlanTree(result, postprocess_lock_rows); + /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index c668d4ce..8caa6d44 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,9 +12,5 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -//#define ENABLE_EXPAND_RTE_HOOK //#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 - -/* Hacks for vanilla */ -#define ENABLE_ROWMARKS_FIX diff --git a/src/include/compat/expand_rte_hook.h b/src/include/compat/expand_rte_hook.h deleted file mode 100644 index 51b57dd3..00000000 --- a/src/include/compat/expand_rte_hook.h +++ /dev/null @@ -1,37 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * expand_rte_hook.h - * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' - * NOTE: this hook exists in PostgresPro - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef EXPAND_RTE_HOOK_H -#define EXPAND_RTE_HOOK_H - -#include "compat/debug_compat_features.h" - - -/* Does PostgreSQL have 'expand_inherited_rtentry_hook'? */ -/* TODO: fix this definition once PgPro contains 'expand_rte_hook' patch */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_EXPAND_RTE_HOOK) /* && ... 
*/ -#define NATIVE_EXPAND_RTE_HOOK -#endif - - -#ifdef NATIVE_EXPAND_RTE_HOOK - -void init_expand_rte_hook(void); - -#else - -#define init_expand_rte_hook() ( (void) true ) - -#endif /* NATIVE_EXPAND_RTE_HOOK */ - - -#endif /* EXPAND_RTE_HOOK_H */ diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index c03e0155..d2587cee 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -13,7 +13,6 @@ #define ROWMARKS_FIX_H #include "compat/debug_compat_features.h" -#include "compat/expand_rte_hook.h" #include "postgres.h" #include "nodes/parsenodes.h" @@ -21,26 +20,23 @@ #include "nodes/relation.h" -/* - * If PostgreSQL supports 'expand_inherited_rtentry_hook', - * our hacks are completely unnecessary. - */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_ROWMARKS_FIX) && \ - defined(NATIVE_EXPAND_RTE_HOOK) /* dependency */ -#define NATIVE_PARTITIONING_ROWMARKS -#endif - - -#ifndef NATIVE_PARTITIONING_ROWMARKS +#if PG_VERSION_NUM >= 90600 void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); +#define postprocess_lock_rows(rtable, plan) ( (void) true ) +#define rowmark_add_tableoids(parse) ( (void) true ) + #else -#define append_tle_for_rowmark(root, rc) ( (void) true ) +#define LEGACY_ROWMARKS_95 /* NOTE: can't fix 9.5, see PlannerInfo->processed_tlist */ + +#define append_tle_for_rowmark(root, rc) ( (void) true ) -#endif /* NATIVE_PARTITIONING_ROWMARKS */ +void postprocess_lock_rows(List *rtable, Plan *plan); +void rowmark_add_tableoids(Query *parse); + +#endif #endif /* ROWMARKS_FIX_H */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1ceba04c..4f61effc 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -9,7 +9,6 @@ * ------------------------------------------------------------------------ */ -#include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" #include "compat/rowmarks_fix.h" @@ -312,9 +311,6 @@ _PG_init(void) process_utility_hook_next = ProcessUtility_hook; 
ProcessUtility_hook = pathman_process_utility_hook; - /* Initialize PgPro-specific subsystems */ - init_expand_rte_hook(); - /* Initialize static data for all subsystems */ init_main_pathman_toggles(); init_relation_info_static_data(); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index e90bf029..32ed3f46 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -10,7 +10,6 @@ * ------------------------------------------------------------------------ */ -#include "compat/expand_rte_hook.h" #include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" @@ -158,6 +157,7 @@ pathman_transform_query_walker(Node *node, void *context) assign_query_id(query); /* Apply Query tree modifiers */ + rowmark_add_tableoids(query); disable_standard_inheritance(query); handle_modification_query(query, (ParamListInfo) context); From 0c4108e05e355b077893341ccdbde2ffd3248d2e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 27 Jun 2017 17:22:42 +0300 Subject: [PATCH 0624/1124] Remove lowering for expression in create_hash_partitions --- expected/pathman_basic.out | 2 +- expected/pathman_expressions.out | 12 ++++++------ hash.sql | 1 - 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 69c1458d..7128532c 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -18,7 +18,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM \set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, 
partition_data:=false); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 948fdd5e..204dcff7 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -234,7 +234,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using system attributes */ SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); ERROR: failed to analyze partitioning expression "xmin" @@ -244,7 +244,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', @@ -256,7 +256,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: failed to analyze partitioning expression "random()" @@ -266,7 +266,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement 
"SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using broken parentheses */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: failed to parse partitioning expression "value * value2))" @@ -276,7 +276,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); ERROR: failed to analyze partitioning expression "value * value3" @@ -287,7 +287,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; diff --git a/hash.sql b/hash.sql index 6bfd77a5..8cf9b19a 100644 --- a/hash.sql +++ b/hash.sql @@ -20,7 +20,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); From 
4d544c82714b8e0c91d30afd48543ff18dbe7f3e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 28 Jun 2017 17:57:14 +0300 Subject: [PATCH 0625/1124] Add docker files --- .travis.yml | 39 +++++++++++++++++++------------------- Dockerfile.tmpl | 21 ++++++++++++++++++++ src/utility_stmt_hooking.c | 6 ++++-- travis/pg-travis-test.sh | 2 +- 4 files changed, 46 insertions(+), 22 deletions(-) create mode 100644 Dockerfile.tmpl diff --git a/.travis.yml b/.travis.yml index fd0e57ed..2ea8bbdb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,28 +6,29 @@ dist: trusty language: c -compiler: - - clang - - gcc +services: + - docker -before_install: - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get -y install -qq wget ca-certificates; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-postgres.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-llvm.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get update -qq; fi +install: + - sed -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - docker-compose build -env: - global: - - LLVM_VER=4.0 - matrix: - - PG_VER=10 CHECK_CODE=true - - PG_VER=10 CHECK_CODE=false - - PG_VER=9.6 CHECK_CODE=true - - PG_VER=9.6 CHECK_CODE=false - - PG_VER=9.5 CHECK_CODE=true - - PG_VER=9.5 CHECK_CODE=false +script: + - docker-compose run tests -script: bash ./travis/pg-travis-test.sh +env: + - PG_VERSION=10 CHECK_CODE=true CC=clang + - PG_VERSION=10 CHECK_CODE=false CC=clang + - PG_VERSION=9.6 CHECK_CODE=true CC=clang + - PG_VERSION=9.6 CHECK_CODE=false CC=clang + - PG_VERSION=9.5 CHECK_CODE=true CC=clang + - PG_VERSION=9.5 CHECK_CODE=false CC=clang + - PG_VERSION=10 CHECK_CODE=true CC=gcc + - PG_VERSION=10 CHECK_CODE=false CC=gcc + - PG_VERSION=9.6 CHECK_CODE=true CC=gcc + - PG_VERSION=9.6 CHECK_CODE=false CC=gcc + - PG_VERSION=9.5 CHECK_CODE=true CC=gcc + - PG_VERSION=9.5 CHECK_CODE=false CC=gcc after_success: - bash <(curl -s 
https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl new file mode 100644 index 00000000..9efe71df --- /dev/null +++ b/Dockerfile.tmpl @@ -0,0 +1,21 @@ +FROM postgres:${PG_VERSION}-alpine + +ENV LANG=C.UTF-8 +RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ + apk --no-cache add cmocka cppcheck cmake python3 gcc make ${CC} +RUN apk add --no-cache musl-dev cmocka-dev +RUN pip3 install testgres + +ENV PGDATA=/pg/data +RUN mkdir -p /pg/data && \ + mkdir /pg/pg_pathman && \ + chown postgres:postgres ${PGDATA} && \ + chmod a+rwx /usr/local/lib/postgresql && \ + chmod a+rwx /usr/local/share/postgresql/extension + +ADD . /pg/pg_pathman +WORKDIR /pg/pg_pathman +RUN chmod -R go+rwX /pg/pg_pathman +USER postgres +RUN PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f05aae27..e0196b45 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -31,8 +31,10 @@ #include "utils/memutils.h" #include "utils/rls.h" -#include "libpq/libpq.h" - +/* we avoid includig libpq.h because it requires openssl.h */ +#include "libpq/pqcomm.h" +extern ProtocolVersion FrontendProtocol; +extern void pq_endmsgread(void); /* Determine whether we should enable COPY or not (PostgresPro has a fix) */ #if defined(WIN32) && \ diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 5c0ec44e..890897a4 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -68,7 +68,7 @@ CLUSTER_PATH=$(pwd)/test_cluster $initdb_path -D $CLUSTER_PATH -U $USER -A trust # build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" +make USE_PGXS=1 CC=${CC} PG_CONFIG=$config_path CFLAGS_SL="$($config_path 
--cflags_sl) -coverage" sudo make install USE_PGXS=1 PG_CONFIG=$config_path # check build From 0571525470a2096cf801238227f723f0539c625e Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 12:19:19 +0300 Subject: [PATCH 0626/1124] Fix tests --- .gitignore | 1 + .travis.yml | 5 +- Dockerfile.tmpl | 19 +- docker-compose.yml | 2 + run_tests.sh | 75 ++ tests/cmocka/Makefile | 5 +- tests/cmocka/cmocka-1.1.1.tar.xz | Bin 85648 -> 0 bytes tests/cmocka/cmockery.c | 1770 ++++++++++++++++++++++++++++++ tests/cmocka/cmockery.h | 484 ++++++++ tests/cmocka/rangeset_tests.c | 18 +- travis/dep-ubuntu-llvm.sh | 4 - travis/dep-ubuntu-postgres.sh | 4 - travis/llvm-snapshot.gpg.key | 52 - travis/pg-travis-test.sh | 139 --- travis/postgresql.gpg.key | 77 -- 15 files changed, 2354 insertions(+), 301 deletions(-) create mode 100644 docker-compose.yml create mode 100755 run_tests.sh delete mode 100644 tests/cmocka/cmocka-1.1.1.tar.xz create mode 100755 tests/cmocka/cmockery.c create mode 100755 tests/cmocka/cmockery.h delete mode 100755 travis/dep-ubuntu-llvm.sh delete mode 100755 travis/dep-ubuntu-postgres.sh delete mode 100644 travis/llvm-snapshot.gpg.key delete mode 100755 travis/pg-travis-test.sh delete mode 100644 travis/postgresql.gpg.key diff --git a/.gitignore b/.gitignore index 9cf8da8f..3eb50e54 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ regression.out pg_pathman--*.sql tags cscope* +Dockerfile diff --git a/.travis.yml b/.travis.yml index 2ea8bbdb..b498e674 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ services: - docker install: - - sed -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: @@ -18,11 +18,8 @@ script: env: - PG_VERSION=10 CHECK_CODE=true CC=clang - - PG_VERSION=10 CHECK_CODE=false CC=clang - PG_VERSION=9.6 CHECK_CODE=true CC=clang 
- - PG_VERSION=9.6 CHECK_CODE=false CC=clang - PG_VERSION=9.5 CHECK_CODE=true CC=clang - - PG_VERSION=9.5 CHECK_CODE=false CC=clang - PG_VERSION=10 CHECK_CODE=true CC=gcc - PG_VERSION=10 CHECK_CODE=false CC=gcc - PG_VERSION=9.6 CHECK_CODE=true CC=gcc diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 9efe71df..b74538fc 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,14 +1,15 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add cmocka cppcheck cmake python3 gcc make ${CC} -RUN apk add --no-cache musl-dev cmocka-dev -RUN pip3 install testgres +ENV LANG=C.UTF-8 PGDATA=/pg/data +RUN apk --no-cache add python3 gcc make musl-dev ${CC} -ENV PGDATA=/pg/data -RUN mkdir -p /pg/data && \ +RUN if ${CHECK_CODE} -eq "true" && ${CC} -eq "gcc"; then \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ + echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ + apk --no-cache add cppcheck; \ + fi && \ + pip3 install testgres && \ + mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ chown postgres:postgres ${PGDATA} && \ chmod a+rwx /usr/local/lib/postgresql && \ @@ -18,4 +19,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -RUN PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..471ab779 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,2 @@ +tests: + build: . 
diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 00000000..b87c00e3 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +set -eux + +id + +# perform code analysis if necessary +if [ $CHECK_CODE = "true" ]; then + + if [ "$CC" = "clang" ]; then + scan-build --status-bugs make USE_PGXS=1 || status=$? + exit $status + + elif [ "$CC" = "gcc" ]; then + cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ + --enable=warning,portability,performance \ + --suppress=redundantAssignment \ + --suppress=uselessAssignmentPtrArg \ + --suppress=incorrectStringBooleanError \ + --std=c89 src/*.c src/*.h 2> cppcheck.log + + if [ -s cppcheck.log ]; then + cat cppcheck.log + status=1 # error + fi + + exit $status + fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi + +# initialize database +initdb + +# build pg_pathman (using CFLAGS_SL for gcov) +make USE_PGXS=1 CFLAGS_SL="$(pg_config --cflags_sl) -coverage" +make USE_PGXS=1 install + +# check build +status=$? +if [ $status -ne 0 ]; then exit $status; fi + +# add pg_pathman to shared_preload_libraries and restart cluster 'test' +echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf +echo "port = 55435" >> $PGDATA/postgresql.conf +pg_ctl start -l /tmp/postgres.log -w + +# run regression tests +PGPORT=55435 make USE_PGXS=1 installcheck || status=$? + +# show diff if it exists +if test -f regression.diffs; then cat regression.diffs; fi + +set +u + +# run python tests +make USE_PGXS=1 python_tests || status=$? +if [ $status -ne 0 ]; then exit $status; fi + +set -u + +# run mock tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? 
+if [ $status -ne 0 ]; then exit $status; fi + +# remove useless gcov files +rm -f tests/cmocka/*.gcno +rm -f tests/cmocka/*.gcda + +#generate *.gcov files +gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h + +exit $status diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index d46ad869..2d4d8bff 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -3,16 +3,15 @@ TOP_SRC_DIR = ../../src CC = gcc CFLAGS += -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) -CFLAGS += -I$(CURDIR)/../../src/include +CFLAGS += -I$(CURDIR)/../../src/include -I. CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) -LDFLAGS = -lcmocka TEST_BIN = rangeset_tests OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ - missing_bitmapset.o rangeset_tests.o \ + missing_bitmapset.o rangeset_tests.o cmockery.o \ $(TOP_SRC_DIR)/rangeset.o diff --git a/tests/cmocka/cmocka-1.1.1.tar.xz b/tests/cmocka/cmocka-1.1.1.tar.xz deleted file mode 100644 index 7b25e7ff504a2fa1b8f7bb33abdb73fdfef9f860..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 85648 zcmV(fK>EM^H+ooF000E$*0e?f03iVu0001VFXf}<#2oMcT>vqgNp#i!F4oYh6SI@RU(QpFQN#cI8k7#Ud|7b7`vEDl1dQ~Yt%NG> zAl&LHlMzfr2D8pX0z8W8wy7j&V5vaAk!kaCi==(M!K9ou8;9)CIJ_f) zE+w{h_uBKy0-Q+x8?;1Oy2zGGEWw^HS_T07W-z$K^a-ySrqfTEp@rX{dae5q^jG`8i|)@cT>~iqA_aO zo6aasK$cFN8aKuu-uEYai=GP5>1PQ2KhhQ0I;}^6i+M=cQ$Y^*eh;j&R>Vn$#@9@% z8t$zvcn?@#Pmci+^Bh_#=Vm!uFHeb>#yKY>fQZZI{)L7h&Oud3VHX zOh$BTsHmefBBMj5gHoOrV|>xr`B@W@is60dpD9SYv**OSLX>wWI4?RDTa)%$cEsxn zWev8yJ38%{nS>2V6jaON7|2D%Cj7XQxnz?N@PyXZJddM(3D;B%A@Ko3RhbY=24ahoiM8Na@|L?u48gQ8HCpThNkR7a5 zOmE64P!9VWc`l*z2YGDw2r))$z}v}Z7{0j1+H6T7Tv%9T`mb0#t0dKGS`a(vt5%o- z&=vpW8vO~ek_Cf5qr0<Q9j zUsRE%G6zu!Fd2MFz~7EZg+s8WVf!KpDnrwvO(yvnCjNUk zjZa5vXJMS_0Tz`;XS%MbWQ~N*qW6kI*m+_>FRhB~`5m29PSf%8z%(P0UHa&IE0l)* zXgl&;f|y`5$>0k!-rH}j3XD`CVGY#&-Kejwn@Trc>IG9=%$3)$ob(WQZ-JPL5u(v^ 
zPk?#h3EeIw%~lTV)adycrIPj62-!gH4sDrQ#{uS5M$Jd-v9O#wmHqBqV_`#-vyr*L zB6-i4!dWjOGLN|}$5WQDv@Wuy?u@hBv44{)fz+4yyJcFMY7gvZn{CxR@s9eiv^HjW zBVB$E5JspVX(z~Z{wa41*`axKJlEnhzoE^Uwm9fV=%w*a(y{@tanJf_cq$cc?`Y@) z?ea}o0Ql0v%?qXi1-Hcs2g^1j&{qpzu^CBY{7!FEM?DFT#1e5Ms|W;Xn2YSdo$rgG z{`NLcFFIBhW>hgwuVH>82{otCH4q}%?_{rAy%QZ2u&M=3Gf&<5pPtQjSWLCD-wvJ1 z{l<5IV0{NB~o{K_@01dR^$^VC4+DO0v)t~ z5RpNIWDb3+pl&58Ab|n*t8SB^$j;R;9tDsO;L;2FGT1RNLKaa6X2h(!=XH_gd)0g0 zXpTU8bO3&?F*5LTY(PJL`C74$PA7c;5!2YGkSjqMN*GYOy9_C^fs_mbQZsbUliWEc16hA7 z$^3 zQfDPBAEeo6tYlff39@R;D*R*brM0dBlr(QG3m7B_RrLWVnsA!i&pRp8V1_oAbwN;0NT%m5(i`W(_?EU{(TXAE`O(Jx|LtTHl@IC{`mQ_yDVr{TRZ1IJ z2{|o+?q54<;GL;j*nJBNrwYyfK6&ijux*c3%|Kpn-;&)IwUSc?rY0CS;JD2kK`5CGq( z4!qBaLoRS-jG}eJ9+EoPWzk_<8bXMzMGUY(X`sY$O?ZTa=Mo1#Q|Wd<)H{s4%dqYD z$_YcJREJv5fb310<2iJjVr?~mpOYQ}=(27Xy?lZ-J(hrF_#nniNdKezFwTJERc=Z% zLQo^z7WGn6)3^88%6Zl=4<0EwLi(;8_&)7-TiHqR-n)Hv09Wk5#|$7UlJtTf$QXXc zL(pP>jf|-*a2Y4r5@}1w(D9>`He% zy6E3Ln8lUL4_ce8RV2Y@RYV<4XG`tX)PkoB!bN8MiY` z%r*lVsED}=qID^Ts0r113`oZqDRl2ZbHm?T(xXNR*feSY+XE4YvoxNwVd3ZJU2iWr zk+X&I>qKb$td5`I;(rfxL6U#kbpJB#*^e};vod$q>jnW7{mM#h zZ(7F&jmStwLUo;h3gW@>;dD*N^JQT|z5Qz$iDWWaZmYBUd3^Kokfn)NouRQ$IqAt@ zyDO_n--~@wVyzG%m0p$SB4S~&L=BOE@1%@FD<69w3OyQ>jtVTikf$_vbZylc3tKc7`6Q=bRveS3`CS=7=>H zpI@gvY{kM|P6}Ggo`Z=wH}!gsIyWj*=dlnhhlK%FMSTF)Wp_VI@_UWD`7FTfP_arF zw+OL+IAjELj8wGavD3&TP@jH~UnaZ&KG;{a*^}8~1C^I_wwv;qP(+Ha#M}Du>g}H? 
zm-b!{1)r`YZ^WB6r$D?HJy!Uut#M&+dSZx1KFwpxR*%fcE=;K7BqZrq4S04Yg?rcD zL3Pf0jMwD2_!rB|{S5zi*g73h+x-rTrI}v){6Ua7ctB*H%Qq z#hbtHEifJG{fZjWF~ydnEP_=Ux$+3E6Mpo3*;%S@HM$5G^4we$maSTip8}4qL2n&4 z_#y;F#i}wteBX-WYX%6EDml*N5m5uE{%%ByKOa3%SPt;lEd;#Bb*ba&$I`?5-2x6z zy(pDSBYDUmQoBG(G_@3yuaK+-A;PpDlfS zfr*53%5bHq^u)zF-!d(F`M1#pDt<6;b-qh%f>uKg=JGuPl3cAo0(A7Le31k!sHuF7 zt(>~9Kq5wDs(AD|-uxT^RC)O|GSFR*-Y7BNe|hcvR$w0aTDG)WaTQFPq$- zRgb5KyUah{`@hwQjvgi+U<2{pp}N_cE=Aa6u3< z^@*sGVC`7M+_e3(fRixQv!OOr7ovs!6%PwW_qt1-bV?1%nD@gY!hdZbbQ&7S4|dUv zk-1GJdylmbffno+u=!MKzLz=afJo3`p$f&<8hW5(rIW8LS#Y&+9n3luAEHy3z;qtpOid`rN;N1XY5UYB>?K7dXq>?e%i5uC_A8(b8o%C@ABBQzb{lgx! z5eOIBsK<0d@x`JVO1gUaEw}ny{6q46(Jlwd7HX-YxWJ^smvJM9ja`z0p&|ge5UG&Z z#pTdRU~xc{VXMM4y~@}a_y>-NKHJG29aU)gmhx!5Sh%GPuDv`-G+C#-%j!jqO$`50 zv!lgx2BuF-*q|mv|90Z2rYkhS;hzi=?k3SyTeWc@J+t6GmsQP=|BB8U|G29OeO9|( zLR*D5=2Nz+t{}mu2T6X@vhy)Tq>ED(h?0~g5ySfF<|XR8h0~Ix^6d+WccuseC|pbk zVVuFqG^T%jX9mNj;$51+@cMwKH_9}4pLu@DO8O3%=HBjA7>&>;UG z)wLxqMMuv`amKAE_jcWrOd|F*=F|_~#!H%_9Gs?;yjg%)B0}ANl=4gLviB1jF!Oyk zZJyH*E9yCz#){!5OMQs(-UOY$Q>pAX{NW{!{5g~)W=u#ik>QN}BVfr& zi%)=)sMs1zx>>#ao~K7bg@8X6b`^cNUfEf#Te~s4fyQ)eOA=HdjABU21o5ob8`x?X zXXe4SV{XbZeR0sRWnXFl)yJz;FzYEgzCeS+aGl;1)EY}hikz|B{y%MW|J5zxW_b1F z-<9^aou}WgwgLi_TM3c$pwf~MShUl!UgoB+<;aanb7@r*sMJkNf526T+zjaB9^$J^ zo8)T?0-j5Y$)|JjR*kKU(sr`I&p$bpaVdT!K}8pADw1{AGbP{Al>@VTOeLqo)y-~E z^*cgg9`3GlZE*pTg%iKklRBx}QpE|KAiU>7DOZ>7kY!*tpL_`-K|tQLMDw9;G^*sI z3dRojvFS3t^!=@zI=qCmm>&1<*cZSt#lKR4$|fq4jfvDd6l%RlY`kB6MQ$oKlgh;& zE6$ex7r_Dsc{1zI>Clu=AfW29XMri5v)RrQ+88^dadwdZKSTKiy#FRvB52f zTXPngA#&~eR%kO_lP->^_onp!Ww`Z&lmrNnNWCyUS0eJQFFK;F8=r);9|WAa+DaoR zypDN5=>g&s`f|X$>%L z?Z(d}⁢(OEzJiDyfA!LUJiSTG<#>K>V0i?r}!;e-}WH33|>^1UAq-=wAhN-ZfNz z4E4|`FR^}xum@zj9z>m>h%WCOz{jo4tqC35_=9?)PIz2|dv=VaAisteAP&q@Lrknf z-C{*7*Ea>5^$o%ft-f5TVqQ$#Q$7u@mEdI@j8otWNo@2C=}3`KUjeGq&Z+^gj|{Om z5QvY@|0X3rst9r61n{?42a1J=Hg{__`+aH!fKn9T06DxV1=8|ads^D))-atQv!lts zq^H8A4EsT-#``D&z0vqTeSLBM7M=Pc3}qI{=Qd@{zgK2#4vYTrK)|5-Jj!(2p2Gnd 
zhtW7TVlR4=UTDNdYP-s#V{iQ#s&mB?72CK3-=vk9gIypJHFX#LhzX%gzZ+n*mToob=r^Vc=#9&liV@{%@@}|&3iHuy!5qF2j{BW zGf4O!r*jstDv?&Wo!Tb*7WoIagc5; z-O9n;Wj0O`^SVI%3}`Qg%kuD0KV?Pnl;y$S786CTRdOT(mIZwp?cGit0PtYyRzi@N zCCt}S*8<2$oKDG`aYB;vn)TDWOwRS8Li5qMWJ9IBF-w`*%>;v{Z2>v*m{>vh1~JsF zqoGe@g1sh;kBD?cFB;EFf0oz6FzA1?F@S-!SdZ^X%DCl2SmyPc? za16G<*Y$%(2nMXqhiiM|-_B3C)N9URPgcSdLQEa0aR_9V46rPWz-jvs2{q_K>Mctz zLH{+P$oc}`wCp;JlzRKR;&6w4?e8vUm}0@y)AK`sw05Hg#b*QlG8e zlFtB|JpcU*4oCl;EW0yYK?<#|Ur+oVOBQmHXW3N{-1N7brwedB*9^T9Ou1`}XU5VH zIK-B{GYmCSar+;5#E1lYWV~VWcq**?i?a0`7*W{?&Efk62V)q#?&OZ$oB{aY zJh^6m#2_WbWXyY|7b99yAZO^Zt<6{}#oU$INNCV%akodJz!KTQ*<7q&Gme2hw#z^0 z?67S~Bpgw~hr%l*^h^K00NG{^vpG9hW}sd9-ncCOHs6ehFKn%cY4#TskCs73Fp$8+ zp7_R3PG@C-Yuk#)7Zz=X+=ZMa{=zp3E-{0EZw4JR?%j^D4nI>;l+0ZfoSUjd{%IoQPYO8dbb$^X+B#+VO(suKwJ43nrDZlJj(z1 z7J0TI)?=@Y=tIFJ|2*()Tuyyn_P()#+`PUQIdqu7*nlHE73zwO)ecK>$>T zkO5RPqX7sZ7_L;pQp|A5by9m;4RR!(ws-OH*$S6n5J zm-1~FlueRHh=^=P?<I^lAmB!IF?@b$}ZfWJB7QIT4gU$t5>tdMZex=ATi#XSJTAi|S#8eD~9 z9;xH}vgWu*gsX@#x3oa=T7_+izm6n9H-}_z5<(j~!tJJyXw-f1Fn>QQw2&}7YT+~m zKK-z5)S4;KSW9^zL*4;6X2+<@0nYCFXSH9m&+r z4&D@721$5{N*qJ#!kcszS9$xENe8Qy=)1pkfu0 zT2E_XW&T2Km{}4x*^vl?W^m#$rK19ek6py5h7mdz)vGU86{&pg!k`Gy9B!mzI3#)^ z(?tWl|0zttC4CR!ZAGohTq^CvtO^{828EFGk?LV;){h zYHo>Lkf$<_Vku%r_JoT}(h+p0HM}%{!uqgGX%jufMI9#EP3L`lsXc(8(dHxA8o0XJ zh&8K_yO!j>J5cT4UOlM_WaTgVKTHCFXElm<%k|Fkt22kUy&#J}KF775B*iT*4;RcKqNFL%^HQ-L*e&~E z#s?HLblF>D2s=%AK2qjJ9i~$AH&DX+E>jots+?-w8mNJNOEukKRlPNgEeg9IiI*ux z69G)VH89DX%s@U zJuZP5_&2F9Rm-P8f&jq<*pOSd!EG#e7#c5k?ckQsfw6#f5hfpkGJy1~UIcX}Mqp`%2dx--^UeX6+Y$hxge z`AC+R3$^s75|#8nNS@{SP~U0!ZjgI%iWVqmmO?H_QIjHOiT*+n7pf>Tg1$VSFzlUG z_~yVD@ww+0TaTka)o95BmB80E9!5_%)AbIA_wrZd8EOwPzc<=Uyo7f_54-)ut1fCl z_MFsY87aJ~2~t3A!tLKu?t0IjZzKRb2vMetgGyew|MxYN!$Y030rnw8lx!=9G`^#q zOO<6B;gaP+*IlYfg4uvIf%5u}~)z#-E_1fzH+Fg_FAaD2*U^ zjj0^5YG6&9YaW`rW|7_js4ex<0DMAL#1$a&k~1$84-#JIkBntYG{2PZ9-X&?-(7qt zBX$>tA_d2DEGfh&=FaVIpN2>4Mo$M$e9qxlWU7)^Llw{CIlW%2AlV z7~!V=hOx`s`4<7s0z~Nu7@@~Db-1y#JSz}5LB#-{yKv^4`T* 
z{V9&61GH=IGsUAmDCWJfS-~2wyM0_uw&txruyARCvHrmFPqeoCyVeo=uAG6OEchq) zHMh8RAlLG%Rt-47bEeDYIizi2NYz9fh`hOO5YHB7_67tv=HMEQIZ8MF#IxYE_6+q% zKbMERN}WTV`iUa8(<(+K+iY@)K*ek}Ga#|V2~10Wrn|^^8RD}KD8{E|4~zOwi*Vnk z#@Z}UMFXVr@xojRAk1?aZH*O9BuD5$;#Cwxpe#3$=UExTwQm0QmOP)gumB3;G>rt4 z{;byeX}yL3NfyGDB#l`wD#giy zXCSLK8C@dIU7`q37CSUKJ*bdBy%w(EM|AtY2TCAebCmcX(TgO_GJQb#o~SnQjGy7De-!1#ECx){=h z;`BW?me#}Iu3(a7_06kLDqp%qRN8(XQ+B&gocP+BQ(R^dZN^fB5uy>kcJb6Jsopmy zMF@1LU!NGvkd5ET9H5e)C+nOm_LeiW^*p+x^n+ThQ+Hb^8cPEP3|T#GY6r9VJ)kar zfKc}UMsUX7Q`?obr!yjnP`JShrr1jf>u{i%7uizGvx1t-tFQZLK)@4mD4Sr@h)i&TAP?+_of8;_6QoHaVh?$w%kaYjqTqFpwpAD^sW$v11}@0V|<54nn!r8hc=c;!D|OuPuPy0~&#n{E7zBU;ZLkXTjyx zLO$oGe=6^46-mt9T=`(L8^^oRG4hH4u%RzjuE%JT(v{Ygm*H?vEp<|N`hlY5dIvkx zBw9idxZ0LTnn_?gq|AD5E9uh?GyU&6U6m(FKxq#wq} zRU*9OeqTi)W4Q?jde2#yu)~4$^>bKB@2%d$d}-JI2pZHMvtmoh)`C0#460wkvFr5eSVLOtP;S3-h%4GEr!b-<%=0P`NP2h6DqV zBk;y^-g;@YWafFTA(quZ@$3SVNuJb^ry;6#M%cc$&_wnd7^%b;A`NmzHFQYz*;SWg%lr8%-R3l9 zH7c+xFlbrhgwdyYk3|86o_08-+>`{&NN@av{=eJ4U96~B=3`mhv)*2wQ$dY1_*=)7 zNQSzraIf?8@6rL#OKHuY?gFom;#g=QmuAiMFya2}8BCPr&cZ*TnV7grCd{7{QRiF$ zijzzda{SZYw_gn41fWD}l@j0AIK^sY@BK_#V@5t}m-E+;l$!`yi>u*z|LgXDT0Kd_ zOw%?m2Ef}uu3*mp*UQ?saN)ZboIm$is`k6HTvIDbfk<##$Id63F5BoCV;mg!QGZ1k zy~n%*E=XB)Z2H5=@C?yw-g!v$qOm3&$UesbJOtKcW69H#6L?pQd!t8z(%CA&uRd0Z zf$WZU#lOinwf`X4(wg4=VbcyS;C0+cWhPSc9gP z&3;&kt1*#pk=IQ4u=iR#x0H!ja?2gM(yOdP5I|43LO0p*tKGHFnfKIT55_`1l{i>V z7`j$3dmyQEjjL5H3dM@mp1Lk8Dd+Rl*q5_%9&f7-cM>uHBPC6d@t?jy{h8Erkj7@A>@~nAaVK-gt1!% z^rX6eWxzfX(iU2L(m@fsmca^zfnGU`q`9@|Y=Bk*#W_q9o&+_P0PI1#%Hw+~8=E@i zYe#fRM@&G3)4Cco;}t0r#C6DXer39BFg^OdJf2HT64b+FOB4vHq&p*R7r>I}Xq_uh z>9#guO%M9x#9dGfmR)&k-Mn5Tx~0wei`3^{Ln>r)$1s^u4craE94bvgbqS7O8x!EoM;^F0I`(Vxq)Bd&} zy5v`T!rDFV`dp^q2=Qcg^z%teN2-td+s1ovWVf{uHO^+=d0@fu<j_<+9s2_ES5WVv zN3Cg_t8}ZLFB7`Ltp`Sq%c}`$U4A=mfAeTW%F4;6lg7)x^X(AOCU=5%@w#=KD<;1^ z?pg+9Ab0WvemSSiR#@$I7SnJFmbb}84v9cJX@F{)`7xEBLDW(+2<0y|Y*bx`rO@G` zN~}0~JK)|z`kYJM)U2JG`a6Oid+$!DqzVOUk*0Cgz3SOU3YE2k(fIu4mvwr%%L{rI 
zXg*C_cdX@$R0~%&K%R=+tb)%}(KuQRp9tzPQFiRH=*q(tLQxMX6iKzmUCc zy^bqLn;HScCSih6pw?1<`lJIVTkRco5W`IqOYY(Yb$N$buW5k2g4x<&wn~T>=qb={ z_T=Ts^GUfcVO#9fnn?fkzNm^n9M+g8`WLI(VZ1mrE_kEQou2o0D7&~1abCE(UOh~o z!COCS{)v<__`St;Y!BSU*Z$E11GSS>I{VXpDmhxk$1d_2?Md`oqJy`{z0ai8Q%ubfEo~*nUZN>x0u3Ni5&)1-5u(qTse2*eNrII7AShV^F3H%NWoVb zj1-th@pjFj9R!7M{&n+}c7pZjP_ux_oh~?iJr6zfKd$8 zP4xKAzdDrSdNwE4HhJRIl4P~>?ml42@*=imL)>7e0S1q4^$pyvk#-B3Ktw5mdoV`A z$A{m^C21Of{gDb#7Vm=f<|A~z*8L{sYQIEz@7^QgdD%^`ZSOyNw)E^{X{Sdu0PMLXi#(5|J)Tin*5I9tAb- zv$Dy;QDszwNQxY0g+9mcNWtODGI)@)@mi;Uc~;`iT8sCIvxE&<6hiMwH5lYR5S=3n z!y9ScwIBDrrmwZGKJ(cH=#(`!%BYIKM%Ck%m*w_CFVt+GQ5pWgR<0EYP9lZ!e+p#P zeh1B(MeDN=u1gJh&D&+KKK?C%p=5lR+<4IS|i{mvF_kjX1~+L zG|ybh0B&&)h*hP3#(%KoLVQAaO1%$q`U;L~gu-88*6SnweV*0^WD31O`f$?!Hb1Qm^E zv6Tk;A0icD{FkkQS^URm107SR=qRH#iP%Nnc35p-1 zuir)~-Iz3lhS5~J+5{6#*@@@eJNfMEhAsbvss5*~FC(t&rTdY?5;ohpqkdKvnIwJ_ z!=h{af!OG2&|NCcZejhxu%j3J?*PTKFKZ3ORoI%<~=Y#Hnkped>#O7KFX1- zbW2GTP-!Bn&JeEy5nPBGR9kMA@0tsY&d^1}3*-%cOr-AKNtpk(>8F7a1m1kjZ-5AT z$PvrqTM5at>DphY6Yl&sP`D3zSm4Nl{$AV4*P|2d9%(jH79kDo07I|TLgC)P$fSzY ztVrkcBPlaR!G+%3*A35Znp17?LJ1zoQsLJo+~zJZ1Vows#Ub2KS5C+q({U7vOEt2~ zI_SE}H@fdRD$ULYE))N*Iq*RMx^NoyUsaPrbzMA5>>*N=0JnooW)7b4w^Lqey}Uq} zIvaHukASyF_kp^%v3QFgWKRMX!k{H>;+%@Npqu)rlt|>!(9nckZk5 zdv&H$+}RF+BOexrOr~1iYo>5G^V=eleK&dNjCjV;=zok5u!g3Hl%IVcHLM5R40L~7 zz}JGpDY^VCs5ImYT{?a9MR)-g*Gwm>V&K!5#;cO>DC!h~R z_i1b8gZJpOL!2Fx_K;*KtXGuID2z-%6}YI#s=?5w-ACuC{RYiPWr=oA*ImRiwiJnM z3i}VU)O_F|O*zCzzOB{~z7;W|uYT^0Z{!OotCNTIHP z7#`}z2&b#a(Ei5b9Zeh=k@pfj?2TsyjJskr-3))O=PFR)&-OH zt&>nqYv^8Vm0VZb%1q^g=Xt&!T8+%%<?{ul8i{ z1jRW}cPA6O%b2rg?LW?J9+m9C4@E1>EhVp3l2R5^nXt&4Yo0_^$*AGpF+kC^pLT`$ zPfO48kTZTxr0pXHeU5Lnc%3Ezt@Gc4#tJL=cB?+RLv<%P?8<$@n)rs_v(~AnO!u?i zn5|#AZ#6^D_b17W0{LTFI1$ilwRbN+?XWq;z=(-g_Y|?h2h#@wW_)Q2&hwCo>tdrs z0R+o)N-s0cIac{6*(E;N2&CM#8(s*$^ckcYhtv>fqW?}pQ*8vPlEL5e zWdPSU=&WI z`EY_7pQbQmx<&TXVu}BT-zx-4Qup3BoZs-0c?G0hldx@Q7g34MfBUGkwGUdk)BQrp zf>J2sGyGANe#pK4Y@7_|TR>r>xvr8C@M#;N1<)&{W)(Cwrfv+t4;$!*FARR8DGDst 
zcDV4Ai*ltyp`ELlIx{2ro=vLG*)IElmgc7_i2d@h^}jj$KOz>ju(Ua+8A7@#QKdGS zrC?ED2qEYUY~c2pNyi&e+y>TmCpreKZlFn#`(8OsBY2z^&v^~L0x{`8E-Jr_3mFzV z7oPNR0OiEn4I%SVmX;!_-o{yX;YbrGx9GM3iQL)-QTi{cK1Cp1FoWU@eP)z_ z>2O<*Zxk_(n~R7+b^Y6zJ9XJ(A+V-`DCvvVh1EZRe9Lv1!?UXM zijXV&9p-rK6n^U1Ql1N;;=;y7#1_~gzTn2grUodV1aU;9jhz6+mHzq%dY?eLm@c<%D z0E?FsL+A*T?uBP3@--g~mf?8o)__b2j;7=lxDBi#SI7F>$5OugQ3k92DVH<(_C_Wp z`HVbgG~zH~B#WIy{seVZfbjrDt23D(v36pB&b@c?P*{awAD-yGMLCISBfYz=G4-O& zj&xEAdm#V*D75j=h3#$y7`~90X}wUSAA=>gKDPfz90KN%;`;6_Fu-p0Ro$m~t%=Ra z;S>`^`%~4>mKtQAQY0><@7}rLcBsZI?BCo#oVZvUEI%_e+TXadL6{D{O!MBPU7RA_ z4Y&jMT#_ccbWc_|ZgN;Ki3EqF=_Rb2+tPHo4to(a?Pu%X`<^VfH2I6dFn=rOdKJL0 z=qJ;Kzy={R$=+&rvhj9&MA?yVPY>tle`-emt~XR2SJ zqcU(8XKGMD$I~y3j`3`s)VHXl0dr#y2;+f%`+7H29CcFM21!~pM%bgkkH8c=FG}Z7 zubkfHcWU0TJV=@OKdI!LjW9VQLJR%f3koq42d4V1$#`K~%Bj6|T3@XYr`MrP!OO1~ zpE+OvyWwH6baqvRpD9U^7~Vv3^@GmDW&;EPIAGaozXF$Nh;`JM6C3i_kav6h7uHgqATF6o~S*sQ8|clJj<_nu{H>jBIc_!vu-Lidqo$ zJjuPZgLp|H7)Kq|b}8cl$3NO%Q40)#y>ksLBe~y&Dh?4P)P;%RLnOYNN zjLcP1MWM5jEFppWuQ%UvQACzed8Wl_9`$YM8EKnxe#TT{L+5%3W1h#36A+x_R4TG9 z;_#^GvW{jMVwhnP1n^vZn-1C#ecpX~HbBt5t$U?I-H$=1m0{n$zZb`pt4B6U+JvxV&?}dWLQcB)ud(yGx#*xnZkG6fChy;C!<6 zg>gbENj;?$mBvu{JXiJkib)aGYtDqLjqf@v6hrlP(ZmU^>?e|@ zB9Wzh{kp4(wqEMDK5;@&Wh8BnD#fEbz>WtCpog~ZXnmk6+syIs7&Uc28f5h?pz!+& z3^HxW2$?xK-Sa6RO$PO>>@3J(i0Z4T`-(KQy5rGCgGst-KpL4ngfOP|Q z=r;vkEh#kh!cNy_IMTn+*(IDDnz7(JmujAOLxO|mjCLY%`~Ft3aLg?*6Fe+$J1hO~ z@J`acWpaYLUlM~V(qM~px($BV&nklNXni1Fn-iFDtwEk+%W2mT z%$74VI*(mhZkp}A<&!5sio~Vb1SUxzT3XB| zHXr9QNVo(9-X=Si`Z?||<9ahI1nB{pJ1#tgMd6WnJr=$jdaM1Z%IXFLeIo+?3JyTw z^2nghszH&_PXuL=Lf;>}r6o<-|bXHuwzpvQA7lbXMx^ zc6G1>dC7o9Hh~YYRCA?@-?>by*a@W;%tAE`FqfmX`_#kY!o&DNeG?F8B@4FL$xJC@ zdl+m8FnrWL`Kh`rKYqi!G}Vr6PN;kN-ae9%^!Mti zX}uHPk-TI|TgT~&!DxB+Gx{ME{$0<8qOtSqF|;^we6_z{=8>7bqj~DDDPwHYs%{zX zj9J(a*QXSuSV$kTarkZO9`qi`=blExy1r5g)4f5XJE!+M?XQiXe-S%+?C}44;}Ilt zGc!*iuPF23V~dR^2ff2E04+e$znw3nf^Dipn}k?B3bdX(QDKx^zLmCJSlp_z?7v~A zxemEr-OL%m49eF+ba1t}%n%p|#uA<+oEQM4YH684y>kwwZjOt2oO?Hh8R|f@?JsF6 
z#j&AlqJ499vmN|UY@8tEAbk0a2nGoG4$NAyK9> zE73KfU1A~<)7l^!%ze2DY}!xi3I1pUm~LB501GnH4T9Ek0Ctl(BruOt_1Wa@G6kKL z_m9)h>P{$nt6JOm-Lf8+hoZ)bBJ9Y?pEU}m#|al;41y6~eMY+1#4v&Mo3s-1bh^= zU8e<{{>>JoHQNiR!;V*}K@23#LTUPjE>jRCvX|$+4iDK+J8lBfuGG zij?0fUW6X}(kyPbGqaN1{pv2L_+Q2a|K2zLZcEjR_63avMAG-0JuFdOBmJS@#?EzkZmop05=7Ar>76*qzwH4(H+47bswxI zm4QS+(@B@d!KpcT;zS<#9(x*P?mV;1jv^ZCqjRE#!KQ$wKl637UPx+?(<#BhDLT>c zB4~(3qU#DG-_7}M|7oqZnv__vNbLNZ6F}TIFpD*Wqj|12rGATWuvBAefzp+#`hU4~ zn8m6)Aw*xtDMUMJp~?A$G zll=b%x}#yTVfRGKC+3#klETDjMfl})PR5R(7lh`BF@Fb^)x5NInN#(^rpQ|vq)Ai2jO07~EzuzxhQ0npN`1$-Q>88Rp8C!Y69K}>SSr~(`dN@5i+)mwuCxwd3jC+*xHp&he?yXv zLagMb(k}PP3p!h*K@b~nO^2$!|7`g+)Yz)@bILEP=z&mSraZF}G{Jk)ZR+L2U`TtQ zS!gga+T{;CH3~Z-Qo`?z7$EPun8*cNqhAWx_`YNEpR?@#Mu?A!dxnn9BqmX23<=L& zFPzu2*PvbTb)r=IzakAyP{GIyoce5F2r(ot+~R5CYZodgXpHRW zdqk)@n2)Rvq?IpeUBvH|eZDv)+fPxZ@bPNg4O0NcG+A%9Ym^h{{N7eUyD#SY>&%g= z2y8M0xr}o+wCq%?ktN?VEY83WTCA(~4eGev4L1=*{i^3-h8sFjbH!R!gcwwCNz{V* z@B=L-GbkzGVeKA%T#>=S;ijTsOo*8}5yK+IkhTGQ{l2oTviq8K44;Z|nUsPL;GkO{ zd9vZ@X;gab)`=#)l#sNr7Hu`aw;4wR}3z3I`j-48(U zp#{%ydP*V1T2m?;P`_L7gbw1{Q2$t3tsu6uF&jNL@&JConLl@$~lAgkOzaj7nF zOXy8&8Y+`MgmHRjku?1Lj7l0Sn^~xM+4Rp(VquP5wLG#Fp#Rulf+}4yM99eR-4Vid z^mn@~fuqcAaZDmBsRO*zQ3FIv>2wqi{YcjHisLjhiaaRGjKPgkS0jWCpE~1f4blpo zG@}25$Ah!vjYc9?lo|X$<|=#c=1s16K9u31U*iwPi|V$#yQYAEY8Fk%Z!Xi@{|bGC zy8V`krK*-_mS1s-GS!y<;Ybs;KA$hR{2|=_)*w2q)HQo>eV$!b4PkKXfTdK4E)OkI z@jp%liE0?F2yrzSw9nz81bg!^6>T3(vpn!9H$-alIZr>=KCZViYLHVgrKcwuqND3l zmlD;NGQ+9WdsqogOQnl^bV1%N?!pC+R6v`y_T9v!o6f$w zJk5oU-kO&5kCJ`u18vpetv68HkE3hSf7KP>*QK@5ZMI+NMdLHq;w%Ehwl{iu8c=U_ zHff!P4~47|F-Xj`mbjAzscB@pty1AI1@Nuh9klFPusv;I+Pxv+y%bgLZhp6IJAae%!IYor$avGN z#5#*iQ(k=S5^Gx422#@x{-gCV4~`D~#>o}yz;?xFLV7+NDR^Oug%D!ZQI2PhfEukH zw-MK>Z@wMqu5rScV%w&%lX)kA%{%OV)93;lRB6W_c2#_kuqqO2kzPK_de+TP)ckEi zGP5*gfe>xOys)relg!Lq_|aJ0VP&E1#Q4kaAREn2L1kCv$LOhVsU+AFnqenc^172{ zJvVsFwWivRV2!`3DwLD>Hr{FE7b3UsdZ1m~UH{Xj-U}_Djwq}vmJ~k)J>?-r2i%W0 z&mMup4~LWjYQ*Nnx1M&RG}s5jbn9!Ua52@G%%J*9BnJ5KB@hTZMJ<~7>P5Ayatpi)j(FCnGCY_o 
zyA>$ZcV9)QuWK*rz;T`b!!6&WWkVec9;UUhFAPRH(Q_r=X&LM z)`0_w7F@@(i5Pc$(G3_27>V%a0vI@W?9D2W%~lh|x)%$U3H1sek%{IU>6UaCSulxdKKUk+JCzspO>^W-Xjf(d1Sbx1H0f~3Y&cx;%hJdAgNG;N{3RwlD(lwU^y|qju!7e4Nk=U^-b%akPk&J!S*SM zQn5Rxz+hXE**}7d1rf_L9`=HiY9pHXjgmf{z&a^zOcYTp$hV#ilgE>zamtw!-$Z*J_gpS8Et72`@sV-~PZS7M27U9OZCPdg_7p8%pE&c{ysBWn%cP z&i%NO_f_7je}NulBNi$2KRTP1u>7Q@8-Y>=_2`397W5~pa*X< zQ+A0SgfU@%nkgj8aX{z_z8BeFi{~$`|00^IwNS| z4AlWQqp)m1_EXBWXpk+B-xkJH>Z{quITK2hJ%SpC#tBzx&PD~;0%|Y1zCsa7bVvt^ z6PC8tm8yCA?@V{xP<^edW&gA4P4s_4JF$QMwXXNUac^%Ely`!lh~X*5!=>!xN#&7Y zY(gec&mV${UE2(trJJ*8nje@hq0SXf2WTSV+Uw0KS~df8_WjrzwRvMDFUY#K@SJt^ zfyIIRGcl@H4`5m{fZbL(mk@j8@o1NwHbhU)BoHx02 zg>14iD@9SAQSJP8_BUSkkCNrjb(=hEkz>(MPG^FA9TIQ7oVWv`F6dA#;^qnVXmmf} zw+VYw?Ac5xXgE0FGTO(4hT1b!iBb`|!w+ZZF#{J9|M!h@>^+`5hV z+x&{PRG_;>;R7tBj1t3_Hkhs|?Us~WpU%Jslsj+Pl`9M3YKa25sA~n$0JXdJo@{47 z+8=uk6eRSDH;%d{gcJVo3=y!W*|(>F9vo>L^A)CW;kmQS_yha`ks1~fCElB0)J9CC)a#CK^93c;`+6`QQWq<|{kf=$lYpk-%8;0EKo9}J}_(cFnZpprbP_|o$dcQ#L2a{8!8= zp3NSKCWf^xiScB2oUM<8+LN!vQw_pzoGkg+5m%Lm%m~p;&oUR^Uu6`-(z`zp=_Z~A z;Th;)bZJL&o|_^c+q>P9}3dWM^(M3T(EeR*T%J9XC*9?Ql;HmT(?okzWTNZ>Nbz+H82F*tq~Y9Y;5vbOhJS z{&(7KEHW6q;AB&C#eu$rJ@qm|G^}yDb^lD2PbBAs;4o?e3}Xo&F*I~h-Y0!BD928C zuzDW0w`wTY2k&{25#AdRwbVgkRcKF7%j~HW^w~L~t5*+isFNWI`}&GaBqep6hnweg zCxJ*YT{&YSBunmt_#UNC810G^E`uYzHJvJAgE-O2`Z$m<#QCt?>+#$~s;6kuHOshl z#|~=24khWM4_UA(U9T+d@E;LU=)7M0@9<{lGv)Vt|PGRJE zq8b7A=I$Z77$@`d*fj&E={6SGN8+9T>e%Tl(cAc9Fqs)oH0DD&H^S5lECu(-2vwej z$@nlLVt%t2aA5`$|1?X5I}l`H(x_X=KW3z2gv#tA)G>QdM6GXk)soMqZaM`?nM~!~ ziCSLoycSHEAFGe!hRRXR5x$hRjiobt*w$95Ga~pg#+ZsQvvrpc`?6opiyRQyd9=0< z2B=FJdEy`2bhsIi#4N3i!p?8*I1hxS3}Zm_jNnMMbTh0|{J3g)^aEfT;2C87@b56H z=F3RY?pLG6h6HnblO0(IhYwo*k2+qh3_%!sYPRm zV*oFuEVrbPM0g{Hs!KHrw4{h!B#zj?Ul$(qty>QvAMJZXAC;3TySUKEl0JhBn*Y)W z>#um54wg$5(SYU5e&=ZXn0h4@<6w-AY1&>gBjYEhuK2zcsoHd zCHPVl-j(BeDY&g~<5NTqQ`4nvvHROhKnil#RzGCL-D1Lm0F@7DU(l~{ZF(79& zfDanjx~^F1ZQ0_Oj77x43Oh-2QT|1c>J4Yt8Ntr=1Y!4>{Vj9 z+P~0u9T3<4r~i4USAdS|&01+um}!5kz%6AIycx~SXzY99_Us#mC#KmYpNbRGl11qU 
zapIe*G}Dm!d2^hE=4YLgUx%Sci&1F*N6+DW{eMP=vz_cPAflZ^+D90TOhQ*vzcAOi z-{V*q^Y7ZDHODAQ_}dGHg=+WVydRl&uHajRC02sMsM0pJ=WSVw#oNPb9azl$R zSx6%O+bIO2smI@JSQ2l?_`E1;f3p~yuq6N@%1PZa1UboJOBxVtIwuTA*h5BM2YJ>C zq5Ken%*(csP5%~OZ=p1cH;cthJLg*A6UNO?I(F8racI^)5G~J@m)6N7&}6Qu(cp@) zhI`FbqR&eX=z05l*I8-h3ssX`k`%Q>Wh*M>Xq$45T+jkxrd{Lh0$(Y}HLnb10 zgiIx&kg}hrHlbf$labGi&1+JAz(GQ#`eV|(@w776sf6d!>rMWgCRJLdU4wu&!0QM{ z5S=kRb&>>k@ZOsl_Ko+gkMjALPWNS0@B8JDvSJw(Fvhf3yHw|&aeUeaHuhKOhu}Nk zE^p#`8c3=B9G{I;({ia2psRUYez|2TvZD%gj}MSKdZrd49gnVsf{1^n zLKZOo+%Cdc`o>tQio{^?r;c;KZ807|@4HLaP(uDjxhLCa2u&WA;P}j_s3+%t%?ZFd zR(a$ZG~}2i>Cwo1DSkMsPGSdM5jcqAqTR~~DsMP??Jt_N+(IT!X(rqrkEJ))2s#_klF^2zI`GUbc_)i zv)gVFD#Tiwx?NFgr^(y%x_TcRj%MV2_ajiC0H|*vv=4$WB*!57&_I5}INVZ*KLWI6 z4pxs&Y7HbUMNhV~kkS*OD)9E883nd_gl-XsXeUEfD-{S>&6^CYwbM%qWpf@Be9=4^ zDBDJ3c_GH)+FfNx5wt+U!da6<%KoHtUfY#>b9O8`@BKGb3*9nf=rZ1^;}8{rK@)1X zz1?3j@oE^XcaHp57*n_?Mhic)uMVYc!?W0Gg^+SxzdS(&M}1M@I%D7)wqB4W84_t+ zk^?Taw@ecPh0(%g;)T2J2AyKKw?Pj*l8K7sRGbJ&2C?xc8>S-HNV!=vHE@Mce%09< z!&!9V>cWKicW1$R;xFt8VjtE!>z)NV2jgjZ8FH&B{fkL$79;mM?k$1hOgm|E6o#UeqdW$}tp)`9L zXnM-*6hrQb5BzoTgs)p%Zl4=h@~g84+3t&DeBHYnozlo#M0 zdTvlW?9PU?WTjzx%Q!p09BE7)xOHqdq+N=)d5sCbgX z%w|OH^t~$(7UF8(F>FGRm=CD822@;RkrZ>P^?sxTu-w}O;g3z-p;U#pTT~)zb^@Q9 z<5`9!IxCqjZU>6kztYTuEA~&uI7H+A$!ort)|6d&I|MZYl}GdU#!|Swsw#TbPY@za z7q6Y?Ip^VIrJ$fs2Dy9S3DnJ(*ztjAz;p&|+mkONRxHFT?ej!Pkj4kM{oUnl(TKo)+Lmj0BTnG?{EW8HOqy;3S5tjLdU|NDOm=Rf^j$=%Oo&_zJq zdeRxcf@h+J@=nMVsGJ14%q0uf$9m^WUA~T{qJb~0$CcESR{)h(7}lj<#%;KSDt^=K z1ac+I0F{jCpS#L(_T2v{Pr?>;{b=k#XQF-_tm?r*mu;B2csU>Y@9BE<(@ev>dl?9{ z@Mea_#J?z+X|Q7InxeJTH8T$McU?=nB9caZwGk`5aVDliQ%z>+zp#|A_~6h}L&=HA zM=DnlBcCHPipg=H;2{40I@&L*T+WAuJ|b~O2R?&Dvu%5B(d?^Ty}BJjL_|kIZ`;{1<;Au1=Es?-*+Vv``y%)@ITDk2WS{qNhYg;TWlqZxWp zYS!}OXF9rIgu9t_57LY7ZXm1?Chz%c$BOjHpjSH1=5#iEuuGt{T1q2QN+Uuf67&@3 z?A2*K(y0UK6lsmY=e@L&Q7^=DPstJYoyFaad}?7o@m*5q{%$qMkiC=xF0K@D14=c> z^?!4KB9~Tp`cEuWK_&}_=pl>cq|Uf4j6WM_i!JcMCpRnro_b@J-)ORx&8x9O*AlOj zoPlJe^Y$Y`d!9@5ouEz8>Zv5!Ze))NxW525L;+9(%U3N8jk9re9zzF28L&~|8P!#U 
zFho4dePWZDwB4Yns!&%uxhE3aU`(kfSV}W@;NTdUhkf1lZ4b}^HrPmL^*Eckmh-O& zU`*rg`VuAqx8g=n+1Op<6-DfYfW@~-$*jU7eN_q1ldDf#N-8DmM`N1ARNuEZ7^%Jb z$NXoqtw^A{MTq^gs*@IJ-S4?!t76bzI8I&kfcIH&aV^WEDI0DdfRsFI694(xAVF=_ zoL>*acuCRXQ-%J3iCF*sFKMR~S@mtfoRu>Fw4ruQma)*Zg?W|U{iyJss~mYk`*sZ^ zl0fGqXY?`V5l_^61&5oFAyjs7^gWs-_gm-=qAY0TrbFs=TI1+@V$cR z80`aerSXoZL_~j>b=Js1kAAHBAF)(PUJt7x_~ ztN4m=ourI>MOq%)6vh1-LC{6_$?+4e45|9nh<3X*v&tRFDA(nNh5~QC+6u_y@ylKE zPihW9lM@VI=8`I4y0I;CSR&tctTv^^heXUZZgBx0pvoco*ehg$iqV>*| z=sKPfSZw{U?hKYA?Lh`kYL-IjGi;-U|JMuocqGsrOT3#@d%O9S;X9s}!-qW|ey6kD zjdx(^zAB{a*K~C^o94~U8`NMY4gG&9zBvL=EEv)xVai^Ohx0FH^3DihEm80|XCu5M|Zq8Bz^&m;8mB=+2aiE4<@6p_D~2Q=}SVG30#rZWsYeI&<%3!LKAK?Mrvl- zu(_Ih{y92f5<&b|iwJp94TEGoL7t;^p>bH|SnG9bb%@I|KDZ$ww~33_CG*pf!))$| zegdVg@C*KbbOMSo_5!6b2X7Ptz|Hjsh9ck5^&Tpi5{HEF=ZH5I`;;r1&`CMc@hRm- z(_7a8X{4nAaLy+w;*EWI|NO6W^4HHA_>#QMjy_W#IdA-vXxbUJ+7++) z3p$UIPXZQA6eoeM$6D0P+01Q4eFUGJDLBWVLh!yp#zN3+x6TGTSF z1mwy0a2w!Ru{_pDlDNX?aHp0%rMmi#p^f5$plItHcs5w+*Xm1MgKN1 zZ>A|k)VxAvL8V;u_G$z~LFGp%lw{UOpHKPLKqV*eh5l8ko(4woJq_rIAX@%-^;%aK zfkaRn?MlzYvC+hoE=&%0fp(kKf~WZ+)fe{Mbq*HM9`R+9msyk z(d<%rZ$yUuxW8#BXf6-0z{D0#0#0&hk6ATpVK{6$qW_cxsWDNU)o75x;mDTjbs7p~ zJ*$SD-0}}~Sfi8fqdDoaox@@8w5ON1?ly?RIQ55$VufEo`${YBNynUh^Rg0Hf)J3M zm7$k<3p+~dgk<7VlpWV9v{LAm8aea()BgGn7XydL z22!21+~)rB=q;_eWCxqoglz| z7sn*;8Tq~uA3rhH9j}gzYZukQ{RSKrDhJ*X>cLlYqXI1KQF#1^erFks zfS6KzR$Y;-fW!`ME>7_Yo2^S1 z2K)sD+EDuLRJHkuF|W0EA$)0m&WCOV$F0Q@cgvZ9Lg>8YD3t%2fR*I!TTytU-47^# zSkG)H$>E0Fm%X+<^AHh>WP4bk=79TEUa|#9Q(`Vfz z_g4_L*@-WUI!MUr3Cxre`>L&`n*46^50khLIPgeiFf>b?LS|NiXnf37hVAN(>n$HG z@FprYLzn$Ry9o9J3b4xmUk@n9hS*nvv5SX#H?xkRNAkUlb{|s)J@bz5mi_Md(G{*)#p8_Sl7yv|q3ZzSNFaIbocDK8bl7DmWAavui76+_Jkvz^G1Ix$9` zpepK> zpRkjFf+IEi!2Ivu500#Y6JdQvL~Pf;@aE9W-}#XMAX`RfLgn)#gY^-Ih;k3$zu@nz zeQGd$Ysxo!*E3QQ3I7_7j-(S06Su~F0L%UP-3FB)-zAy>BF`offVXz$ z4K=?qpBzCv)g&pq@nQhYbk9yZYb&VCE2Yi8?+_RUkF{VY`I*)(ykUK&p6hhtbf6=H z>g|B8s|LIq84L<8g&rFO7lL161k^-i1q#i7&licqw_lc7DIWP$H1us_MGV(q<+=D9x>&)wHX%Eftq>Ayvz7k3TZilSg}>Id1hrL 
z`#tg)7PFa7-KV0Qi(~g44q|qGkJ-(i3P>`C7(_8|;}R$V-Urpvzis7pgrNB!5#Ab* zzeYfQKJwOz<)iwrx1cO*+WtfTej})RHkD))k<^?iusL(qY9wu=1HR>17p3!W!DiO{ z9bOsdn_0GX9dxXT#F+xfVPWH4LMN)ftKXjyW*s1NV~1djS&MW%3&^>O-loNK0jqewm-()s%W& z3{v1^+iRo6tc2XK53aKlK9QnRamBmr=%^*SGhNG5tf852B|z+p3Bq@7MhJYK?XSyR zgHGW2wBm^lXL_r_5uL4-W~oW$bUNuWtM17us}->G_y4x6k?70sJYoBJA+>tumC7sR z16yW!qMdI+i$JqJXDwq)cIJwW^ixCLd1?8Tc4dgTP~p9`C%%DSU*t@hg09qPzSOB? z-4P^pxe^Cu7HdS?Zpvx;hAGxu1rh}uWL+0zZHNOQQ~XqBF3`BJgz8_Wm%A|9WNJ!= z59|XKkNYZR_`g(D(vJ*qE40!8^!?Xc<`rje;v`HpW>QRLrQxbYDtyflk zf?1R#e8sd|F4#okk>c;#j9AJ41G<|YfiSWylfC7si!^I{Z`;%-zPLy;j$g>Wr81%G z{0G5vK%}8od8<)mt9R8yx6QCw{N0uPe$QNA8+q=`Jm|Pywznb_ z34HS&W#usA@%24y;6T{o4%|QqtnB5jzky_mF8yO7JkWMS-ki&h>x~MWDHSj3mAIr~ z19>G+W@~YAGGxoyWbfI&HfZSYafj3dA#guS)l^PT%if~^kP1Km9GIALMuqy^`M1LG zD>U~$65t4@t?E}u@X6Tk_5dbU8R6Y5DW5FKKJ(NbrYNI^EAQ*y>YM=_} zI^wRS=i5K;CS=k_AHqcFRb2VN0@C8(RQskupb$NhoST{Rwv9{Xtfo4N(Xk zuE(jYEOlI=dezGA4O0{_fK@-SV*uJ&qH>eoR-yn%M7u_2KRJgBq}8E>J6jW>eBR*K(f()Z{O47oD>__g1V8j z8L@wVE~Qb}GW1KyZ`O#$GskE%{`MO5`pfL+pYeVc!mQY1K9F1NG1cL&|V zDy1xO;GyoRAap)D*#eLqbtRKYGDW)#5}p**?%f+A)1?`E-@bdRJf?>*e!G8~p~AB9 zL{|c0LAQ|Vb1R%jG_#oHcYY}zRaOfkPRvBQk)8U(Dp3l_rrnSx31vp;1;DBEP;V^w zCIR9|3=@_0M|w2I9E`X04cF>Bdc?>&MQ23|f@60D7|_oUI({01-_@Oaq%nv$sUn!! 
zg7>Ec@hSP`$^s7ZILCDLhC+Jv&U3OXT6?587}CUYoi(h*@{C#ie=1%wXZlB%YV|iM zaC$o}7QpYfZU%1mH$;x;NEK# z_5x<^PEpo9!I0Cjx2i|N9E}_oHT&^3uv@knX7{^;qem}o^paU@S%{)8P`xWv#x66F zS3Io?&kTHAnLeho5Ked)BbrNH(WMd(9r((%+>Vk1)o`0bPsPk3gYV+U zEK6!4u>*Z5D0YedW$s`hlrlQqgMrczn2t}tv(`pes>-k3+KNf$44{bGmpI?$Bz0%I zaD%u~Z-ZAarR!s|+K-_QYV_$$`oFDQL4%6t*@;FZHWGEQZ}AvE**Y z;R6Y^5S5k78t!Gbrnm@A%a<9&zL4zqT|l%r9if$F63=>WU=!yD61qO4`yIBMxV|8Ec()ON00(Qsz@F(p5 za#$`e>6>rBqMjo_-3nE`YRH=N|$sE}a# zxp?24QkYr5`0^fdE0Hvy@*oaI=0Gn|;J)YdrQbx3>$iZILl}5eCA4lCZ&Ggb;1e}; zRjZO2ESSfLDhDSp{*-us?9ULOmf=nvF@w8)A6Xt{=af6X-{0#Y zJGB9Jb?1v8aTGE;efjR2?GVEyTG9u@fXzHoMCDDrY|i5C=|36BJ3nGE%k%Y!>p+Bk zN|idg+}U4aiaGv=4cAcwpNN@8Qp+ABBYU=^dX~^F-rK-14crUotOlr+ zNj@$Mo%EmqP$*&tttWyp=FHq+p=9UdXU9@rT>lEVC((zW%VJ37*FWR1JkV-V?*d1l z?rmrc#645Xxc5eu(A52vb@gtwI~F@8%ZwHhpded8lBjH0=PipaCQ&5I%o$EtV245_ zJfOh(aqZ zr!nivV^&*g!aQ2u5?}adG0D4Inc$OX(xYhlAk#wukPd)gL`Wya-BS2I=Fv8LHH~*& zYXqL)knmWU-)}?mEx4i(8HME8q0gsrlNR=0I5V|nfoE|dj(d{xD z+u^n!oF2e3mwJ%iMgogGOI_#>1V&YzVhnvvgh_4QOS(=*Qr%xeQcP~G0f0l&i_)kY&L zh>!r25+=ZQJqmS%1?wtN+&xl|W=e_;lk6sz-^+$cDv=K4{I8yBvw2|aP zp`I09I|`Y{Zvb9(H-ozDCYQ(ZeR^>@%q- zLb88ax&ME~eDWfn? 
z!?!<{s<|;8d`-sYn_hQy%=n}5UbD}M&4It05^P%weeDEb)4j+R#+)3JRl?){w=Dni z3HK9&H;=gBTyJ493nji?c!6ZG^Vdms$wjij@xu&T(>$wlr~gN3{r+aCHr+B?RcDGx znczS>KYY_G(}c?H!%{C|nj6JnW^0$_1_t9h3X;Z$cXKbCGa&w8kYmAc2KIR z!}ewmK^5L5leD5G+4Tdig*lcpt2lz`kMAj!@(4Tl7y-yVC{tVLEkEy$6QAXq2Cy4M zPkB1I`A(?tx&)aWZ0^B%r~?T3%dhE#_M|_#j|S^%L4nfpuQLuJmBwlhPZ~0K94+|> zz2w+*>yxB?gBIboR0QA(U6SKY=lG4*mqm|n?t_VQN{*~S&{a_YUL=Wvo4%a2eeqzAJ(UF+laNg!jqw7iZ}y%PRt4t3f9=`(7x~;|p^t(j!$9GD zTavjBreNbzB&B3f4%3<*qyN_(ivjyk0|_7aa%+_T0CzQ=&cCZTpAl#$aI+AoQvF_{ z+TNiLo|B#u+XB1vo25;l-RA<$!9`BoVpf1$hF%=QUF8a5-1q=#Lyq5O*4fZ1GF7l@ zA6$~CrT#Xh5}`OCOn14g+M+FU-?CO*84kayn$=Pflv2w|^$_4FYh5v{0<2ke(c@+{ zAsRc{vNnKCiW^vn31}8-1@WlHF^Y~Vlzt^YrMMPfmT8ge>z0+z@VaBj3?H98>n zr?G{%l|ga5Dh^Hp7C{k&>LumG^Sr6H7z%?Z6Sx_6vq{PE7}~^;>WlKELBvKBkM#Ko ztp;HBbMU*);SI2>gF_G-dziEnaYl$uf+OQ1J~YMxZphC{y-=nK@wr@gksUmM_!~d~ z4wjcJ$r6#(5aWrN^f?QldhBZ=U9HwD#>eF;Mgp6@3utC2;MU?3v&^WON%6*PlH4w~ zTYbgxiT!jNR-}|KbH7|SF2FmMTHk6)bl16*tee}qSl(g;M1)Xav?hN7TC9nWGeke_7J>OYhIn#ijJG=XwOt zz$R=fHh;ZJ^BpL>SBVwW@^JpOY_!0qtmG~nh(nYJe?<5pfLGV?KABsI#@7qH)nr7aZ8Wi>Q%&V9pn zzN5KH>`#dbtdepQy~Y%!l%SB+qcY|&rzHCzTTQUOrE;B8RhR1#3DW3svXhzZ}+*sTzLHXU%^%b!@Cy)v7Xpf%w1^IOJ)ghJ$hHqP~|Bv)^1PZtCdke*2 z@V;&5ys}IuMfe?3e7tiz70RoT__!r8WXhLEi;LFWu)ElJPd*4VC))%DQNkjHQZgt6 z{&|MH+Q74DYLUP%vcD>ZZYE~MY{dAp^*Z*Rd5^svN$i^Rn*7p=JUH*!8o~nS?;`Yb zYj)!|)&m$j@bys!i4888h)hE!jTMV|`xolqUlnKao0>#}itv0kswfkcwHQmlICSaF z;r4%oDI-@sBVBHfJxRYuho0u`UcnV!6C`y3+Gl7ri_==m59jSg ziaV(+Dt`Dp`$ky=codn7t}&nNvK&rMw5%Dsk5O4W(8v+Ve3~r4EBfd#jAxnq7j!&p z_giCOQ{Zjnl5AwkcAM~jhTL?d_Q%EC#uwj*9pOdTYH<{_4>Na+bg5fo8ZTOjD|7>T zk3@>QKi2PpyLv+k>FdpL-(k`$cBDb>Qm&oE5SAlir z-fz?6ZThP4p$4Ee_>? 
zP|1IFYrEsLin8Cw#VmyhFI31>af~T-vn0%dww%XKw_3)$_JMxy)I#QAi@zF{+?S zcGwJXFto~0s^6J*Ny}|6N}f_)4OXo*3xWPcQDg6uELa7+%{>A~I0He+L;@HU2q#hw zL0m^EK>?|i1V7m~A4dg~2DD}c#2-Y_WMq7u)&%obU4oc(B^QXSL21b2W8NUti%GfJ zpa9Aj5$9XD7#^-ocvz^;)5RLr+ZvLmps)rcQ>Y^lz22r9O=7A|>lnOZcR=R7Qeow{ zq~K*M!dT2|;I8|U_O(@nW;}NH6w;&PgrWD5#gSDAUG$M;{LE_LNGEe|WQ2OWL!9N4 zJCH5Y(h)=;9o#jZG-2^GvS)uGw{1K0>M_zVy*hQpKc#)dUf#C$@j^H_Dku0 zQ5ihBGZEvFuy-P221~orzZ;?)w*Jy(c!D}S>idFimz+Hf+ zWPC;#lGA0XR-uv0N04}i4Fu@mFFp&Y@hjOo(sLp#D4s=%O)DjXjz-8QTwU&$>Yh@! zzfmf6A&W3a{vZaO7Al;`5IV>$YZ6Y}K2TEXw;XCG6?LIJG?!8CuW(IgggtY+90_w! zld|s4$^z(>$_F7}hjWdlMrR{w7{4xpuZ8gLvjq7wcC0kiOJqscSupS$uLAkuZufzc z&GL(8RO`|!n&T+iT^s@>ig0Hgb%=YUARhg(z|5*`jw2KTtGA}L)xQ&sGOh9<6eGNc zG~r)EEVG%`hRKku7kF5<0eM#I`KZiCpk*I43wP$A3D&Qr-tvem!Hc?h@Usf-ES>1W zSo5o(zpt@_5yzDW^GWubEx_!Sx}yYFn7MP7*l&+=vY?`f{%@6b{Dl-yKGUwETHK1* zNl4!Mc|wtCWTx76tu%G20|$%MO3Kn#p531lbyMU+MyUI?3#C**(A~RM`UM z?9;SdQAVZ4jhduVLt#;g;Ju}7kZS}OUT6F2Qu;29qm2mbHaAb&82g@dU-e%V7bhnz z7qqd>;C%g#mol}=tSO>vnu};tA*enxv*Xu`ynzytMVqPmh#dUgS2S;RgK^<+&qog87@}Dn1cRbRYBaFP8efzgZOY zCzLWoEs9hAr4can2foQ&(m+1Fn|0xI>I(J^D$1i%0&O?h_d{s%DwZy*uI>*Hr4Ar!OIm~9NXe)_Q8<20 zepK=jT*uWwcBXFj$-8(BDfyxRY}xgh5E;>11c&v!35c&um?Oz1%@*yFAJJox%qakz;%4rS0eL0-3R z5ll=UMgsuZktuLbG_&Ae1mw5>0C(3kn>dR zuons2(gTlc-&5Qw*9x_de>39e*ho9trX9=kb8cWGU9FKV=& z%EF=A;>hLz3NkW>$u64p9<#I3;wYuy-G<-4Ak8%j{i?M# z=OYxxeF7sQQehSLA83hJ?b<$yX{KH+S-1x@+=MHNz7A%&#a;&>K6R@kKeoDHo@&61 z=grEqnJF0RljKUni4(nMj2?B{SE3A66iZsPpTZ~LIAa2Zkf29f06gbPDtQJDYot_X z6Du!z(Q?G)FVmuX4-(@>w-^@7e3rm0UIyw+*w}5$<~@F!vwR_oU)-G|ddfHA>&?@` z3$bOToW|HGv^a+|Mr1-%6Nv4%>8v4ES8F_aXiFs{9i{)K#I!Sze#NPAW9fj zE0owFoMy({+06e&;E(J|%tat=vnU+x)pxW0ZR%w+-k+Yd?&Hq!F#RoE7;?k~635ip z1%9_EO~FE*dhb(V+-s=vRH@lpPCpwD@@iy-H{6eMBqNVn*K7SI^N<}l-M?43kAHPd zd)7r>Laafn95JXyX+pS))Dma3P=&YyiDDt}uvQ0LxM3iOQUXB|66crv)qdYH_Un&o zg7y}e0!zk$hVR-lW_%4H!ob>$kzUa&EHpkL6_22P73lu8T9xlR^*qo_`Fq^@vgR#^ zdyZQ{CG($b=?lYunai$nQj*c!GcERz_=TkJ#~KN??b1U6Rh18b*bfN9u_Uio6)6CB 
zN44msuys2Ocvyp(*kSSVi0WmlumJk8g%7C#>^%DpXG0h5iD<8sbAs7B2+ucx(!l#) z6gBl$MV|SsiI&)<&Lr<>>nu%@r}dosfzhyG*m5gz=}KF;_ZzuVE_CvAmA3OM^wr0o zQ@VZEaCwPtnyN*qfiiOnSVC;*m8W!N;gLASw45;w`4xNTUB_c$fEIY&PYSYSkPcHW zmqNEyc>pJAQCxo_k}LQ*zXylPM!IlK)CcFITa|B8C;*=hfx9*(pTliu;`>XCh2E}< zHLkW*Qs+rGFafMtlMw9>`AV6X%9B(b%)iRRo~AfCwd(c6Z9?^RD#knI3s_Znd1F+Q z7%cuKy@K;87gD;7X)&F2nw@}lBu9~fbJErT!X}$~>Q`mU93EVVHEr;7w#H;^)I}Ai zfp<+?ycrJN)K>!-%5H18L0mMN&09(&b=-153(iNy8sMO+iQs!aRPmG9t$T&Y!l^dy$PcNo=v(ysRKy5c%1|n zM-Kuf$6ySpSg?u1J`v^p#(GlfU+yyOq^|F{eh-xLx;Qp)8ztKgn6Bfa?^t62Y8qO-kd)r$$j{33S8MKp-=zE*X!@%l`+VpEIDy{g1QMuiD&j- z27EI16f_-c0HYf*(Dp$?x(+<_9um1mto@r|0Bj75*Bv3**yltyj+eEYc`D2z(IsW; zvPr5yGtOQv&$-E6KIz5KL#Wbw`&Ib(Ya(7~Q@T+fRtsVv{fdT0^Z&5kI;nmQIS+fw zTY&vslS9rB)O(TexN#ucQKGLNHX~zIS5-hVD?uW&8^5smQ64uxTEfj~^~#(4S7*xU zKF~=-x-B673v#&BzE`WtO}-%A8vD2gw=@9TD%!?lQTLTUK_3>x^ghaMX8Af+7~O%h zY{%_<8(UC_-ZF@|OqOyqr{PB&VXeLl+59jP!|3SykK`N@2aml7mQMjd9mDlns3*MLsh?-}1a^PlHV@=pdzu&&VuZ~GATVmo^ zsuc{pYNGgC99G_hIaB__-gUy#gQ7XBT9u3so>@7rd#)htsDv!D?dZ_xXgbA71KhIX zCrxfne_<({l<iKZ#-40*@Sz+3Awt83S<|7#h%%``r)%s3BgrDI@rqY=?!O6 z%*?t9DiGpdy?+M5+VRec|5tJ3{j#3PJ7q7W;8xa&l>c-#iWiC)4MiT zd;g@P4Fvxt1ZB4TKG?K?l>c=W78?TztQ8c)44m1g{Qk_%ne{ zFxYq4+uu=qF-A!}$NwFl`AdXdF^R=H3ezZ_4|+v5jC~4aXq>Lpe;w}@b4iBjuv8`4 zp#3iIWuXI9Q4Yk^tKFNtP@uH3<*pI~x`tQ8?9L66D}zYpMG=UBn-x7a1S&aYBiSP5 zNw-Veg+F~{T96ZbvOg&Kw3cC@MNd?ezhcD1J0vs2^|`bW>X9|R>!$x(Z2P|3Vcc{K z@6rMqWD(zzpjeMF9kF&{cV7#WYHz6YnHDo4Ia7vPqy?%2-bRRUCaeru74+kwde04( z@%}b7{;ARrro;9Te~g~q$gYfC#c|mJMf3{LM9V))`>?J0xO6T{snXtO3dBK z6tpp6zj%Z{MVr`X&E?o6L{P~^{Poa6)M84xQoFzNsDz~hOp5h;Po^@_#%_>+Qsf5t z%qQcKn(|!_z`~0e=@jfAR}5ufK8urr2h#3aIY|z>y-mPVSF>17)V|lgUW%O|8-32r zknIV^bxurZd3BIOd3w`Be!W~9n*d-pi1&cSS_=+v z_xuUN!_)esOSCaBB;imFczzVQcV0b^KOI{{j11Pnh_))k{p^jpxr~VSIp6S&8q~Y0 zRR^v+_@@0>>VJ7P4NBc~ng$vOTT$%lpM$}`bYjQ97;+7DX$(X>i~4oqlaT_e3SPdd z?C2Ds6AG;|s)oG&qF=m0gMI7pUvxJo3GC);GvU`@I6m2C5cN-EY%YUH?5_qFx~9mP7}dB;&7Iz0 zh#ANdl$$i{BO@**^z~wOa6J2=uyC<&{(&Mw8lxR(WgRGftdW6h489i7(QkUt>}BS! 
zgszO(iz`X@{YrHHMq$Je5tU96i`~*@avc+dg2-`fVEH?@^96y>O%p#=_Xu11W`cZD zGK`Tu7N(!GL|x4v*YU)s?M+G%l1z6|^;WR|HK`Wk9~t89Au3~;;fx1^&_Vr%;0ZD3 zjXl>Qnoe9Uiz^9|4d>m^m-(8rVP-cx)Sbb6;LEz3W*3YYI){7X5ejii;YLTG#UCy( z%Iv{&#YE1BikEG>!mujjAJ(~9mob9tcGV*tjI912df&v1xpkgm9v+qhEOfh@CF;+I zEFh84ECO3?H)?N7NqZ`TK|6AI2|j-TF{Kz4;T%l6HIG!jYCyyaPoVf@Tf1ol!e9`z ziOC7liR~T`O(w-%g`$0O*+}?N=r9&5je=OXZDc#(ad0fxY8x*Jg{jxU0p*ix&|jKh zTC3?~=PX&qp0Br;>WHTq3?bUFm+vqI1IH6|7b7D(Me2DHv1^DJWVnIoFLQ3C`xmp#yrOaV~jGtMBMFsLfr>%8Pp#cJG1u3Gb@LO;XkBoFG|W@qNoeY^hZiKB8MMs17W{F^brTQwl zz0I0@)h@U{EZo5~0}iG1FP0G^*XtSsE!zEOWHix>zP{8>-l*vK|IeOt1Iv-Hm++Zw z7_@~K#lE30U|FOLtN=HaTtQ`mdl#a0^i9|*`s_6iw8mxBBF&Kx<7}RQu0$DSAL>DJ z$SQ?N>FyE-TC@24J!f0 zaUO>+G08jbwvd}X40ll4-aN6W)RzO|%~(a1pH-vHS&_7Jgf*MqE~w8JHBh=i4425K z=dI(ZTONDq9_I5~7C&#D9y>Z=_&EtLp8RoXJh;ps4R{LCM09ep`Wz zA2?8(@A~_(lza3ekf?BIKaEzTXQ|{<#d2QxA8L8%D-(B0qC81Ha;8B5@obTm7}Uch z+jI+lRuW?nAJ}5BJ6DmQCgH<~(l4?tfB?O{<$>p-w4qxFH}E{WTb-~6fsjGnA44)i z{J=-U0)81~@!L-(X6&D!j4d{fqXHvdlP3B)S-(1tv`-HpnP4lM?aQQFU~M`KR$V~o zLJ1n`c5<&oGJx2X5q2dBbqbh)zp*6jEO=$SXcazOV>qMjaDxuL0T#&(!3mWW=v9~c z7+Id)rb25%lG2H=Mw|)a5PNojXgX(&xWw&fWY6P>k2kuGFiB+xw#wWw!cl6g!prq~ z?-ss3k%)n;P~!-!jFyqaiJ7&#^iC5kU0u-RNGYv+0SquBH{(C@jb>Yd`sbj2mehwL zk=Z#2u^D&;h{%qkFZ)SymZ3^rIz1&}5wR!19M%LC1=HW)=o!f}ttxT89zG>gP)*J9 z@I^9YmmG^2e4v$<(#tBMQ)+fzspWT!9Aq zY>r;(up8a6f^l`>19mFKL?GxVUG@=mF`22!+79LDl-Gn89pDih5SfRlKXA>T*s16$ z`tON&E#G)IsM&U)H!PU?9{n^hVvxK_z^u%qqi zA|dqbliRa9@iW2N_uA;E$OYpaNz|&t3iYg|#s@p^3e(H!N%eglo&o!8JSDZM0aOkF z>4@6I*)Lm@XN`U{@-#zzrVBYvu0@rsaa_;><#jh~v#AvmSpttysFt?IoyWY`N+jyl zOK0bhzOTcOIpE8-OJLNZqt6Bw7IpQUpwzpE5QLndGWs>!NLHbYGk$*4qG_gszJw#v z`aguWJ=ZnIoU$n1o5wOuzW>_!NjZU49CIp$s+NO#AZJnQDcPd$A6#5RKi5G zCL?nf=26XHMRxUgjvVr+sBpPky;$?nQ#Rdt*XNRnp%nQTHumQ94M`sYuMqDg`fFVl znc^eDM#fc1}dFtBER-v7&(2^ya8}_9+-?k=^ z^&?%UF0aH0dA6p3t$XMeqFrd^jOWlu4VL)DV{p!*yAX2w@qAAdZ+{hwbwmt=v4Mp7 zBIZoOn#yZjU)WDw8zpREawIcSucCjTpM^Nxx zLZ#=Mg@!h??G6N(aaO^h^)q~SmH%`tkpG^UQ_Pd-9gAu^X97C9+>rW`$ZlPD=mZk6 
zT?fGe+i^2Yna}mD517pSf&3s*N|tx-ONbH}Tok;3k&={3?=$Qn&)w7?)_Q|SNv>dM z8==B*dThXzuf@OcAVF}bcYopX+@5Vk%UTyyRQ0Ht27iG5A+KML(dH8EvWA$(p-CYD zqhh&y0@D;1h0_&i&fO#Ve|{0AA9}TzF{PHoRNBCNL6$YD(&ArfIK4vAdyO-q!*L8L ztRTKUaoX15r1#9vk2M-pWigZ-n9$hNeZLo}SS!$o9JhKmi~2|4{R53+b85LQ)z?97 z{;yBRa3|a+@rV0#NRh$lrhk#7REr8bWgXL-jBBbgZ0W7S^O1jUk`g@*mYm_fR~G9$!s8 zpkqKn(^sI%D(7Cbz|Y^a#q8_BTT{AVjb;?pOnf2ZFt+e#raYwV@!`Yo{V%D)QtqT} z*f5v%-2<7gtQ_MsqI82s)keB8PSy)Sm=}IM9;qS4?)t6jKNX922STS6r($ElyLq~c z272KBi`}}c9*Wv(833?fnC%4>`&x`0fW4=4@jAV)r2_2SM(h7`v(s#8UOSb4nq1mc zKM_2%w~qz-XrDBWuTyO=r=w>ZSD>hFPN*?$0=jF1h{SE0;Fo~XS?#>*5ISfxa!_j} zUJdwD!qgPt1qY`9y@&NQDLNgNV}TwTR-R}it&{dC0{AvQ5s|r;xO+q&i_R|c6aGH> z6=RbOOY;nv-i|jKUO)?DQJ47*58}14Y5+dy*%UUlZJG@^TV?SpZv1+kvtLVih#NY;IXE}ull-(QTDi<#G5mAhE-^n4v zS9Bs-!00fH+#MmR9t+0Hoyx*+tyK5>L=9?}1oB z>8<*gYwXHJpcr(CpR@|tOP969hzQO_*oC^vto~jOaO*{jRjY1Wf6-_ONx}LqANkgz zy9RwFXA|L`%R7>L%E5fX=KdFzzxhzS^aq`O_;v_k6GcO84N|kIh?6ynx&PXaO?WMH zZvJ*UdI>Cx0#Lp7t0?yhTeY=<&=#;sZ?;VbJ0X~nf*x4;>nWrxYuqC))+$AI7cUqQ zxamDwLKa=G;W>YH2{_ElnfJby&;cSh7DTGTcXBAel~K@n)g(LFWm&dx3pzT94bz544Mza|Dd(4*x*Ju4_~bzzG0Px( zmB>=>X$1&rp0UFLpks~KhxQ&|(Wf79_AaMLKc`DI@6Eo3E-CnpQ-!2Vidl2-lcpjz zu~!!*d97I&yI`r`(}J~A6->W~dbJKM$b!;?f_N9K(BXB@({9@o{~nbX4G+0%P4LUF zO^r6&f)-6~y>n%@&3!>GnlSV!`B{7*Vbr0@gG1V&OptyfE)GE>66yH>iwvJM#6Ne&HVBii>jjibDZ>gv zh|8F+BBx=6CALU+xYC+bqT52w0chUFATL9lNbV*l)ytqj8#RMF)&7+`*7%7{HG(`8K$k&w49% za@;9&c&j#uRW6~Ja!j*!YC}3_QDctChNB*yQCvq4CAAK~5x5bhWBYh_B@X%Dv>VAN?AnA%_%K(D8ez)yycwl?_H^Em38%ZuwHTMS$1cL-TWi zzYmaNFl@H61p%}M6pJl3x<&0g?73lwA%&a?px}4mLZt*`{SOnO5&e*=F0!uK;e`)Y z67(8;HohT(3bvisfuYcD(?wfb5QRlu6k{V+2kFyA?_A51Fof1q9~oZ`I**r16He>> zV)H1K_cIE_7I!NrS@Gdt0ZZ-$6cYCYZGFa#!JVyxj1}hJrjn*)v@Bh9Uq0T( z^CSTvs2qeuXW=k*YsL1;dqD(gw7!Xp9+mq&uW*m9_&I|@_Ol&3-;=@3MOXgisRTr9 zy!_r%QW19%TQP~t>gU2v<9U1`y=P1w%7RtJkSF^|(%$67eSz2dIsXij37N7NedIwI z&007B=dX=A5j|Grt*S(!i;g6-$d!q*b0C^i12UlXByZ?Zt+0Dwn}r4S)Jjjnh>8_l zS%F93yx7hMK9IEoWGJx#MoF&YaXGs2fISZG0x^lvu)3w6}wA=*U_x!Yi!QW-ot0G*Ql~r8#-u@$-~fkIM_x 
z2B9@v9FDfXn8)@P%eW|bq+yuRF%+NwssYAAx%E+<^to)}N77jHh(@=c`cHkJ%DwTE z%qya_`FD6O3vJwRd%jp@-X)tgdhms=H{@! z!#cXfU^F9SsBi87@qONUzJOj)eQds<_2Vm#5#)T=*_gz&1@2LX4Mi|Xy>h+E`_&7S z>uEj{i(-eR8KV<`OIRvLn|pWiFdY}YL~W3bAwk>0QR0nhD-8W!W(SK+M)+PL{)ork z+tD{=-+T4ZMAuKb^ex35CG@zm)PU)vR_C^0mk}jeJI@$=4Ot5LSfvUcMWjG&hrB0Q zXR;YJeYg4sE{zq&7+~i@I<8d~?itE8k9!a!e@lEQp9?_F8p?O06mO%~Fi-4Tk+~6i z*618Di(7F!H!n-!)gzWgc?Vx7aaPbTn-&2@4X$Uloz?UG>lEj>HX?799t3TlChB^l z6roCy(KKg$f@xD+Z#x-`mu{eQg$25%Wp0$C88A;Le(o?nm+_Rvy8vLG0^IQ8Abjb$ zD`vpqx?@{rkU3A~yxQvUda6i>YE2$zD)xi7m8p8qksMuHCgBhz(VVErG(l>o>fBt> zmGS83-RH9H5>iu&v#8pIag4gexY|x}S=-ZFMX1(3;_ngcg^M{mfx`&KZ(H7AJh1Zg z{a!RB8PaOEKam*Tkthv05WI($&B!E=1H8iDXo>I6pYsj>cHXpN9GYMTIQJIRB zRGd4^yNr#?vg|vUfj^7dPC~p^0XKmp@U!9^#hBmnt*qk>yFuE==EuWm z9FkWRd+9q+F_@=aXLUM~L6eF?I{%pE(3uVqvCbZ*_xgLyEhXjR_`0@qI#1+@_V?eV ziz~1ABo29jTo`9aNZO`LQtP!Ssaw^`6UqK)Q!&@7#s5^3aYte9d2Ah`C(r}zwUNxaQHlg)_MTZ6YOIDcJTJtTSYURt2 z^4Bw(creB3mD=#XKo&{OLq6mK3z!&$=Xdw_mN)i|)aW8gvj5Gu}xRH4s<~}yR|0uZw^itt26yn#9K&*Ui zv|oekL)A{CRk!J!DJz*XmJYO+TlKcPNmiuXak2I0Hy|H;(#;)$(9a~T2>BeA;DMvs zdJ;S3+~`?jdI!$qe-JaPElwx!b-DX`qWZ^adf(H?#wOCAdjv?hvf!kSOlRaW_ErjU zprD9XDt6`GRmS*&K={evhib(OgwF+aN8-QtVbxbC=Ev5Ll4AvZlXDD`B7vAkjCRh{MaJ<^oDvM|W#2VbeKb1j>P{SwT0F+L|Bs!Y1ah)<;F{Lz(cy^odPZ0 zt%Yfcm!2u=ZvlokZtg_ae#bLyb6H|bM|t}Ayekxg3y(q8!SlBK37Kf1X)PVXKzF(8bdlIC%+bJ1iFQQyiIUywv>~uul$JKm&Z1RIe z>*NpXLupk&dEC8bSNJg|@LzEOz1f~3o&W%Es=yUvKLOTh-o7VZzXu{DWZyK^ zcmxQQpk~F?soQ+llE->|k9Lu?Vrw_awkJ2es?7AprmGD)aB2_v z_!>q*>xO%wQrpf-45?>xTG>#Y^i4|W0B3@tmH`yFn4UE0Qyv9WtUY*VH~&%Y*N@CB z)v5N=!y_V|T42n`K~PYb+vrM3d-(FlZDv}xJh1{lE!u}1Nem@_(2SW4Zgywz==Lfq*2+YWL9XbV{!uh;w7eaMMlF=1ym}1A2U5Rrg~WNq`}slW+MJJ|H6(A<4s)o%En)f?6_6{OzII2X^mSluSv)?9$krctpW`)72vM9 zqY{BV&>Y86-|--hkq^$wbVJNIZl`}zU$tL%lVR=B2ymA8JSr8&MZQ3u2asq2PhxMx zOZ7cp4T}=n+Lu?b?B`bH(WDuema|LB z8u?_?Xkdx!bZYYxPP58V4wF2*IAA`-WsvZQfqxnT(VGlrSuWDX<4Q_LkD&PwKu$z0 zE%e>Pgt(rFtd`j*tPutn0dS6K)@|I9WAqLGap`eAjt|uId&^=CJuW41Q;T*%VN9-7 
z=U!+?NI;q3mHjCX799-y8ac8iLQ5(Nl1X^$RHb`Th?5N*xszC}J(NOlkVAf#^`MBa zSG)l$lG4n|1HaKFZ^-_?|JRoZ;XSX`HdEGamT^Mdte#ii_k`Y~A|r58hT zX+jPNmLIhUx9wfGhau^p(5i)$Wq3ueIQgQS%rcN>{R@`7tq<)_Gu_M49Nyx~2_{y#Hz`)483R*7^tfd`!`|PqbA{h+ zAa9y|X_(NT1}aqA0*`=HKP{hq5Q=5V zxl@J;a)Bn8;TxX|i<%Laj=7SHO4a{wl4|;CMU;GJs<*sZ>B??3V~?*Q=c4%`Bccy& zSLE=AJK*`#7kgB!qdavNx1JR|uk@KG6J)lDtP#U1f_+@>Hvr(+`1Boswx-QJ41#Bu zM(sj0Gy6Ge-Bt(WYgm&F#v=QDF{h&~Ek@wh<}Xhi3N0e(c_ZSV$o&R;s3v%;`gQt8 z7uinI0DY$sFU>8d(}9xdFw~Xo?Tzs4Tp2CtW@A1KJJf?p$d4TW+@}1>!tPkpeIJlK z)Lh193AKW``$0uK3L4=6j$LNGvag*t$E@joX~xB53g4MVDcixB-nn6~QUAmM!P*mM z*pdJdWdl3ChN;=d=hvfYo&ex~q6{3QrZ;%zeSSxBlW+)|Zub+|PPrLH**6^vHLOo~ z>G{|I&jWBhvs@fnA<>x!q&Wej|Fj0DUD|O%$m5i@MXyiZkbQtDTPzbCry>y=vaI+^ z+zd}O!@O8Ld}ecLONPLOKBl_z6uO$0;I{E!Q<0;SQic}lzQ`J%59P5N0BWi5B4?7) z32!`)o$Zx@x-cntmYk0(!pW>;Ol9PNf*NAH_XCU6o0~A<6CmmReDy|J{0~26@xZcZ z9+NH>+4wXdOV6y7_V1m7pZX`)xz8Di_7_H5pQQps#5TGAHKbfE4WD&KBLAa7Ar$#caV5 z!I#2Ztn&HYDT}0)Dw+DV(UZGlvuHhVdFY}es4q+TU;CS+{Uu|d^fXHpm}De#(z=hc z{!XYWywV71t)82F+#k|fi(XqNfY1C}hmW^b5thqX7mpD~itLj`I%-S`iHN|E2biqI^ zAst>=cvvvPM*T=yl-dNE=mvnY@=aq`F#>>71&})W%1#Dl;9LSVytlY-4OQ28ZFsUS zkkS0U1P*cNgZbxGKqZS-MXs;&mX;(s8aM|z#H}s|Y0RzJwE5WLYztb)-KFujUmBXL zEk*iYoQH9l#FL)3)0bcw{)WM#qQ2Livd?yF3$-1_5kgFIZ@TJ#YbIu#5U|c$6>*+k zP07Tn)X0$Y?xZ4r5O)?q-s~M=;Sg=-_f6sU=p<2K5lUV!yJrD|#_g_ErggG&un1Dd zAQBtiRA!@9ZPbAhRv@hs{Eh$Q^S14ltv;kc#NO)nvO9QLHrwv^ww@BwTXLLo2xFiY z$n5g4XG6jrhI}$JjE2kdl3zf#+8L(-kH(W*c*0uVHRx>`!(@iV0 zwB}jqg#EEV>9%p{M58J)Kg$a-)FFbcU^nkL}T``dk-W9Q_Xv7e-2~GCzC=pxKL8lkbw*2pqkn z!frJ9Q{>e;qFMbfdmj)@JH2w;Fx0J>sM)*WagE4RvZ-NTFq3 z!aljY*gUo5qEbz}E|^MgFWe1!TSm~SQwRWRlQP8_QtWQsPBNO$fw9%6(LAZke%Qs8 zy@RUF!1qhjl#yD}Dh}mQ2`ldtvM3gH1LK%9;&V)?Y5TfhXMViH4gcO@TtPo`Pa@lH z>7U4u24-tWOO2(HlJg=PJAe6{b-!Uiqbe6&^61PjhchQk2~zTc_53Jl&dFl@ukC?> zz6w#6A<3L-$-ycyI10d%i`JwYPQL!t(|~%PWp2m0lS@X`K-j(^_}gAv4Ko>~pMK%apM`-#om? 
z?BNPPAch zp#B)@0<1WOu9OnFHBT0A{iSdb7NF{Ci$oaDDUACwrlS;>{$nAb!SGMUmN2uhNs$)L zc20TNaba8VdE3=~vEfx*d`*t~rW=E`@vZ4Qz11$+lj}=!kv_dXvA_A;dN= z8_ZH-=L@y1#Jl%PC%D7=i^=7%%`erKaQp;RiWIj=+c?o$=~$$8_ww_9;hGtV$RI*w zWi^-AW{f>?~GuN z=p;DCQATaVx#Ni4$b(6xxip!k09K20$i3EJkV8fApTwON%!L}(FN7^ITl5iSixY(a zm&~-}@QyRyKkUKI94_5Uj%6gA2gbI#6WAP3qd)3gT(p6~+u?zA9#_%Bp7f}e1fd0t zx2td5gR}YUncz2*Rx@_vMYGN)p)*rtT;>Hlf9oeU216((%^P%A&gUYs<9Jg1&UI#e zVI&Ndsp%N4pg&oe-DpY1smbZ}lC+>9(rK5GT#tgc?_$scj6%$Ij~&EU8r8gvLrte^ zf2P8|y8t3f9e-KSH7zkn8Y}%q|IuwC){Wli5q$+wf`{vGV12CxfUwRJIsd7+9@RVe zDj8EvI8*XRKwG?3C}JITvf|0M76AUky^_-z3Tgln)!8ch@C$)H6#QRWqmHGR)_xXJ z`6lE(iI+n@$;N{LOvd>?&fxUbkU^;^6W3tY`C`(iU-CUF@kTYq%BHntV?OP|9}q8!-KQi^l)2w{_z)FaQZpMd|2klk5MpsA0*tZ72N81?+OK@JSi?w& zlLUnyT!QeskOx@Ifx-xEoPD=w^)JOp%Z9l31$Xyj9sppaf8o#GvWT^!EyAX2k(?WU z@k{tt7)09qwfD$Ks3LX$j;x+0RVOc1l7r+IGnDwrEY`5)moAqrF{=L}!U@&F*=|yR z8ApjK9fe_$r3Jw}Ma(B3iK zlX4Bf(6=mo;0xLWUaJ}A-_NV&;)XD;9xOVD9uc7i@3nv|IM5MD-{qF8gvEj1rei9@ z5y1C?j_N0}hVdFKny!vddgGBb$TRH-6B$)zGAK}=*Fyh$COYVpZ=*nFjc(?i1KlX; z_G6#wyCk?THf{*)cDkC4Rf0C7G%YYpP+uT`$Lx8SfreCd z-Ui5{$xR;m&*F2t;AdB5s>});s`c-fVQc9TfhXkk!YvdECZ<-9k}o^&_9$uxatieR z)za8Mb1;wX5N2rLTmQe%-usf$lvf%wK=+r#)1tDfaTuk#sYw@-TJJ{K!_4j}?3Kc4 zP)eiCyVpKNg0^#^Qgw0FOjkhj_A~5%O4a&$k3~gZiUK@KNq7rM8ZGS1)vm}amiGt} z*NQL$E|hz!g0D6@WOI&_BQ7vUEbk8auggn}xQ!xBAz;wd$J}>o?b~WPQUmg~w zZJ!Lho|N0pZcrEf4U{S*-^24_Lt#@=F9m}9xs!>!KVGv2Faynq9w)(BU8xx0(j?Jh zQ%I=Pw8wvE<#5N?yH!fYA!MwP^ltcJpNp^C z+JnIl=7gM3)Yd3H2I+h;(lSf=sYDF7#0^pmKJ(ji^8!_uls4`+r>n-eYOIPh-03mU`Qud*y>=CotW|fgDhpmy4pFFhjre7&(rIn z=y4l-V!t6z;|J{PEa3f}DN#FJ)tf%M8D6^cc7x;NVj0NoQ81?f=K^ReWIQ{2KVoe> zT^oLDu-CY)5c}6A^3|1}nLb5;c)9~}*?d;_W?Zh5_PR8V9!3#?WG_3IBZ7QB;Y#(wbm;*t7;5TwH z@q(yXMRE$~3F4nCaLMD6t28X@(rPM6bjQun(|5@F&0ZJUVWu9EL%}P<*sokHjtska zvK4ZW>qafR1?22!t181Nelh5eI_$9DRwpXbIvKbKoG>L-XnS)UY z&9^CrioUwQ-^sBRK+=EX5~ZYJhCqZ2TCkUkGCbfH_;@s39DtdQ>DDTPuRA+(-@=;5 z&W@YykD|*VWAI>mpO>;q(pb6JhI?IH6JQ#&v^A&^U- z{m7hLh;w%M{qBg3%?UXOzp10VjHE5u48HkD#c8C5Xy~uTsT0bI=Zm%hE{WR-ZPKV3 
z8G~Bgn3BE_ovw|}Em6cwt}w?f4tQxgA+d*nCrR{pKwWHrfTc#)8L_^_7~BdO#-Mc~ z>y;{8gQ|aR=9A|Y1V4@gVF?`+ZR6xpu!nL03AO<3y+2kj)qtUFs#G}=27+3-1>t%S zxIdm5o@B}ID~JG#18%zw?AJgBEC79^lPaMdOl#|ps?PYeh#4~FSegVI@^;(OOCWje zI{Hy(;tFOw=UqIE9?_*FbKJi48ZIZrcfK8g5t)Y(mwWVbnAbRykkBB{0N6aNG0;QP z#VdEMRGYT(jiJ1Rg~*aWp`Kc$$3_TEo0bkYpfk0cR;u`_ieuQeR|ST^KNk{|^(f@* zYj$6!zdPzE&7EtT3D)5vO)kCpcdK00r$GOPiNUkI3&|iMHu&5@|b)V~rZ?*Zbvu9^3ps`~bG16bouY0|Pk}bu}C@_{L{&DXbPJAria5+zFP3xI!B*o0VltHmW{CZMyV| zxN=>B_pFTQ_g8|Jo|}=hyu#mG?Qt12eJ7DzjOr>{?CVEsmX|3tV1@koTFEj`z&Kz3|TVG4oe1 z_9^4Bz@qf+jzl2p?KBI69kK0qq;5vyE0o`<8HrEs6+BeZ+)-EuU}h)Rl=-Wk zwzh1BX*tQ&{|NDN6siG@-2KT%MQ7#^(-SA#CaOGhqAR@7hh+w7Ev9k!v&5-+lm!Op zSksA7M3${1N|P+S?bAcxq-}iRErD*KbKub9?LJxB{$e-L;ZYmBTbR^?4K&f;22e*- z7tDhg`XKl3rAy(b>!T@#-*?Htk9f6>Ac{qh88Rf($b?XE?4B`>3FffRWe!lg4 z|Bh#-FTF?JTvL%_;x8Nc=Oghg*2XYgl?BDy5g$!s*81jR zByYGlBawwx_?HyGhImEjHJ|tJ1Oy?T^#ff4sszT{AG&p$$ft2?G1^7Mo3K1~(#4-N zmBVj3n%;CtMTneRqLTS;`GSj$p3?_wVc7t{Z(VI8FD0>E5F#8?5SFi$WDIKDcGm8cHCNU3!~lWma1vb#BGg3C z^4Cs#x^lHNIm*U zXeg$bpgzD9-_!4Q{ozaNca%S~4rQQK_LGp1 zRG1u6xk6-GmuiQa78vcBrYwkoRQ?0@VxNCwY;x#;R6Z*UBq7}D{ zPqLdZt{l2VtKbmSl;sonEwLY;PW7vGMS_+f#uRleOM#Ik2_NJtup&nP@g;_Kr5ea2 zygoSPbhFTe>D^s~wQ>i4k*+^a%dgU-THOl}li1}RKw5ApkLG3QU+=l4DjbvjqSq+D zo~u={#K_sWeU2vT`0!7~!+Z2qPSa-$ER-8$Uo!pn?_QFmRh+@hsf)wXqZ{w8I-`Tx zh;$8M=IuN|2&L-9D1*UJ?H`OYuVXm)pEFsBz%0R#NNwyIPVAhgu|aChbl zT^ElRJ>t$5gY+%A$L4)@R|^F#7(kJS+UkOk-tKvDcf?1N7%q#Bkr*J$ev-EIu$$9hTy){s+m+mp^^Fr)5;7#hJN&AQ2%}UhZwXS+SAWL@=w7 z*cPX+v zujrJ)w!`zXS8<-(nLGimfjx?V(D2ZcV_gGo$}jNAEBa`I7MA3~dG!FDh#(EjV@| z{vNX&0)HoQ7g%R@O(-;VboqTSER+am-7-k91bPEQ=nZs$4-Eg3J0#ObFbh7fPT{dx zZs!wul=Ww#uD`ZHhb#h^><4*q?Hkvn@<__y=8_2Pb9?)6NK)xwE|`h3BW~CBW>6>G znAIm#M43p;d>n<2TeMmKU2bd$LX?Kg{}U(3?bXz?R#jlhP8Yin&m-EDgPyS{q2!Dq zlY4+$Z{gdMzy~NBP`xw76%wR}WKK&~uyE}TZ1>vya}A{+I&Fyz5r^h6nXp%>&WlCK z=N+jwm@yvPC<7nbMV`3WV_?tj7MxDru<%;0`x&6JgzV+hB7nU`-fxAVk9-@)OogyZ z%f3{gC#a?CGRo~UF4xcx)fb&BU116UeQw;&Lu|ufE3?oPQ*AmZXA%R3C?#Pl%f7^? 
zocXw8$e$Ji{$2RT9jjQj7@(6 zE0f^{sO#Jz9=^f;?PIY!7(A}d{Z0z|S=Xrz)1|pZhbegvm-|zvfHboTb>8kX_u-3- zN+*H!jBPM^-4nLCOwI?0$qG*h2o75xml<>$K-7m$JvfAfpjd^5xSSj+4w$3~U@|C- zX1CF(l`Ws&iZAlG?l9y`=sL7RZF!{j14+DY#P618syjg1+gwz9^kICdp9cITN;h2r zG;1tZ#?tAmn`>76rjL+0TFA&h(KDo4jry;7c@crK8~q3Tr!OE3)M+d*9V2UsYYhZ7 zMOI;gSr_C>U;m}F^6wd!RlZzC@bVovU>bJC#E{1$=WGBmxwJTh@O0BSkoOo2Egh24 z9ZQhKZ&rLK%`H;NQ&}SZacaE=?1u4rs>=w(lq-_QoO1NuY_B;6=Q&U)3%uN9{WACD z24EK=>t(>fVaUp@N%xEBjStR>*RO=tgo+tIsU@Aa=PH9k#1^Fz)_XvexxQVLn<|<~ znFwMZ^%B;A$J=MuH#lz$fxwW+Tx*%`vP$pz><0zE^zf_PZsFa`$QUA_`8r$>=tffi zQ@EbVQvTKCY=vj(GKt@AEHNhz@3oe7wEdrgn;tdBYF)qlpNSV!ggGi2Mg?=m-^;(7 zsfB1^3GtCL=$U_EaFp93&4X7m%Ye284=g|#JQGPlZpkwv@^}*l$5y-U<9k4|DAYsg z&KY#HIB%xhtM?iZ7GaK(Os6q}Te{=x3;Qk!+F4Q@LHuf58FY)*(L9Q`uCyu*WN|6zh^Bw(n|bV~rq?w~EARx66xK3amG2kP}TtN`bQ1+E~ueohtbqn*RpX=T9t7C}NyVpm#Q z({9Nary-u)p*eC_TntP-TT9RNus-|GsZGizq#$zobx$7Y=M~yBj>gi>;Q+&o*+T91De5^FLM>x zW_{Xs>s7|`K-&x_wC{NT-i7HA>OzAsLhZ?tm?XyP@*%l4WwoL>%%ngWsn{&D^kMJ0 z3{37jp7Wd?ru{at?b9YvgTk%?F1<-w{*1LpiAVZ3SB4ODAWsZga$_U$yureHpG`|V z7rO{AA@B-Sf>~6Jp1gy4$!v4OcTTY3To84O8vUPN*Q6EId!ij0VxaHs1P5b~W1&i= zWWOI~xUU;Q)8nBAC%wv@xWXH1ZTrb0gkCA7LHtrPU|~VaUHa|f`zcBa=J)|gZ(M!K zkW6@hIE7|1f|1FZ&zn=A~L=sfCvYZNGH0(^wKFU^N2b47_in@^EhM0$*31ZUp<6cCukjP&8)7ex`*F9(!#1zdqG1{aSN4kX( z$a(CMDZ2rZGVi$w%OQwhdFqfE9#=4m&IRU?L$#w>o?R06)^S0a@aprfO7JS3+d)uhsOM$C>#p!7B>L+nj)D{;um{p36J zM>+{`ZYJ&QLe(C6-^+1K`@&ndC4RpaCOikXW;Q3XZ^zDzLIF7+r4wK<;!2`5PW8FU zJWs9Og=@GX5pk7HRt2CYcKyQsNAY?kFxBSw4@pyn^=e>%3>Ow(8zcb-QU4O)f$M(F zi!AA-Ce#vPjFQ%VaI-K~es?9Y18`Oa6G+06L(&DLd}V(+`rH>R6OO6;?a#<@k$X!z zoh*&0im)U8Lt_k;DL?#0Hrf_!X5#jCXwMFjT)!1DnqiIgbeX8)f5vqGBMHl+QG2=G z>pA~zclt52U~$FvW2qQ`lynBVef8uA_700!&rU{p?yuU_h+2}&myky(Wn8)|F?qiu zO+zdlD2qtbvMzVtOgYc!o3{vt6^#!I;RDWBYo?GS=)ydvF!`k(|LScOJu4mEv@|{+ za_mMFQ_>*@ui4$kQfHT7xS&Lw>BH*xIybtRVOXH%e=1m6;WUVnvxvaBMz?6-@f9Cl z+$hskwSx;$%J28vG-dxbRpn$`G8+$Xs6He7YsY8Y%sr$b$8HmNNY1A6GS6fh+MX#p zR`URB>TRCULmg0ao(inOb3D1>{oJDZ&p7f;1DYp)E;rcN=Q2awo&D5t2@(uj@G<d$+K*2TWMuxg3zsNQ_^D+w 
z>S9~d8n>JH!CmONjvZTAx?650S~3R-#Ak$oWMBrfJYR zUvl>P2Ye={xWAR;qpYX6boCc$S(D$2YFfERZYbY(QE2R9-rI2_|Fk(Y$#GT^j}fRX z#4Q_9B#6eM26ZiIK&a%N4#@v{!9l$!(>xQ)SpPr6tP3HdB5W%5u&tg(sz`N>)s6zF z81;7^HFB>3G(}8Ch>GCcjD@6nt0)I?hJa%Y(QA&3zHj>?7AlQ{tZo0yC$#%w`c~Xx z4w=FFrod_fyq2KF^xrrG~6yyVR<-AWp&I z3(q5`Q9CGM`OVdjnPgk%wxy!iBg+2CDqrcI0Vi-H1uMHke28X0) zk85&6pD~muns=hDq+_ug!6Pq<_ToRFs~hJ&Rst2@{GZm>0b*Bg9^Mv58QbV)jP4w# zb^V)R*M!?zpOtEq4S?Z~H12seKd@{nd1X)ES#S>LrVs-d&d-IU`Kli*C0SQB9$Lyn zBDWP-1PTj(|8-yWt;PnVjT)*DxN>zvMD5u2R}SMo+r8M;vfg6Q{Ch=at}OuV&n&9C zhT;?Sru}-e^QoXh;m07OVoBkXjID8!(UQW@#k#r+T1J%CdgHQ&!FXR9awjp2qjU&; zEKCN=(vX|*P}3WeP|f=2?%zvuS!{Ut`u;OeR31AezrvmFgWluCneV}xovGuRa2o`3 zX42wvX+t0zh5^I=>+b1~;!=AWRJHJ(+I22}YTNN;)ap&Y)Jf%CBu?T!{f1(o?(k@* zC}y0FQxdWKepM~NQ@Vc|?joxlxjI{BTd_DDB9u!Yn-`%>e$GR=S_g83c}y(}ng`2Z^Y+@V03A z*YK}yPN!Ab`%i@{sC7>#?D-S;@*X7PcKZf=SE%ya#fsqkrKFYbiJKj7+1YFoNcZ(i zR*yUwRArluN~Ek&w?|0E1_Y<{O7ND4PU8_zu#h!99DVk8*57}PdBw5*6J5~q=#dF~ zKPgh%fs}N~F)KdknOFAGrsu6^tLq0va)p;?Lk-_LIOoeAXKBOw>sRJ2HNry9d7fJ# z0x9VDlv;8jek6irgPy^qK=?G$E!zt-#F3s#ts}Van{Fcv=Mmr9>jcs5T>pvHqa`Q< zvPtqb%h2!mjiG0ow7t|>wIWC6B*u+h45hVToR1MrmPX~!9MMXt%{pV29M3y3;a^~E zQgMzJf-3OR8Q+7I6;RSmJ2^_jY>s*(`ao?Jt z?&?&yb6M|Nqu6`iXQqT>{G|(K(2PqMf^D6h>;e|Z_B)8jI#2*h#uE7|yKW$<=hFn1 z={g&2f`#baQt$0qlfCh{2O-e?)7~;_;(_%tBc81jMGiGmJKw1NNJj^ZyYSouQ%@Sn zKXyF~k_Y`AU^HLtYn~2r5Zbl>QT5s-g zM|w@+nb;puv-zSr5Tn5qZf2V1CxLte^S1)DCUbWi=3WRQ){&x2qBWtuo3<)`A_kXI ze-&;N-h-Mjm~(CLqWVao1R~d^+SFv89jp^8V|xzA7x*g5b7o>yB zoG78j_m(TqH2BsjYjQhgQ|25;AE~&zSisP9`+hyn>e^x%iD9*zu|*;JB`5c-Bak^@ z-w;$~z6GX0A!X05Yc;2u0BmL(ys0h_qi0XkR0i!q^W-1~pg=b-h98`js@$KibSvek z@}IMkKUYfZQJs$Cu+cJH z8Zr{gX;=^4h!Ghx-SDFw-IrY-+)Wgxchh^p_^7xet#-L!CjW}5CYjlyM^`R${;3kl zH%-Kuy#^Npi%7&IP)rE$XGJkftqm!WI=cBR=Zb{##Q{qw5qrg=(S0%iNoLX{5E`cO z_@x%abm9#1L-x|vS-Ueczw^TbZzDr!50OJ~eTSC)6I6nQuq?1Po12HUF@$9z5SmX! 
zFy%5u)5&%)287+;4T*39`@Sx)=82V_KH6i*6&Jb*Mx%8g(itf5v*|k&(?JjnQp{OY zOYA-34!JPHX)IV+ET1j21(cK2J#wk@`sy(F+0~swTC^^rYxb{F&!A*Ue;fAry>}%2 z$fk-HJ2Kh8(QP#ka^y9lOfx>w2o+nniWZHI$Hgkk2+Ur*%v=7aZXz#mtFF6!};n`Zuj*1Fo(--Vu z-#n32dgiNCiYg+%E_5T=UYv+^o_<>gZN!$n37x`yHLk7(2ccU`X6t7M!+XtP4*fs4 z(u?rdq8|ZpJ$xH+ED68sVdGo1!wS?GMm<+WTIx%IFhf!5_T3M8F4wmYGAfYvvexRP zV1#GL&}G-qI3X{vKcTo?WPIWIDc!r$r`chGm=GKfL|nv3osN>Z3rG<^Bwk&j2`-Lm zIg|sD*d5MsLBS+U78ty+`}9cw_8v7IO>DBtQcGWBgq;1721J8TlbtA^(w}_*WTb5N zXJR2m>1M&`JDF+iSGgkZdeUj>C6tH^Imxg(3((Q~azek1RC$3fq~dsV#4gK|%_(H% z9@|+-&C`fSmg6~YfqU9%)#p#A^{8D~gdHLD6Nsrs5>*z3?N~6R*l9Hz^WL4RCYf9n zeauoiQ49)zuk0ZGHmBbmLmWf#El(6S;NjwXRRg@~z#r_n@!Qwi<4B(1RZPW)l<;^f z$%XPR>xS(Xv?JbStLT`5zLz)P(kWh&cjyc+ZS?=HqPJ{M%sKwr#4Z`V~Ei{@L($ zNZlql#>z;-z%TJ~lxM#xjXw8Q*tdO=dOK_X3v&L0BBCKMj#hrVO@L@CQn|JX(3uOz zN5a&Jy6LrvdsF5MVanlkUqL8*uP5Z|8lQ#7hFQBT=Wue4KN|uva|{(IU`4NKLr~lR zn+E8}?zhsmSZWgJ8*Wf4R{l3BA_el>8bBgc4_+obFaS}SILmmR8`$ASj)H_0i{e7i z%Jc~s#T5Eg6TGr$pbOwg=q-o$MC+-qwq4oPVzoKM1L|meE^i-p8ZxJH5H!Q@Bf<)3ivc z=Aw^gN7^Rk2;(+GiZ6sqEwhsKvSLgmE}^X{Fp>DekyQ^!P` z6-nkG5$>4!d}VIT(4~#jeT!>tdhL)0%g&=FUKB(BlFqYB=x{K=G^S}j61MLMv)c;)XMC@=s1N$ zy`fI2JjxqBe>8e~bPl9_@Kg;w0*vCxBRLY!T?r*9Y{?GOt+hLvD+YVfMb%5WM8-L@ z>P?jSgE`uQ;HnL9Uiov%x;?cQ3|0azw_Q*%>4?2zE3OedzgZXq)Y&cz<}eh)R0aaTwd?Q1a> zx^R)^bEYB==Rt-V65sCC=)3+PwF5Rzh}+pq6)y4c26UO-_Ti9n)5turA$Qqumk(#q z{#*;Z0SuCJch+nRlEjB<{POg1eH`V_gnm>Fz!N{Ke{qa2ITecCT2|pU@g3WGv1_5z zy|%?dRK~)qrv@E{Bb87Mzd66N_uC^eKR=~cbye9Nsr&4jRPYG-9mB=e$gsjsCviy@ z!0Ts^mYwp@I;F&~oOBA`)5?eTw5UmsY^*FsBTvsLJNMd@L+u#EWfAXgod+u9?AnkS zFo=o6xt%%O0HdLw#$J=4}r__q29*s4f*C> zzgkToD3A8j_9Mm@6N3{-Nw?{=uL(%upE%=BT;6tf`0x)(9oihQfe;IBr=gQQxL;-f z8$XTe1yK32n^6+z$>)FR-AKPPHj1{Js?xOyKh(zNg*1SIWYr_IahS{1oEK`R@T{3N z8Mh&68De!Kdc4Srs+%cBmCo}&*q0FV{-^-$3m5J36S$B{p8>IX^__1rd}E*6P9S(_ z%p^W}$#9EeS_fR}_Q&`+=uU#;I;I*=wth@q_?f(HkFX&tO5aoy7Rod@9u&*r$8*%e zOnCEFC8qq#5<5et;%tAAfndUKNlp1Xl^@XOYH+tdFbmdo!2J?mF=w4`>yhlm|eZp_|qdJu4` z7EA1`JZ3#=llgntLCR(tsvpF#0Y%Ypt4CDg$eb4TmVw^R2DY5pL-azD3lP$$Yqw>8 
zC*stMj;8{pw~TiyOs-cT)?W_GFb?Phw0t+ClVp&r`~j^X5Y)DXEP0q zhvu&8RTBTDqTU;^YxpX|1jDEwE%QfTzc9;%4INM0%`pgJzelsLGRtAzBEV&wa+R9r zKr*)e+gw0VZ_rN;8*(MqlZU2Fad)B1BIikbz9h8I7g=haSnK@~YyZ=^|5tjRPwP4H zt}k7}xiJm-T9R^zNm5U;s3fRY=nW0FD+XSBJEI&W5wt{2;hwd`8AkNWkosQS`?dn)y~1ykl;|=(Ouo%KJfGzsr!_>c+KDn%Mb1vbn5A zt&usfJX+HDU`jXqA*W^OEQO)U*c#cSezVc!S`zP;sc7s0^`e`FQLZSndjl3?tlEMDy<*qA-XFV z7@jsr8FlGx$AfbW7upskxm3?>(U$2Xxg-$&luYR!oro_JR&ig^TcklMsMc7zJtK&R zg@f)(>&cgx>2=Wfwre>8iKnK? zzl8Y|R~y6k+vwA?iO{HPZkcO_0{N@><6JiPO>9O`@^o8s+C~9xK#3_`Ru}jyXt!N+ zMFQA)>2--3BbJ;%MPAlS;C(%)`j;#&Rq|W?m!~pfDoE)YoBo($mQrr3{eX6)>#!gT z1i7zJY$YoRyb?@<15HS&X19yx*y^qAE7!;7VUQZ~9b&~zko`Qav+MG*6U!wM$et}R ziXk>5{rZOF#|LyrGRc_a##Y%h_f<~L#hNb;3h+0f!Z4UaV%TyP&dp>yVJmcj7YdwJ zdVy^<8wTwXJdE~8Sv_8Xtfdz1Ey+ChOxbR^v&_CJ4<~o)^IHU*M zwc;HwL;H7ATcY-Aj}!6%N%8Dz+6(WWzpTRGoyr6WMYwUD*@{Al1*+&VFOjPBv7pQU zdX&s;L*|S6m{QnspTw#FG+$@^jEFX%`eD;^7?hJL1ktH89~k|g^3Mo0Ajhl?+!z^= z6@MX$eCVmg&R9%m8ib>aSn?ztT`l1xw3Hm63qG>d{?hkm=-NeX8EP_r;yP6w?Y;v< z%YNMTu@8lqH!2Iu)%=9d>kG{2gJfg?PeIxx#b{StcOffh{j8w^R{i72a`Gx$*#))} zjf4oQD%?6uX$Ye`l}=^~gKjy6(*m`7yt9(TqQ=-j9yUKV@J~9oW@FHP^0FOk5_jo)y!tW!0+7 z_G>s!>tLs`m-CbHm+A6K;nm%l@UMiw$&p6~h{BhQhzv{gDWPCaXh2hhZZ2maM8-v! 
z?M;Uk9ui1PDc^FHq?D!@%b+x^u{xL1x=-@(2Z6CjGe3uMftTz_W^NNW02)4-(kleH zJf%p&2UzoA{oIHby&XC?5PcC--Lo6PHFn%Y3du5wB_qveI$$g z)-yW|r?A(y+C*^m9f7xkc))<$R%!u4hMl&=K3&u0{tZ0@k<31l{wg+OIRQr3z5Pm* z#WsX%gg~HMR-?33RsEi5q17-O3|wqfl7y$~2DtZ^UEr4L!#VSpkAl(AD4Kb(@R78^ zZ4+f_It7(#ky9xL#B3SEtsL(}u-Iphm=}(3pD~kt4wAYsRxEaL`O@f3LG0G;_;eVv zG-)ctHJXYtQfEgoM5>UVY4Ecqmk~)x>2OeXiXj*tNs=b{Gf=ad>abGb5~_eX)Ud|1 z-4~op(D9(NQSsiWO<7jIX41AH|B%RMx@_@Hvg}rz{7K6Yszw7)g?A;!^bA$ME-R@( z>pB`EonP2Kxy^zDHDCu67hkABihQvA?~}h3sWX-ir-0!Wtewjd+>)15*o)Qsg|k_v zR|zF)15}smqLv=Ol8^#RS&`Z!(Fp;|MJ}$-2y3Rk5q)>|;&G zoq5%DNF`OPE=`}qB@`Y_^JoG+K^7U%iI((ZVsv%OBaxT5sh71_%(X0}V)EL%K)>j( zK_=8O-neZxP>a*lQ=g{Dr7D4eBfhL@LYtlqxDgu#{oB%+E>j9J24`GJH&rg&|1CI# zcIZH*)}Ge)Y9%lu_8Q=_*80?~R!FU(8;3*3g{zN2^_(>xe)#;e;|=Fw=kqL)eM!{k zw)U4o;6{%o5fDThjfU@*pam)Y7r^Xd8M*i}0i;sTdnN|}wbxm(Q+O!MmPuz*#rT5) zhCj+8NRf#a*9tRwQFZjW8&o9{x)o=?!*Zxf!z~)Dy|k^U8c9Z-H3;Ct9{A2IY&2(M zUP|^pxm%G1aXosscW*6B7)$_xTBLog5uHVn)+z$;IaLx^)yQVv1CBxz(%uigC}Kn{ z?M8rt<-#~==J261BP_o*{7%jt6V`l$DvsR7!(rQD*QATMn-5?at}gbgr`+$pY@5ex zG#9FyJ*lDc5I*MCN}h>@1azACadK`fwEzYL($LcNwEM}9*$B|I=VwsOd*jQ>1CiA6qaPgp>YTa${zeS zwEB(MAlOWL0d?(jsd&?3<0t7@lqv5C)FsaQSeJ3qfVL%@3Nx2-#braLMn%S4yN_q> zamNM41pHn0Syhz92c%~Vl<2?8^H@)vsGW?A@t&pYp+o@%%&w%QQHhwPT@nzC6Z~I6 za4Tt~5g^Sa6KqP~a1ZO3df%sOY^bcYS5aO3try_qV;p~PQRV~Ul|jCEE_nR0l#~rD z`Qg{`Qg)?;_~_gF{Ux$#NkUxw+(8)|nOObMY#nFWC@g{wldG}Qr}5&F+Cc1?3!24U z%#pAVZ`osGSr^u#%g99P#a)+f)lWMxgjSKFJ%SE;X(8N7*u(UtX{hUHZ& z6EA^ig!u2qJw#TEzW~sXc%>EwXhP^rhJ7~@D@LNdn(X$D*Hq2#w;9-GD437!&j4*FFAJ2IP-j19T6UU0cM~>CYYK_+nVJ=n*g1+r%E>b zvO8yTm_UM!N3FgI8>ByHd)M1m@aLKj7iPG)x8!of^5l{KNrb`bTI~H& zdW-5{&$P&g)BP*_DR+u1@F#s21w+PEA(gamSMD zH?Wb2D{+(RKVX#o55Wf_<-5Z1t91Myi+2*X&4FKr>PezwWHxJf*jhKC7)a706!1S} z*xhr;lddj*@l|u9`g~iPpR?v(s+OLi7nEoB;YBV0jIr(Y?+v1W*FN=AB=k%T96+q? z{2aHFaPj_0H@y!PK#T9T>GtP*DGMQI!C%Aiev0nymbyw5_TaP|4{Py}4$e~I9b)7n zkY-^XkK2%ts=dBpPMytQ8n^=1BA)w+!}8u2hmgD%n1VPzJ;)}? 
zr4^>LY`&yz0(K2CKpSU*yG8!Qa&mA+5d{$dg2m!qNC3(~o@VV$Ump6$Xx_r((razV z_jU;w49ReI?9QDcK}!`{S?nTSmM8Ib(~uo)ClK>kf98eHGw{$|8?hA&p6Xol@sJT0f4+$2%Ce4ogCGq~VFN}qKoCb1m-w!rX8AP(}v zsoi}Al#SCSWm1_Ui11nt|IIiBXDshSs4`hFq;BHka@vq)q}xKOYQY3Y_%rl6-fmCez6p0^UN%fYJBfl zcggZ@klFwht645H58gAeu$LHo%>ure_K4UAAn&m0}h3a`Fj?@!PwG$_M5hpzqmPiH){}Z;6QKYtQpWpojB%&=wwLKy03T$w2B z-^eKEw2F6$??b-vLefeoqGI*&xL87^ZB)=`DApo0H^N_Ey1T4K1Iu)UjbKiSjVq+7 zladSx<$5X;DWy1{UQFQbZ^TWj}7TOV@70ZH9@r707 zhvhxPRjbnnZ(fdFI=#-+4!NPT>9nm{sNliN7x8-fk%EN-u_|Yp9e==x$|6s>Wz%vq zt~nZwUo;Tm@o5J7#T?*{#_^H$d_q+5d$aFpSu=F%{d@j0)#7kJj)MFW;{txf^71xM zt+$N#mEF8{a8UkdvmkL@(AMa(erRfG<>HiSLf~s=-0>g z0N>N+AgB~8D#zBwBp~ZPO5;_*`}youkR*}K2&H$fX(7cVFU$%{aP*6lsg3|*+DA7* zGCEgz?dqiH91glNxaz@zjT-&cZ)D(DJ0&8oeu;~#?_+bXrv@o2c;srP*J~h4B(mZJ zb5M(BBMExyC4~Hb6j2Wkl{u?ST;(;DEP;XvnuGHKy+DeoOYA&!r7s2S9FS`jvodt< znO_bPzb&G~Tk+C!aC#ZBEylgkD3IzfsU@QGp{!{lAR@v;<)7PP#`4YMXxpg@XM-nn zIpiVoWf3_NnP|_Sk=RHbDAI}1F@Vm!w%tUCcMw?OH3lgBZxQog77HeJDa9pN=IDh5 zpuQ}QQEb4`i)icKW`}rDD#QUjSkj|SX$c>s=)rTr`J5uXst$!Mw@;cxlXSn3&3?Fb zKrL;|J6_U`OrEoAZ6mM5J!5DGW8WkbF5N|)MKXf{MBhOSJte@PfCjlp33%PB zs|e3bLu`$6pyl8%`3&{|{Cps}3|qo<;<mLmDc|Ih^kx`}Exb6oJ>zhPOmNI*!o{?S0ihhtsuSla!jh8K47Pvoi=K9uG7 zv=qvJp=#%K2oL)xr<#Yq2du1FfZy#r>Kgz5e+J=y8z01#=V&aRC#6hUl4LaXeslJ#v4tR73U zvM-D_SyE093voD;LuVe!W&r>4Qnh)Zn{Cx^fH*PFC6>19>3nwwrQ}A1KTenWj=KWX zUn5pEv3p*-f4&>ETr3%e-o5%|^%|(yzu_y9&^Wvk7ch4EBg3&B-TxHw2U|W!_R3{D zMqRI5cJ=CajfUQJ`oA@p%p$~j?aq__eD@|dgW*h%10;)9*?(X}z-50z6Q-*CDi~7% zoVWAw6e5-{#QP2=qqPhT@f85JVtk2W9CtYFvTc2;S(-qAgtHBOR3n;62M&DR3><(G zshTQ%NPDX|M#>o*6S7MK`Pb1wKi-tt*_J{CQFG`HHTr3@fdos<*nJ>e6^L=FB($_` zlbF_DmM*GJW@CZeQnV@VHFzCp%uJ;B=Copaysk}rL!tYN@=jmnV6sBdngtmv+E5O- zNd0}u@j=Ae-1ZU}h(>&eMfdo0wf%IU%Q^PzGw6*^aNmXph%(a;&Wk%zc(?r8c3 z*X$03G06Hw$5K5q{yh=cp0dVpm*x@`!g?8nowDS6hk1>29bi?HCxY)u-P}A|yjCaD zF-iW53Ax~C*rQbZlUunNeVTLCEI*50CVeHCAuQF;y4~+tgFb6Gyt*A3!qi~h-mmU} zPcn_+jwxO09V8#;WKhOU_IrVaT0qg(olLBV+IDLF))<=Lcf;L2Vdz~&o{YGQJ7QF@ 
zA`g`Kx8g@?^o#jDsE}=RVT4DP0qX~j##aKQRlW)~fxzfrZc)nLMGaQevSNc`F|zUu zFEhchroFyaJ<&3?rNAcxuL0RDTPmS|obm}*z%%oWi>EE)h|9{zQC?3rvuI7!c4W+L ze#{mbueQb!&CYe9T<8B~IX&E7oHjh$2+#Fc`nO_-!ceWq>aN9Su&5H0$5%NC#L$4H>$Esww}}j|g$&Rt!zuA2=IJ8ZR!-$1mtYaT?1=v;>U3#rcrjUe1tEP0jwM-Y6<8(Z&>U>< zccvPxJFs|M>Ik@=_^P9KwZk12bCDPuh0U4^KwhC2rzdVOIE}GH@2OS2&kJwwDQdlC;Ed{;fs17B+1fPm zB<4^}erny~ZeaZt*P_+-fV{gF%gv{NL#GoH-eV z#{ecq$G|;UckknHxl9(%PaIQf+pnsM)DE%5#xA;C`Y_y)cvn;fJy^t%V?4rii?8N}T*h|_+8ph%Wbo`V`5(H0cZamn=$nP=8&o?@Oe+@|qws;zi)K!d= zO?#Q3In2W`bBJ1^k9_?dcQktkWec{4dkUaA<+ZHX^jh* z>pEU1XS(>6!WS)_*t)0T4Nn^6X46@SWP)1(WD8Cpq&VE2zjE=&S#`i6F?xo!EC0v|TQBATPv(m2fvH5yU^bGKO59IVs1=8X(pK8yurXdqGyL2hU#vD;5UtLUW{i zI)Z8e3_etE`xpZ^lmwI`gRxdrL)U5|d^^)yqf8ZavR#u)ZMxa8oT|2mosp$X#FTS==^?W_vMy>x;|aiVf+3BdxaowCzgvl`YB@y3_}GTpE*) z=Q6z2NZ~Xtfh|&AVjiL0miNGYbUHp7bFqi*%GVHweQ-4+gG6Xx0G_Y|>xx)X+q&)$ zGGyj@^q8-3Bl%7&^4OZwYA|QZt}tFXkmyCMAXkRYawA0KKfrC`$RLMl#ou<4<42WC z;ASAfRyo;cPb?!NQ!y{llV9^Pe)sRuaJjm!XBnJI3F=xPP zt^_uM46A+OwB0lgA6l#^P?Fg-i#_hMXJU(f_VgRjG=4FeaJ}SE2#Q27WtR+oR%Ye0 zqnC)P(Uci0arB~S%JG@ss!FVNT&mE20xyCUXPKA42>BECw_+I9E=4hvg~OiykylT| zc+<$RS~tqm9ao)l)@6E1CRJ>uEN9B>qDv3~ z0~<#ubCK1NoS-*r>SqNs){!M@QXW32(*ooH=Zo3*AsZK`bOyFvyW5?Y#Wj!)8W~7c z>aaF*JKQQ*_WuSqJ0h~l{?(9wB#&)>*Y;{JgnORMW=1ZlMq4RA+jeM^@h+c*^^*ss z(`^77+QbU_$I*l2?#P>((7O=0{;!mLLPnfXW2}aR=rI6EK(@b72=4UkB$TImMiOIk zAD3pDaS+ns61c8E`{2^UoM5;<(g4{562*q`Czij#?*#bUMB=ktd5C}vZ1OV*gn|~@ zE+vPVMu_-?{CGyJUE|=|Mt5vxd?1ZY^*&`)i}& z&TBs2$_+aM&zwWrLD(6Vjb=~i1zGG7XD!f4kL1&I>k#Rr({0DVtG04L$QS1>zJaf@ z==DQes-*F0R+n523vWhC=chFo3K@3d-Po^4^&m%7t!UL&PuX<`ManMjMQSsfH^Qx| z)_aQ+6PpGAm5t3+i&=n=?$sF&~3 z%XcOA7js^W$D9Lxk#jA~|#+L$v z=z;-#t<^RjkdNZsJXXe+D4rpA$$&U&P0P4>a$NPjrl9;X>@MoCeG5FWRa^NWSsU|j%19o@_ym^F zMvV>g7fB_Qprd1sx;zI}P1%T7VT~eVxuWehcuqBJdAT#My!>k&rS`=sC@YqkTS#=D zRT3St>VwR&dUxoR8O)v+4)IUIDgHE?2m2hnE$m|wR8H}ja?LrhnYAGcII&%tNMCt3 zy60yL=?SNF#ElmjhXjYYGM{=l=Nlt2fhNCPOo%k&+u4RDBmvj-E^%2gJpAu!6mB!M)cq;-;9bVwd8+|_h8lzfb4{Y`TWGV?fB51yx+$E1(u 
zYzYDS_iJ$MQ&5lnyg+?diOIeF*}8JPD;*lrqn<#bz-^!0AhDRqAKoeh8)-LOcuT^q z3ke1oLHJkPMnsnMsL)xP7UxT>TTL&=DfuAPpq zcxgDkg^-dNA5n2y40u@EiAIGRy=0g6X4yI}POLB0 zl%UDvXS*k<3G|k*v?mxSZS{0dA)uH1CXtUEVAt8n*w-=5oO!M5Li=N`p9Y`Bm5F71 zTE(hrTy3Tbs8sW$BeXZPi7B0}(2mj*ev4Gi*N{f#mPUmZiVOO7ZhuO7ShYmYD0z}n z-@&-QQmJA7sB9DDf|o$row$2aEfLg!EA;IAc3W}LJxNhU7~vcb7MMk20n9a@H!s%M9HQeyzm2 zW7Mc6avTI36$zj@#{NM)$!+QC+>9!_x^|%rYxpLk)vMqLx zOwr#wA*!fWD+G4%S*gJi?&WQs84=x13nuel6IKOTX|ae-d$L;anF)xINbzeVWkN+{ z#Dt&N-S-AHn0&Oe&072W(#19ERvc$84ZvD-?#=k%pa`6af9MJmq|NM>y(ObT6by2b z)A@}Jc4z&+L9YOhfkDrVy}w;MjUR!6n!M#U?hWFlE9RWI82?lJJ^Qj0)Eu&55mXJaM8dvg*$NNSHZ~ltc3ZQ*cNBoq${WzdpODkX=kP)OQ5-1U3&Ba6 z&{0}p%3-_K_hq36H*{{c{5&ZQPU)@d@!{*8FreR)*~#}gy_AU{CW3FIG5ebwTFI7bgR@EcR?aSDL)2)wh&%*fEB%T-T5|YQ5Y)u82Q#Yvd~AZDUwk zXnv#9u@Z{+C$rD9&Kn-2gTS%?oR=~jphcLqof#_~R__Z>q&s1pwg$J<8u6*#6hj&3_?41%A$X69ekEXoQ=}sCbz}_L-{kHmWbc4C_%44!>CvO$1WbRC>Hqxf;n@=$f0vNd$nv zIqP36wGwgj`JRUtsnN3^EFW_H)FGA7FLY(y?rirTJ%0R=QKWnFOxczNt-0?an~UyoMB*D+|eXgE7AXF z6j0eJa5@DQ_GEy>;pU`gOE1r{a@11I?-E?Z|41Hmk8?)6azi0@Hc=09f$ddSmvzxy zT6W!wfa)vGg(j8i=wxs@Y#C^wJo{<-LD=jH$J=l*>6XqN6q8Sj5D~)1efMG{vO5wy z;TW!lSo+IRVM%w~L`OA)fV!@cP*573jtnZOs|u_fmOI&di98;}Ov62j=`hs;5$8Q8 zTcNNM*JaZaDF0(5n%8s`H3^h#Y3s+S=bf_MCRgn`ou?@{1a}5`X!AA4VH^1n6??VH z9ibgwtJ32`Tavq`LjP7;*rkUyF^E6apj0T5$~y5J@U;iK6D=Q$n-zFuWfIUj0Wli* zfx8h6?44Oa0eABpHN%y+|DVd%tE69m!GNO^9t8lbx?O;m&nQvmdU8Bc+WEdv(~*ZX4z)AY*-rRBBIdjLu;LmG$y~|Ey5MX0G0wL z>G}j@ka@2HRy#a$Hy4J3$TT^Mt8VpLMeqbv6L`$E_wG0jn=|wbQSjcby4D}c zRbgWTUyVn1Jp4$10_V?qNib8(9u*T=wu7Pj7k@p-zJ%c>;K5V&rnxhHJVy>QOInJx zgVzm463W5-N=#!!m1VTpKgwTD70mRM_bkd&I!c#rP_eO8T;MGi54|eYPt!XnIb9Qe zUbYo_KcquYCvYSPPI*ijpe*6=ED8&I&jp(%3|PQsFnpTH(xhu^1*)8FUrk<);2;O~ z6ae`f(I~p#++V2+2>{ooH}D?h5KT84~m z(whgmtS*%PSEk(15)Gmr-f#|+>+C980SrIG>XqZU_H4Kpr;<}c^bsQyb7jiVZ(hvu zUJ)iwU}6TaB2S+n-izwm{6VW|Poz?7BJV6@}TwFLZz)%{%AeM;q8OU01)J z?x*ScAH9_`=iSBT{$3q5Atv__{$M_}-{0=`TIeXSt{JiQZ;BvsxaSqm+?0ZeQ;$yf znCSisk?37rRq`p}t&enm#3SE-*i9uO)TEYbGXPfg*Cuozz?H)4SZOnWI+S(~1`;g@ 
zB0qo1g=2sQu}ZA)*^4926Hufl7VsWZ7vbcNdEg7T189HcRt833axBH3O^$r4;J7c= zC@@a_Cw|vjeUE=G*lJmz#MAZ(k$1SP|GRr{E~|ek zkWAT8wFedD!mtGsWZZIbP|!HZX#k0V5iy&?`|gd!L{SpqUG_Bz zuU7iTaIXpYdFSsHi?wzovWQrYk&|yZsGPBJ9D>jW8T929;NZ^k zVOiWrRHh~kVlgYRt2T}u*tBIXC1lnKt6@wP5G{bc0;U7;hN|1=03(Wrby~_QxS3K5(TC2$2dVc$5(g$f7SexKui*Jp$T)PXT^CYJ@uiFXTRkccmm@i!#^@^$w?EAf1_3}S$~N(M;HAx zq97g#Zi4PGHgmpc+%t(NDI46}Kkg|N+8w!{5r0Ezi;+>AO zSERj%4x2d26V4l*-02f<#^2QafRT+ZcTc)4hKbWYOM+88wF=+heurdC1Crkvr z*?RYqXpMP6t4hN)7dBuTC549k$IS?d{x?Q1N?I=ep<>>-xAb^#>dydMr2t+8#nD@Z z0`n}htGog=PRb{~V!6bwor5&LL74x*1sEY#e}A{bu)fM=^G%)9Pax1I$fF?E5 zYxG; zb9p}V5Bz@F45QxVEAQ+~;(=?`ZOY!bOD|63buUVq;)4$#QtW*=Tlt?G#2^8GLg(Bp z$H#$&AZMm0)wEo$#Qs=$oKP<|(t%#2uR;65@H3U(t+Gq*I^W&*Wm!1r6Li_*`}@h4 z#7cmImMmU24weuW-}00;;kacElfhJL$wm8l_1kmCS?{;d3d@YWF9+hl zK)ldt&ZfVrME`E>@gxBTR(aq<19aU=tw{Gt9$PL{g8LswMBJQypZ1X%{YwAX%uJUl zU!XtOF2_;29i_Yv)wG!oN|=ZU0H0x$@I-b@8E*%;SxF*z=abp}*jz#r@5iKJAf6Ou zc7FKvRl5Sz5*!$Gtb+s==PTZPzW`rtN~xSc=;#Xgdg9QShrVolHjeF@ zBwxWoVMw>FuRpq~@ZhdzlD$MXPrT4*V@U31|JBP)fO+~X>``ndGvhIPYv$6IfMk|A zc9wBr=GsDbF^3qb3)AcXk8(NrbJpYzm+u%l_WEwdEzckUu}S3wmlXF=02doPS}End zzatOlY(6Q_M*RA6ZPM1-MmkDW=iys#o-A0OpGR%xxU`2+T1<{a1fQ(ZRXToHAgY}N7xwE1;aGv2itdiw#sKG| zrT=OBR#B37W z3PFtJp>OCIWyxY~saefuq``z`{_(%q+|Dp$se>9|+bZE5u7+^!gbs$wJ|CF90>)FU zG{fopVpY6_+(_$(6fpYKb^Hzp6F*&%a%DEZv?Gf^(V?R}b?K()Pk5}9Fxd6i1-2CL z!3=mGocOvB(n)gyP+IfpL}CY3*ujUBRJVil{WMemT26;5@Rwwu*b$!`^lf6$!LuH> zcR@a~J?;x9zuMmyCye|>R7?&E^U_{3G*oIn*mL>nflVdN^J z{#S2qi787kT{w;r)HdICO6MWal-D4~z-{r=swmtm+*R)RPYa+8bMEPttqV&mQOX=nRETP_ z0b$%C&(cRx*p#{-Btej5aR5Ra&YNIn8Xqd9Gy3l;>Z!G}pn!mQaxjYaQel%KQ&2n5 z_?)6F!qF`e^?*%-H{LqLa7m(o|0}({l9~pzBLS$Mu&a=Y24^^9>}@^^oNg194{Y*H z=}De;AL%M|i8$$kjk{QO-$bNkRq}@yDe#hC)k&=`DG2F$3B`~UN&imv4rK#zS7X?` zv$Y0N4#&f&)70s{Ql)7gTo^;}r?i1GECoxLb|rp|#lw2b{s1y>DtezqY`prt9t6!X z<|{n83XHQnX^ol-m3pfYJ^?)!wqju8>P+@9#cw^@qIYHs5<_4r5C!NB4}qO_RJuY%I&K~=xEz&C25Eckjp$tULCuiKcQ-R_;Gc2XvsE-7gpZP%l1 zrt9=Z?y6PMV1`;)Ib*dTQnW-OCMlm*%ML}vZLS@TS>fQR8-M3q?P)x)QxyN7Dr^oX 
z>Rwh=<4rWdcyXU&U9hvg2|n-?KsND++)8vG3;+2c($Bsjc8@JsI4U1qW+zEZhIXu` zu|+F3%HG;Obe>!*g*lGkleL#^8e1lZCSmimaJ$j_G+)N3eK3*&1kcfp^^HytGd1YH z^PTpD0)i1OJwMGDp*aWd4g@cOeNB)qzu)H>-t-F`eRz-%7r3VH1e+&2;y+{@x3awG z2iUwK?0=}}J!TYaEmaxw1gBpY80!wny8C}-#no=X50(+EAm|?~aU&ORgPB`ef=t;0 zoBq&YX0hQ9C&i+dDATl5&+u1UAfje8y+6B9BX~5~P;zJ(l{l*5`M(IZDGVPd|0B~M zpT%6v`sV;a`D{HAocy5oR-W3127EY~=Yh`|GieB=Wsze_nLwwPAT@V3jDLw6QWDpF zaB=%Ao%)FZQ0q+|j;8H#=UI%}MOt&*)%Y7|Od?py8fS0DO^Z659?)^~kjfZMQ(F{1 zf6(>iP$Sdr>l-aWIitCxdPgG;(8`d)U_ZgQR%g&!rfK_U%27q6Wxc@Q;Jjd4`#?Gm z2OVPau~vIu55W9tbyK7!p`D);+C@2VBZ<2TK8it;Ad)E-I` z`h}j*0}f)lVJo6#_$Qo4^v(zJpqaDy|DcQ5z6QEPro3mdeQ{hVCh2AaKp=*rWi)Qx=B^L zOO;wLX*o=M!D1)Zof-k6}$@xfwcK ze!->AQLi+NFOuW?^fxkbV7r=#jN>_|K?IH04-u^{Qb2{uQfJ>h6abSoq>|ZV7TBjG zBm>hkKWN02#Zn(Z&zU(6*Ai~m``@)B_C(>hT|+_wPdoCLn%2)U^RHhSp@Zwr7PkO< zhr`FGSuQRAXMAvb+qPn$hg2WeZPnO#ZQ(VSJ4ga#u6FF{cJ(kblhs-C+aab4R9E)x zbow+eiqKH-N}?6kg;t|cg+I*ojgVZ^6U8e4>Jto+7Yxx=1LB%qMu&~jl#>i$Px*So zMG-I9trh&Z`#OFZK@H6o{Vn{4_+LUfiCQxc_JN;NZ}&YmRW zKF%-V2oh)mm3@$7tq*jOv05~>1oC%HWU83=zRJgb__T6dg2$}em`=diS3#4-Tfyx_ zWy2cjmlT-Pr?zd@8J2G_{}(s`jm64_s$r$ROF5iJewfs%&aR{bDV)kWkX~pSir)f#Uq?tijYW&>2-9qowG#^NAGL?x)9NuAeQg;dsxbOngW?A+y8td}6U1hS(fW zMhXhSsJs_oe{n~Xk%-fWGO_yg827@$GjsU}Dwz>XC&$GYJo9QcpU}u9u;bDlYcoWY z_C~b?c6V7}r-orI^wC=W z+}!B`R!S^2j-_cm$bMw9&do50HpA?tt$L(TT#Cp%6NVvC*Vc((gqcJ z!laWKK@15jM*X?gd1e%qcT`^zD5CO=XE5I5Y?vbdO1-llyzkw3ylvNV)55}ziy%{m zq@RjjM|-OQojxr2-(Q>4DA9?U7&|q5RT{Wv6Q+EH&me08j|?OF8{RcqkncJEIol&8 zq8R4}YCdE3dFSO@3Y-CbfdpUYB&YN0u~h8>CID1u1z)S{D;Vi!Ho?cTj$-AS-s9Fq zS?#oCNZ}_W*n-{WXL@0vA)u73Kk5KjKiyB6AjCX-16jx8ouLwOJ zM(az?pGk1s4E?4yZkW2ar1HYK-{?8Ru6A+b$8!f+UHVRD4NVJ8k)RwJHaU|l_{(7r z?IT2pgw?MXYX@k{)s_~DxwPx9kf;80S||@9e?CWSSywSgRtqZ6d3Z;4P?tK(%0>CU ztcsA%`Av;YRT4_&Zhk#{Z=T?FxOK0}YMQgCsAr3q2^)Qr>Ia#uPF41DN|sG|f-JqK zilHT8K=9_7aoL{5KNP*V;+MZ|zHP;iFoJCr`ns!ZuwwyQ^ZnWfJ6T@8IVp~n@6~xc zNS~2g>=c?G+f00uLmE(hT(r$U^|dU}){2jSh7e6C$kP)%SKez}-i6;QvwY&IoJ=y1 z0lzO3rA?h_Z`zFQSQH)Zw%*LIMj$;Y=dUh5?rhHF2}E;oOZoyTXiaUtaL 
zQpFC}^eWVZvOc7N=HqE!@#+JY(}5j_MO43xKq=9OzXtU;N$?{+pLifBQSAV~FM+)U-Rm-%!;P2I zvuRp}V(`=G!${5ExDj?C1P5rkKDZTQ-ZF&sc+U33vSJFL8U{(g*#)VhTO#xy~q(Y+|`^xu8P2DSsECq1*F|l{g zZ7Fjx^WO+2X7G4TiD0J$&{{FZ$D)&>mWX)5l2r4fUGyL2*SgME8_UZJC zl{Bf+^4<$1f;c!B-t9vI2hB_9I%+pvoXo}XAGD^4x2Am3Iv#fmT%MudN)hH;ThitpQ^x}6%GgvF_G*()@BT*x4?&SjjBr+nCy5d_6W=MqLD9usy&Pf zAFc~fQtzEM|3mLx+Isl&7LIBDS9=XSG@=M!dKlF9Z$8N~jkvhRbNr}|4NEVX3A&|r z=BQ=Wo(+8EeuvE|8!Z@w{nqJez_vxI3c0T{($Y&!{9l`8qf0&e3M2r3L|QG%n7?3x04kJ^4wRXv|EYnwWZ1o&(((Ez)jbNH2WhR4=h0<3uCXhe8= zYY`y`2aozhmIZO7xLiaBT#zqSpvsV-uRmP15^OIt z#wL-M8#2E}g}ixY@tbd>s}D+zsoue6mKQiaP}NNlZ_3IVOJ0{X)|+}lvSb55Psj%A zr$qKqL6-L-ZC+f6yqng8NZ)V7Y)fA^LmG@4+NO&+;XULX=vAG`x;JqObKe*c;PAC8 z)Bcxw*sBexCL+Rkfsckn$JX#%qk(%+_k{+ECThBLjxhHV8bI5R=iR*C=sQW)IraTN zWv<$_SZh(dOuy&4_KE;ffw3gMK4E*5VKl!;t_%qvB8sY_YHQb zYt@X96_^hKx!76b&YAg5f!0@96V#H?q#*Vmsz50eX92UsFo`~r=OAT}o(px}8^?^Q`#>F4z!WR40W)d$qhVLF1+e4xv!=|g6FT<^?1awoG-_X|a?(u4;Ns!f2vBCjE+8uf^ zAgsuH!0zC^6%tG*uQ^y>r&jD{7amH_SKXe za1An{2AH}l-QTK%B;>`o^3?GVZdL|u6;0 z_z0!6xOhz@6uBDx1y;-lU9N{^3EBdfDr1S3QP8R^V)X;8Mw+%=H^;c1Sbs0(_={VH zNh~+yFg;XIicI7liiSq9A5NSz3!H97#eCxbTBeHZTj03hXoQ+N`;MnN7)opbm`+%6 z3~5{1=}GKn%3UtJEhLtjK`~H3Qyrvm{;nVIZB&d2gndS~j2Fod>o@0SMSjUeOMN9A0iC9o48ucFa z?a&l{381-zzQ@-G59Gim5f=E5eywn?ydlf<6m?RBSMRXwO~Z6N@zI96?82N2?d9{V zChceA77});i@yx2ZeGj~%tnNJVi#&=!7${G_xGXzXzL_C{x3`2* zPj>j(P1PO$6Wif&F=LxsECYtc$W|%W)5SX>BG`*J?t!rLt;#S1S_6&aH47a8Sp|LM zzI+Hd{J%o)f!v|$ItUGW;1sB__KrGU3KLB8lX#!m^hW(t&;Nu|+))G3rlp{_4wyLk zu3A=pek%g;Q45pt+(>`2WSLyF#2!^tcOa4MO$c}EHY(8TIEGx}u%S8Wo(A4H0l6Rx z%-$HS3;t_Lw_j>eeq)U~wM#5!wJ3S-7Ae5YUNWk$JOaber_!uC13l;aA`nunQNYR-@W{h>GS1#E}>e$M#> z1bYo+!en^^avNa)N%K>oH%=vQQ~{6ihv?zI%ldppI{+}S&J(V3a;aPYvC!~3I!FH) zXmp!ORjZK;;+yh*Hm~FSQI-up$!x#}i~*tva5jham94b-q+oRwLYLiog;DCAY7=nO zy$^XT8eGi)<&i=;dJHf5b1`3~!p`!>CrF_HY7sE$lCwaWuZGMIYYr=YZbu?%9(127 z<%1S|+ET(r%62@pq#0={I z4H6edgZBqDK3$_Jv}ap&lPr^U$rboPNAu;CEjF~=G&$vQhh-a4

Bsq&QG4>18IIq@mGq`YN>q! z->OyCaQ)?F%R?oOj}>E2J7sj%j#(xOeqqRnirU1YZ7B?<26B1Z&U|rxKA}PUgXYqb zgSItC)_q!czyv=bpKuI@5sgL=a%NxgMJWtx9r+@)1_eA(PP|yX) z#sy`Yvs3YYPTBI&5l&Sy6oDOJSKl>W1h`=;U!?OX*eH6DS&U%ElAKRfdy`;N+Jep? zO5=y#V4W-SSGnnG-xg)nnEJsY7Y_6GleIZRZVlR(WoeS|ua9)&?Ox-W3#Hj*PFO{m zJ@T#7URV2=Tbn-?1xQd;TRLvEDOmW^1d`vrTecEnVYo00vZjnL0gDa%>BW04wLfR) zw8co$B`G|9o_T!;;0(InKE#t6XIGrWDTC1yL<;qBKzM_jZ70wC@d$&c7t)gO&G7G2 z39i_1eby*wx(1HQ78W>-;!7(87@(!;?<=--D!U|W0aQd5koskm#~W(m@gM6%@RY`( zkee(;vuJAeakik4h@iDd5GXW}VuU95K6IM!jY=y4XCrM7Vjulb&?hPeZWjK`-$r3J z-qSLZB>kHOy-f6VO5?XjZb!!={I?_3D`m_BVeVa1HVobnD_?z|u4e*gV!hsyfe1|r z>1jkXxH!Ig#XSsXE6}|{lfWxHDch$w#nJ>uk{0x)TG?AeB_7bnC?Z9*Ocy!|k8C*5 z9b|v9hX!ESzTauqIvUiHV-h}P#bd1O)is6M8)_-{gJfK8SRSw70e$?vDWUTSAtUOL zgg3jVy))Xrj4u221oAvwb6?DMklTK$Y=Fs673#*mKx^GU3ffY79y3jwNnJwY2L#@O zGED8WVD!VEy`=Qut?BPqF&94Fkh8qK$Eg?NT}j3$x+;66-`zs@h!oX%#l_(ahLK1> zEPr-F1oZ}*dXVEjV}0TPtCqEAK=raN$isUhZ6H#$iGI0k~56*%5{=_w);K7@5eKlK0X+6Vo@< zk#~i%vNk=CbZ%N2{IIxnlt2D2b_K3fzz^}* zW2%1ZmUVhjk7a^ba=QwOycc7No`j>+;^19Xl#t2`4N4=sUbbg(o-9HfKFcvapBIR@ zR^-rS1aQ9gh-6uHkPm=r`!YpTC>XJ!eaP-!wVr|h1*&3WTqLwcLl=-j78)rSb@xW@ z=A~0^v5L%B~1Trx~RsX*6wUi4_VBoIU^j`S3Ck9^}MyFukw~`1uKYtoSl~HFVXv)`F<; zwcMI7(zdI7qILTK5#mPtLS>#=CTwM)50O}W{l3M?u*Zv#U~Y!t-WE$0vA<-+OAX~IxDQ|qbYLD(6}I4!#xZVJcnCD0dxB#evk+lz&!g2Gr`M?gMe-jHjad-&y<-PBEykt$p>aM~DGzwp#xs0eCp~2*mmPwYPIaWjNy2yDNTBWPT?wgS zDkcouTes@zVc}hCyk>3TSC}g-f(yxb%a;1InOC%-u55ZzFoEW23bv#p#Kbx$;N<|I9p#z7lnW#UuQ>U8us9|5+2KjT)chs1eM?r@x9^HrfYRD@H0xs{f= z%B_P^`S{{jL3X$65OvqS0fPr`eHFrUvnKz^EWKAJG=EY*`k7##wOv`zHeh=$ss43( z)Swg9)Xo{8m-Dyy1iJ?~1iogt1I5MR#$Q{v_!Q^sCM0l0*m?SLl0{hA93Y*s1hkjH zlBioCnjDy$m^XhVm@8`hm=+=Up&Bk)lF6umk72CmKzo{Oj%FY^<%AsNxhWYYJ#HD{ zl1tyhRRzpU?he|9tB`-lHQcAsn)Hky8|^g}vsI5FD8C(pmEkR|J!%U;+=uf+O6@+- zgf+@d$R#`?sL)-Hl4V>i(#w;OiaZmelj_-dt@dySau(IZ_`xQ$kvICP(T~L7V@ENe zCDZHnS91;B-~+wKHI6fega0XC)F?;yba#n5ln`yP7j~9GV5(^Fb@jb3;{qN;7R5_> zZ|*sf-ioS7*I!%#T0Nla9pCOO=xww|%xmkpP6kk8ur5-6Z)2zk^(>mWL+F^GCYZYx 
zm9SxYhr(B*SD8+=ThS6t4k+wN)YA1^GLb9;&2B>Ge~&h@xXM=-mxd+LDA1w_;oAc@ z3I!OE1Br1Sj5(Tm61m;&_|)kLX(Sl7BtJ$bV|!WsAxW}NOFg$`;xgvRhyf>TQcFMw zYk62cMNkz7Y~o(Qvb^c)C{aY!`Pg2+t4?5Rm`Jsc%S1LydchAXzMRWE3E0ks4f0i$ zt_DkEY}hN%+U&H4hI*dwaJu4+uc?%Umkd#xUx%R$327hk*Mm1<$d71Jd~Mi(5%?HeCfIWtzV<9#l*0g&IBP?Fp;`t?w544S1unpMT)9LGmSNE;d8v)7< zKoLy@vcRdP9Zu7PD45owAcK9T8#l);C)aKyd-gh`&m8CE!J_?(w`1O8c%W!IYBA>- zY~=LHHkoUA^vuTI3qosbTeQ1n*zVTdQ6D`y@*O|y&o>m)B<)gA4H*cw@7dANM*Xg;*N-mPZ_m}lzba&Ja!SRx#Ar&kXij5%cOc%* zWA@2-ZG)T&qlHPgydXavz%Px6O~!s{a>jOC5Bg>TZyl{=F92o58Wppos-Bn2@<4d6 zvhC5l?CKGb2Aq7psd`WCXR7ho3=q*LNvEB!fI3LO6RR}B!I6#|nTq3IDDV}DQqmJG zciMv&t0k8fr4npxYcx3;h#4`9m1SdbT*X?pi=4I}od9tW?(xOucT_^MWp_NgzryA3 zpO@jMI#*YdGJB&Lc4g{Um3^kh&guEyql)m_l%OuqF3xrBAko$J0xS7Y|Je@WXY$Bp z0G1E@v*RO{E7ba{g4`8OZ!QnNdeYO#K>M<7+OJWk7w8KMY?C)W{6iKVwNHj9XQ;!6 zCK^aVY7Bk^7>DI`JfQfc;Tclif~FxV@cKExW|8<2?C6cNzIQ&X%qG?k2zC@zqvn>R%TY8~wrvCe`GMFb7Ek zW&!{1Q$cBXzu-X~E}bMo+o*jXfz~2_Dg4BToV6>g1Opk)Z@rFy?*UdshE~VOh(}}l zQg}C17(z(WIJwsuq}EwZTuTB#lwcyqGu%-3gGyE=WdEPM3&AKEBV{Z{C7^ka#x^bh zReO8VJJ{}NIi*jN-+3pb9dfRn*Kuw*K4WOqA4m)!I5FNuP>SMb>fWECn_E(z+fPrV zC^L0BC5z(HT=B94TfdP1$isn4MEAR}CNPOQX6_*1MWLBccs=>&KCj>Zi1J9DJ+o5G z?*`;R01q;Ll31IXd9ESJm!Uq)D*{J{L@{1W<_G01F{&P&`ibrKP#ZLpVvLuCC?i#= zk-q8y2_%Ogw9rS=s0ftWautg|zIjx1*S+S!7y*EwGRTum)Gz$Ftr)XUN|j=A4LQ3d zh8R_TSRgiFUkWJQG3BXI+uuHx1Ld~T@l@+M$tXemm98xzk0B87+Gs&#c8szEYR(~G zn@O^~Hy|fokriN;m_HKVz@q_AUuVU+^@+&uot7_m7V@X6YXJ`tE*S1HJf&W9lwYpJ ze@dzo3P7*d3t1KG_tQ$k6KHDTfrR*-L6oeR);)#*#>!od}Ggp&JlN_p(h1`qm9@2~D zF%!v^S%OLSQArdoznStxgovK`HjU8%uPX$4yAmp$Vk{fI+OdN6B(5wDZ2Q5K8n_E!MK#OAOVFy~%xgi>k zZ*ZeAGE#d&C3?DT^#%++{7ngazwB@KuY0^pkU~1dT~dsErNkK7|F;h`|H`;!9H$)d zNhp6L!*!kYr4ZJWZ!ufy%h21HcnjF0SKbGeeq^!*is&d$%T5BhC|Yv0)Cn~U(yJLOS^+v9(TY(WfV ztUqJS-dNCUrmo`3K*eU6OWIU^M*KDlUb>sriy8-w!nu^)OO#W;;+U|Ro&0n=E(c07{;-q|Y184hg@y$ISWVpK~x1on`|+YL}h&nC{X z*>26=crRk0FT%$`XW1PF;?ywhSM6sZIoq0<)$ch?D@_}M$kw2-2*^KQND&N@KwJ>i z512@n6c^eWvALId@-0yLBIm+;3rJCF+<=c}L1j-f1|c7!lc86&l;u?Dj9S7&D(%zE 
zu*3;wb!6VoE>+owm7OqP2j_w%9W7ewxM^p4J4VVFv&yp z7@qaL$N~ym|-!wr$hw*NMp6%`0&!9UTZJv>U z1JgNZ8+5Tm)1t4agb1W>)-N+)qP8PKxYe*Ju+}zCE554?PcK!|1UN?!sQB77$Fgf8FzvL!H@Hxps*$MfIr2CDR;(5Z81n~)m(K3 z=~qcC8sPXs$?*v7xm;mQW$WtgZK#DgER0;KQundq!5tw5G`3=rhH?cB=`@UN&1kYI zAL_D-GXb?2z4}V*Rim$om87#6cL_1XKUJygjgQkJMWSIbxIafek-b>h5h5+HGtp!X zm^vti9*nE?D=v0T@U$RP@m^A|1w&mZD0c`1LhHW6&i6lPtt#V76CY<%MHZHW^LBGlPK08$YI*jMozEKN^MQ8V>37 z53X>q8sT$S{`hg-@sLk6NHduuRc24{Xoo~Z6QRaP7NgQ~bMUYiv&wK7+)x(_WIton zZO$%XFw9ZTsCYf6{c{NZut%#8{n-f$?ST>S;+1ZZu>@I&CUj+vm3wlU)_4@VW7UcrC-h*^_Ak6kM zsh1AHOe8xT-cM}-?7DbAYsrV1mP(#zK>Fp9KTwM8@qfAtI~V~8#HY|NpQag`$Z~EkAc;h*HWEtPq1MRYz3O? zSW+S;x^7_DQo*Sut1!+}t*7EmZbFD6KcNk=gwS^GP#kF7Ezzusplct47Z~VEr4FNJx zM>7p;0EwT>$xIg%6=h|3o2K3G&ROl$cj8=#aejIH(hXA-2i>I&`@}J~DbE6@0<*|j zD1rtS5=s&hiF{!2#SX0zbg{YJDcUO+WPgZktmXooW+tC^Rtk#M;BK^%gkKfmreneD zJH=mOp6}^KdwAr)Frvd&97VRW#dmr_ z9kNs1JSc(54)Ii`$}C35cCZ!nnt^pmIsX$@iM|c`+ZaL~3OS-bvjbTaI76gzQQ#W~ zs~A9))ReR{g(;&))^;A!BK;C_GRbmk`P@oUa1g5UZ=58wzck@oJxh*AOt{ zjyqG@^$?E~P4RrEErk(|5nNQXh^y-c!}kb2){=1Dd$1sg;uPm2tGacC4+>jgkT z%*+Xn+{M?-x7mBOOD_tG?@90bwdA^+vw2t~c2og`lfR6(^6yS`K|4US$r++BiOJt#?4nTFkS~VS z|Iha7z>9SMGT8N1H{Gk52_Qt~-GsEwMQ@-%WpMw$F@Eir{Q?OI7J#dwvJZ#P7;@*tfF|0c!RgC^hh>_s2%4}WN*3f z1>^nQ9W2MAE2WwA787XwaUUDk+To~11}CY%7Ya-=zW^_r!03c;+#o?G>L^B0pm4St zy9?8EHiN_O#s(e~bMR1b{!+NKQkp;hy>Uso z?=H8YA#oFFRSg5;<+S{@_Q&w99krwQ?6%#9V;q-vNu* z+Y^vwonI#?h@_E?duWavK7Ous%Ni^lAofIek{z=)8m?=maS(Q-%+ioCzsw-QhTn+p zC{qiQOd3+knWp+KSm)X~r9C{+m8oiy#)7$nEf7Ek(2#`v}5VMWw!#&_g^-6=BRfCu4>XSZ<;*$i}^ z8xR3bHrwr4*6gxDa7-aYfvi+9vc!{Q+!*;mx$nV1HR!PwP450rlb?<7>do!|W!}Je zepxDhzeB&9ZmhHoMb`bbG?0g;>|v0#a-3I1c5*WM?+nr^r%fGny^*@+8882ic7~*5 z#vTV)%PC69y|jo9NzD9(P`tnobSD2 zMJxFLHo%KrIZDGH&6Od`&l9OR@R)UT7cC5Y)r3tep`vC3GXz3uEk^as<}c}$af=s0%rU> zU+F1$`+66aQs(myU?&R98pplA@FGT?9KO}ok^MZfR~%&ZtP4!=55~8>fWD5?V|b`3}!e5^D9WJ z9#^}zhuK$frwtxR8p$N0W(gw&fl{LJXolaBLkI=C2i+S$_5B9eJ9$%zaerUEi z+-LH>kkjFB9TOR|2E_&Nw(5QGgMhZ4IK3f!&P1zDYihlN1r{Vplg(|}S`6a735Ri} 
z()F>{1d67kBW#}g=mZVzgm)tFNLha^q;L&pDqNnR>T^t7@N`}2(J(uA5?rWS67>?E zuFy!cim0YOil4V$JKcwh$=U0dHL+wVs^(^C|Hsu1FFY4v1LjL&u6ulsO$6>7L^S_o z&RA#4mtVBd1&9)>+@E@Xw9Gz+&MBl$Zg>lxQr{eeY0%#@k>>G;a=k+{GQ;`6BeflV zgyOZd=eCWuFAm`NVt%!ar84$bxGdk@Dr=zzaoc$RRP^N{sMx_a)Um_IwQUBy?$Ib% z26B}U`4Q_Cw^5lyYxc*c1d397$(09Pg4gj`Lm~#7XiD?moX;b!4IvZ>17S*1!vKPr za1T#a!9c3&GeADeq0MLEax0oeqdppi)nR|>Oe@s|9=6xC@;<`R_%b1e(i-ZU>@7L~ n000001M +#endif +#include +#ifndef _WIN32 +#include +#endif // !_WIN32 +#include +#include +#include +#include +#include +#ifdef _WIN32 +#include +#endif // _WIN32 +#include + +#ifdef _WIN32 +#define vsnprintf _vsnprintf +#endif // _WIN32 + +/* Backwards compatibility with headers shipped with Visual Studio 2005 and + * earlier. */ +#ifdef _WIN32 +WINBASEAPI BOOL WINAPI IsDebuggerPresent(VOID); +#endif // _WIN32 + +// Size of guard bytes around dynamically allocated blocks. +#define MALLOC_GUARD_SIZE 16 +// Pattern used to initialize guard blocks. +#define MALLOC_GUARD_PATTERN 0xEF +// Pattern used to initialize memory allocated with test_malloc(). +#define MALLOC_ALLOC_PATTERN 0xBA +#define MALLOC_FREE_PATTERN 0xCD +// Alignment of allocated blocks. NOTE: This must be base2. +#define MALLOC_ALIGNMENT sizeof(size_t) + +// Printf formatting for source code locations. +#define SOURCE_LOCATION_FORMAT "%s:%d" + +// Calculates the number of elements in an array. +#define ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) + +// Declare and initialize the pointer member of ValuePointer variable name +// with ptr. +#define declare_initialize_value_pointer_pointer(name, ptr) \ + ValuePointer name ; \ + name.value = 0; \ + name.pointer = (void*)(ptr) + +// Declare and initialize the value member of ValuePointer variable name +// with val. +#define declare_initialize_value_pointer_value(name, val) \ + ValuePointer name ; \ + name.value = val + +// Cast a LargestIntegralType to pointer_type via a ValuePointer. 
+#define cast_largest_integral_type_to_pointer( \ + pointer_type, largest_integral_type) \ + ((pointer_type)((ValuePointer*)&(largest_integral_type))->pointer) + +// Used to cast LargetIntegralType to void* and vice versa. +typedef union ValuePointer { + LargestIntegralType value; + void *pointer; +} ValuePointer; + +// Doubly linked list node. +typedef struct ListNode { + const void *value; + int refcount; + struct ListNode *next; + struct ListNode *prev; +} ListNode; + +// Debug information for malloc(). +typedef struct MallocBlockInfo { + void* block; // Address of the block returned by malloc(). + size_t allocated_size; // Total size of the allocated block. + size_t size; // Request block size. + SourceLocation location; // Where the block was allocated. + ListNode node; // Node within list of all allocated blocks. +} MallocBlockInfo; + +// State of each test. +typedef struct TestState { + const ListNode *check_point; // Check point of the test if there's a + // setup function. + void *state; // State associated with the test. +} TestState; + +// Determines whether two values are the same. +typedef int (*EqualityFunction)(const void *left, const void *right); + +// Value of a symbol and the place it was declared. +typedef struct SymbolValue { + SourceLocation location; + LargestIntegralType value; +} SymbolValue; + +/* Contains a list of values for a symbol. + * NOTE: Each structure referenced by symbol_values_list_head must have a + * SourceLocation as its' first member. + */ +typedef struct SymbolMapValue { + const char *symbol_name; + ListNode symbol_values_list_head; +} SymbolMapValue; + +// Used by list_free() to deallocate values referenced by list nodes. +typedef void (*CleanupListValue)(const void *value, void *cleanup_value_data); + +// Structure used to check the range of integer types. 
+typedef struct CheckIntegerRange { + CheckParameterEvent event; + LargestIntegralType minimum; + LargestIntegralType maximum; +} CheckIntegerRange; + +// Structure used to check whether an integer value is in a set. +typedef struct CheckIntegerSet { + CheckParameterEvent event; + const LargestIntegralType *set; + size_t size_of_set; +} CheckIntegerSet; + +/* Used to check whether a parameter matches the area of memory referenced by + * this structure. */ +typedef struct CheckMemoryData { + CheckParameterEvent event; + const void *memory; + size_t size; +} CheckMemoryData; + +static ListNode* list_initialize(ListNode * const node); +static ListNode* list_add(ListNode * const head, ListNode *new_node); +static ListNode* list_add_value(ListNode * const head, const void *value, + const int count); +static ListNode* list_remove( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data); +static void list_remove_free( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data); +static int list_empty(const ListNode * const head); +static int list_find( + ListNode * const head, const void *value, + const EqualityFunction equal_func, ListNode **output); +static int list_first(ListNode * const head, ListNode **output); +static ListNode* list_free( + ListNode * const head, const CleanupListValue cleanup_value, + void * const cleanup_value_data); + +static void add_symbol_value( + ListNode * const symbol_map_head, const char * const symbol_names[], + const size_t number_of_symbol_names, const void* value, const int count); +static int get_symbol_value( + ListNode * const symbol_map_head, const char * const symbol_names[], + const size_t number_of_symbol_names, void **output); +static void free_value(const void *value, void *cleanup_value_data); +static void free_symbol_map_value( + const void *value, void *cleanup_value_data); +static void remove_always_return_values(ListNode * const map_head, + 
const size_t number_of_symbol_names); +static int check_for_leftover_values( + const ListNode * const map_head, const char * const error_message, + const size_t number_of_symbol_names); +// This must be called at the beginning of a test to initialize some data +// structures. +static void initialize_testing(const char *test_name); +// This must be called at the end of a test to free() allocated structures. +static void teardown_testing(const char *test_name); +static void fail_if_leftover_values(const char *test_name); + + +// Keeps track of the calling context returned by setenv() so that the fail() +// method can jump out of a test. +static jmp_buf global_run_test_env; +static int global_running_test = 0; + +// Keeps track of the calling context returned by setenv() so that +// mock_assert() can optionally jump back to expect_assert_failure(). +jmp_buf global_expect_assert_env; +const char *global_last_failed_assert = NULL; +int global_expecting_assert = 0; + +// Keeps a map of the values that functions will have to return to provide +// mocked interfaces. +static ListNode global_function_result_map_head; +// Location of the last mock value returned was declared. +static SourceLocation global_last_mock_value_location; + +/* Keeps a map of the values that functions expect as parameters to their + * mocked interfaces. */ +static ListNode global_function_parameter_map_head; +// Location of last parameter value checked was declared. +static SourceLocation global_last_parameter_location; + +// List of all currently allocated blocks. +static ListNode global_allocated_blocks; + +#ifndef _WIN32 +// Signals caught by exception_handler(). +static const int exception_signals[] = { + SIGFPE, + SIGILL, + SIGSEGV, + SIGBUS, + SIGSYS, +}; + +// Default signal functions that should be restored after a test is complete. 
+typedef void (*SignalFunction)(int signal); +static SignalFunction default_signal_functions[ + ARRAY_LENGTH(exception_signals)]; + +#else // _WIN32 + +// The default exception filter. +static LPTOP_LEVEL_EXCEPTION_FILTER previous_exception_filter; + +// Fatal exceptions. +typedef struct ExceptionCodeInfo { + DWORD code; + const char* description; +} ExceptionCodeInfo; + +#define EXCEPTION_CODE_INFO(exception_code) {exception_code, #exception_code} + +static const ExceptionCodeInfo exception_codes[] = { + EXCEPTION_CODE_INFO(EXCEPTION_ACCESS_VIOLATION), + EXCEPTION_CODE_INFO(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), + EXCEPTION_CODE_INFO(EXCEPTION_DATATYPE_MISALIGNMENT), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_DENORMAL_OPERAND), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_DIVIDE_BY_ZERO), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_INEXACT_RESULT), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_INVALID_OPERATION), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_OVERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_STACK_CHECK), + EXCEPTION_CODE_INFO(EXCEPTION_FLT_UNDERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_GUARD_PAGE), + EXCEPTION_CODE_INFO(EXCEPTION_ILLEGAL_INSTRUCTION), + EXCEPTION_CODE_INFO(EXCEPTION_INT_DIVIDE_BY_ZERO), + EXCEPTION_CODE_INFO(EXCEPTION_INT_OVERFLOW), + EXCEPTION_CODE_INFO(EXCEPTION_INVALID_DISPOSITION), + EXCEPTION_CODE_INFO(EXCEPTION_INVALID_HANDLE), + EXCEPTION_CODE_INFO(EXCEPTION_IN_PAGE_ERROR), + EXCEPTION_CODE_INFO(EXCEPTION_NONCONTINUABLE_EXCEPTION), + EXCEPTION_CODE_INFO(EXCEPTION_PRIV_INSTRUCTION), + EXCEPTION_CODE_INFO(EXCEPTION_STACK_OVERFLOW), +}; +#endif // !_WIN32 + + +// Exit the currently executing test. +static void exit_test(const int quit_application) { + if (global_running_test) { + longjmp(global_run_test_env, 1); + } else if (quit_application) { + exit(-1); + } +} + + +// Initialize a SourceLocation structure. 
+static void initialize_source_location(SourceLocation * const location) { + assert_true(location); + location->file = NULL; + location->line = 0; +} + + +// Determine whether a source location is currently set. +static int source_location_is_set(const SourceLocation * const location) { + assert_true(location); + return location->file && location->line; +} + + +// Set a source location. +static void set_source_location( + SourceLocation * const location, const char * const file, + const int line) { + assert_true(location); + location->file = file; + location->line = line; +} + + +// Create function results and expected parameter lists. +void initialize_testing(const char *test_name) { + list_initialize(&global_function_result_map_head); + initialize_source_location(&global_last_mock_value_location); + list_initialize(&global_function_parameter_map_head); + initialize_source_location(&global_last_parameter_location); +} + + +static void fail_if_leftover_values(const char *test_name) { + int error_occurred = 0; + remove_always_return_values(&global_function_result_map_head, 1); + if (check_for_leftover_values( + &global_function_result_map_head, + "%s() has remaining non-returned values.\n", 1)) { + error_occurred = 1; + } + + remove_always_return_values(&global_function_parameter_map_head, 2); + if (check_for_leftover_values( + &global_function_parameter_map_head, + "%s parameter still has values that haven't been checked.\n", 2)) { + error_occurred = 1; + } + if (error_occurred) { + exit_test(1); + } +} + + +void teardown_testing(const char *test_name) { + list_free(&global_function_result_map_head, free_symbol_map_value, + (void*)0); + initialize_source_location(&global_last_mock_value_location); + list_free(&global_function_parameter_map_head, free_symbol_map_value, + (void*)1); + initialize_source_location(&global_last_parameter_location); +} + +// Initialize a list node. 
+static ListNode* list_initialize(ListNode * const node) { + node->value = NULL; + node->next = node; + node->prev = node; + node->refcount = 1; + return node; +} + + +/* Adds a value at the tail of a given list. + * The node referencing the value is allocated from the heap. */ +static ListNode* list_add_value(ListNode * const head, const void *value, + const int refcount) { + ListNode * const new_node = (ListNode*)malloc(sizeof(ListNode)); + assert_true(head); + assert_true(value); + new_node->value = value; + new_node->refcount = refcount; + return list_add(head, new_node); +} + + +// Add new_node to the end of the list. +static ListNode* list_add(ListNode * const head, ListNode *new_node) { + assert_true(head); + assert_true(new_node); + new_node->next = head; + new_node->prev = head->prev; + head->prev->next = new_node; + head->prev = new_node; + return new_node; +} + + +// Remove a node from a list. +static ListNode* list_remove( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(node); + node->prev->next = node->next; + node->next->prev = node->prev; + if (cleanup_value) { + cleanup_value(node->value, cleanup_value_data); + } + return node; +} + + +/* Remove a list node from a list and free the node. */ +static void list_remove_free( + ListNode * const node, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(node); + free(list_remove(node, cleanup_value, cleanup_value_data)); +} + + +/* Frees memory kept by a linked list + * The cleanup_value function is called for every "value" field of nodes in the + * list, except for the head. In addition to each list value, + * cleanup_value_data is passed to each call to cleanup_value. The head + * of the list is not deallocated. 
+ */ +static ListNode* list_free( + ListNode * const head, const CleanupListValue cleanup_value, + void * const cleanup_value_data) { + assert_true(head); + while (!list_empty(head)) { + list_remove_free(head->next, cleanup_value, cleanup_value_data); + } + return head; +} + + +// Determine whether a list is empty. +static int list_empty(const ListNode * const head) { + assert_true(head); + return head->next == head; +} + + +/* Find a value in the list using the equal_func to compare each node with the + * value. + */ +static int list_find(ListNode * const head, const void *value, + const EqualityFunction equal_func, ListNode **output) { + ListNode *current; + assert_true(head); + for (current = head->next; current != head; current = current->next) { + if (equal_func(current->value, value)) { + *output = current; + return 1; + } + } + return 0; +} + +// Returns the first node of a list +static int list_first(ListNode * const head, ListNode **output) { + ListNode *target_node; + assert_true(head); + if (list_empty(head)) { + return 0; + } + target_node = head->next; + *output = target_node; + return 1; +} + + +// Deallocate a value referenced by a list. +static void free_value(const void *value, void *cleanup_value_data) { + assert_true(value); + free((void*)value); +} + + +// Releases memory associated to a symbol_map_value. +static void free_symbol_map_value(const void *value, + void *cleanup_value_data) { + SymbolMapValue * const map_value = (SymbolMapValue*)value; + assert_true(value); + list_free(&map_value->symbol_values_list_head, + cleanup_value_data ? free_symbol_map_value : free_value, + (void *)((char *) cleanup_value_data - 1)); + free(map_value); +} + + +/* Determine whether a symbol name referenced by a symbol_map_value + * matches the specified function name. 
*/ +static int symbol_names_match(const void *map_value, const void *symbol) { + return !strcmp(((SymbolMapValue*)map_value)->symbol_name, + (const char*)symbol); +} + + +/* Adds a value to the queue of values associated with the given + * hierarchy of symbols. It's assumed value is allocated from the heap. + */ +static void add_symbol_value(ListNode * const symbol_map_head, + const char * const symbol_names[], + const size_t number_of_symbol_names, + const void* value, const int refcount) { + const char* symbol_name; + ListNode *target_node; + SymbolMapValue *target_map_value; + assert_true(symbol_map_head); + assert_true(symbol_names); + assert_true(number_of_symbol_names); + symbol_name = symbol_names[0]; + + if (!list_find(symbol_map_head, symbol_name, symbol_names_match, + &target_node)) { + SymbolMapValue * const new_symbol_map_value = + malloc(sizeof(*new_symbol_map_value)); + new_symbol_map_value->symbol_name = symbol_name; + list_initialize(&new_symbol_map_value->symbol_values_list_head); + target_node = list_add_value(symbol_map_head, new_symbol_map_value, + 1); + } + + target_map_value = (SymbolMapValue*)target_node->value; + if (number_of_symbol_names == 1) { + list_add_value(&target_map_value->symbol_values_list_head, + value, refcount); + } else { + add_symbol_value(&target_map_value->symbol_values_list_head, + &symbol_names[1], number_of_symbol_names - 1, value, + refcount); + } +} + + +/* Gets the next value associated with the given hierarchy of symbols. + * The value is returned as an output parameter with the function returning the + * node's old refcount value if a value is found, 0 otherwise. + * This means that a return value of 1 indicates the node was just removed from + * the list. 
+ */ +static int get_symbol_value( + ListNode * const head, const char * const symbol_names[], + const size_t number_of_symbol_names, void **output) { + const char* symbol_name; + ListNode *target_node; + assert_true(head); + assert_true(symbol_names); + assert_true(number_of_symbol_names); + assert_true(output); + symbol_name = symbol_names[0]; + + if (list_find(head, symbol_name, symbol_names_match, &target_node)) { + SymbolMapValue *map_value; + ListNode *child_list; + int return_value = 0; + assert_true(target_node); + assert_true(target_node->value); + + map_value = (SymbolMapValue*)target_node->value; + child_list = &map_value->symbol_values_list_head; + + if (number_of_symbol_names == 1) { + ListNode *value_node = NULL; + return_value = list_first(child_list, &value_node); + assert_true(return_value); + *output = (void*) value_node->value; + return_value = value_node->refcount; + if (--value_node->refcount == 0) { + list_remove_free(value_node, NULL, NULL); + } + } else { + return_value = get_symbol_value( + child_list, &symbol_names[1], number_of_symbol_names - 1, + output); + } + if (list_empty(child_list)) { + list_remove_free(target_node, free_symbol_map_value, (void*)0); + } + return return_value; + } else { + print_error("No entries for symbol %s.\n", symbol_name); + } + return 0; +} + + +/* Traverse down a tree of symbol values and remove the first symbol value + * in each branch that has a refcount < -1 (i.e should always be returned + * and has been returned at least once). 
+ */ +static void remove_always_return_values(ListNode * const map_head, + const size_t number_of_symbol_names) { + ListNode *current; + assert_true(map_head); + assert_true(number_of_symbol_names); + current = map_head->next; + while (current != map_head) { + SymbolMapValue * const value = (SymbolMapValue*)current->value; + ListNode * const next = current->next; + ListNode *child_list; + assert_true(value); + child_list = &value->symbol_values_list_head; + + if (!list_empty(child_list)) { + if (number_of_symbol_names == 1) { + ListNode * const child_node = child_list->next; + // If this item has been returned more than once, free it. + if (child_node->refcount < -1) { + list_remove_free(child_node, free_value, NULL); + } + } else { + remove_always_return_values(child_list, + number_of_symbol_names - 1); + } + } + + if (list_empty(child_list)) { + list_remove_free(current, free_value, NULL); + } + current = next; + } +} + +/* Checks if there are any leftover values set up by the test that were never + * retrieved through execution, and fail the test if that is the case. 
+ */ +static int check_for_leftover_values( + const ListNode * const map_head, const char * const error_message, + const size_t number_of_symbol_names) { + const ListNode *current; + int symbols_with_leftover_values = 0; + assert_true(map_head); + assert_true(number_of_symbol_names); + + for (current = map_head->next; current != map_head; + current = current->next) { + const SymbolMapValue * const value = + (SymbolMapValue*)current->value; + const ListNode *child_list; + assert_true(value); + child_list = &value->symbol_values_list_head; + + if (!list_empty(child_list)) { + if (number_of_symbol_names == 1) { + const ListNode *child_node; + print_error(error_message, value->symbol_name); + print_error(" Remaining item(s) declared at...\n"); + + for (child_node = child_list->next; child_node != child_list; + child_node = child_node->next) { + const SourceLocation * const location = child_node->value; + print_error(" " SOURCE_LOCATION_FORMAT "\n", + location->file, location->line); + } + } else { + print_error("%s.", value->symbol_name); + check_for_leftover_values(child_list, error_message, + number_of_symbol_names - 1); + } + symbols_with_leftover_values ++; + } + } + return symbols_with_leftover_values; +} + + +// Get the next return value for the specified mock function. 
+LargestIntegralType _mock(const char * const function, const char* const file, + const int line) { + void *result; + const int rc = get_symbol_value(&global_function_result_map_head, + &function, 1, &result); + if (rc) { + SymbolValue * const symbol = (SymbolValue*)result; + const LargestIntegralType value = symbol->value; + global_last_mock_value_location = symbol->location; + if (rc == 1) { + free(symbol); + } + return value; + } else { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " + "to mock function %s\n", file, line, function); + if (source_location_is_set(&global_last_mock_value_location)) { + print_error("Previously returned mock value was declared at " + SOURCE_LOCATION_FORMAT "\n", + global_last_mock_value_location.file, + global_last_mock_value_location.line); + } else { + print_error("There were no previously returned mock values for " + "this test.\n"); + } + exit_test(1); + } + return 0; +} + + +// Add a return value for the specified mock function name. +void _will_return(const char * const function_name, const char * const file, + const int line, const LargestIntegralType value, + const int count) { + SymbolValue * const return_value = malloc(sizeof(*return_value)); + assert_true(count > 0 || count == -1); + return_value->value = value; + set_source_location(&return_value->location, file, line); + add_symbol_value(&global_function_result_map_head, &function_name, 1, + return_value, count); +} + + +/* Add a custom parameter checking function. If the event parameter is NULL + * the event structure is allocated internally by this function. If event + * parameter is provided it must be allocated on the heap and doesn't need to + * be deallocated by the caller. 
+ */ +void _expect_check( + const char* const function, const char* const parameter, + const char* const file, const int line, + const CheckParameterValue check_function, + const LargestIntegralType check_data, + CheckParameterEvent * const event, const int count) { + CheckParameterEvent * const check = + event ? event : malloc(sizeof(*check)); + const char* symbols[] = {function, parameter}; + check->parameter_name = parameter; + check->check_value = check_function; + check->check_value_data = check_data; + set_source_location(&check->location, file, line); + add_symbol_value(&global_function_parameter_map_head, symbols, 2, check, + count); +} + + +/* Returns 1 if the specified values are equal. If the values are not equal + * an error is displayed and 0 is returned. */ +static int values_equal_display_error(const LargestIntegralType left, + const LargestIntegralType right) { + const int equal = left == right; + if (!equal) { + print_error(LargestIntegralTypePrintfFormat " != " + LargestIntegralTypePrintfFormat "\n", left, right); + } + return equal; +} + +/* Returns 1 if the specified values are not equal. If the values are equal + * an error is displayed and 0 is returned. */ +static int values_not_equal_display_error(const LargestIntegralType left, + const LargestIntegralType right) { + const int not_equal = left != right; + if (!not_equal) { + print_error(LargestIntegralTypePrintfFormat " == " + LargestIntegralTypePrintfFormat "\n", left, right); + } + return not_equal; +} + + +/* Determine whether value is contained within check_integer_set. + * If invert is 0 and the value is in the set 1 is returned, otherwise 0 is + * returned and an error is displayed. If invert is 1 and the value is not + * in the set 1 is returned, otherwise 0 is returned and an error is + * displayed. 
*/ +static int value_in_set_display_error( + const LargestIntegralType value, + const CheckIntegerSet * const check_integer_set, const int invert) { + int succeeded = invert; + assert_true(check_integer_set); + { + const LargestIntegralType * const set = check_integer_set->set; + const size_t size_of_set = check_integer_set->size_of_set; + size_t i; + for (i = 0; i < size_of_set; i++) { + if (set[i] == value) { + // If invert = 0 and item is found, succeeded = 1. + // If invert = 1 and item is found, succeeded = 0. + succeeded = !succeeded; + break; + } + } + if (succeeded) { + return 1; + } + print_error("%d is %sin the set (", value, invert ? "" : "not "); + for (i = 0; i < size_of_set; i++) { + print_error("%d, ", set[i]); + } + print_error(")\n"); + } + return 0; +} + + +/* Determine whether a value is within the specified range. If the value is + * within the specified range 1 is returned. If the value isn't within the + * specified range an error is displayed and 0 is returned. */ +static int integer_in_range_display_error( + const LargestIntegralType value, const LargestIntegralType range_min, + const LargestIntegralType range_max) { + if (value >= range_min && value <= range_max) { + return 1; + } + print_error("%d is not within the range %d-%d\n", value, range_min, + range_max); + return 0; +} + + +/* Determine whether a value is within the specified range. If the value + * is not within the range 1 is returned. If the value is within the + * specified range an error is displayed and zero is returned. */ +static int integer_not_in_range_display_error( + const LargestIntegralType value, const LargestIntegralType range_min, + const LargestIntegralType range_max) { + if (value < range_min || value > range_max) { + return 1; + } + print_error("%d is within the range %d-%d\n", value, range_min, + range_max); + return 0; +} + + +/* Determine whether the specified strings are equal. If the strings are equal + * 1 is returned. 
If they're not equal an error is displayed and 0 is + * returned. */ +static int string_equal_display_error( + const char * const left, const char * const right) { + if (strcmp(left, right) == 0) { + return 1; + } + print_error("\"%s\" != \"%s\"\n", left, right); + return 0; +} + + +/* Determine whether the specified strings are equal. If the strings are not + * equal 1 is returned. If they're not equal an error is displayed and 0 is + * returned */ +static int string_not_equal_display_error( + const char * const left, const char * const right) { + if (strcmp(left, right) != 0) { + return 1; + } + print_error("\"%s\" == \"%s\"\n", left, right); + return 0; +} + + +/* Determine whether the specified areas of memory are equal. If they're equal + * 1 is returned otherwise an error is displayed and 0 is returned. */ +static int memory_equal_display_error(const char* const a, const char* const b, + const size_t size) { + int differences = 0; + size_t i; + for (i = 0; i < size; i++) { + const char l = a[i]; + const char r = b[i]; + if (l != r) { + print_error("difference at offset %d 0x%02x 0x%02x\n", i, l, r); + differences ++; + } + } + if (differences) { + print_error("%d bytes of 0x%08x and 0x%08x differ\n", differences, + a, b); + return 0; + } + return 1; +} + + +/* Determine whether the specified areas of memory are not equal. If they're + * not equal 1 is returned otherwise an error is displayed and 0 is + * returned. */ +static int memory_not_equal_display_error( + const char* const a, const char* const b, const size_t size) { + int same = 0; + size_t i; + for (i = 0; i < size; i++) { + const char l = a[i]; + const char r = b[i]; + if (l == r) { + same ++; + } + } + if (same == size) { + print_error("%d bytes of 0x%08x and 0x%08x the same\n", same, + a, b); + return 0; + } + return 1; +} + + +// CheckParameterValue callback to check whether a value is within a set. 
+static int check_in_set(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return value_in_set_display_error(value, + cast_largest_integral_type_to_pointer(CheckIntegerSet*, + check_value_data), 0); +} + + +// CheckParameterValue callback to check whether a value isn't within a set. +static int check_not_in_set(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return value_in_set_display_error(value, + cast_largest_integral_type_to_pointer(CheckIntegerSet*, + check_value_data), 1); +} + + +/* Create the callback data for check_in_set() or check_not_in_set() and + * register a check event. */ +static void expect_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const CheckParameterValue check_function, const int count) { + CheckIntegerSet * const check_integer_set = + malloc(sizeof(*check_integer_set) + + (sizeof(values[0]) * number_of_values)); + LargestIntegralType * const set = (LargestIntegralType*)( + check_integer_set + 1); + declare_initialize_value_pointer_pointer(check_data, check_integer_set); + assert_true(values); + assert_true(number_of_values); + memcpy(set, values, number_of_values * sizeof(values[0])); + check_integer_set->set = set; + _expect_check( + function, parameter, file, line, check_function, + check_data.value, &check_integer_set->event, count); +} + + +// Add an event to check whether a value is in a set. +void _expect_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const int count) { + expect_set(function, parameter, file, line, values, number_of_values, + check_in_set, count); +} + + +// Add an event to check whether a value isn't in a set. 
+void _expect_not_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType values[], const size_t number_of_values, + const int count) { + expect_set(function, parameter, file, line, values, number_of_values, + check_not_in_set, count); +} + + +// CheckParameterValue callback to check whether a value is within a range. +static int check_in_range(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckIntegerRange * const check_integer_range = + cast_largest_integral_type_to_pointer(CheckIntegerRange*, + check_value_data); + assert_true(check_integer_range); + return integer_in_range_display_error(value, check_integer_range->minimum, + check_integer_range->maximum); +} + + +// CheckParameterValue callback to check whether a value is not within a range. +static int check_not_in_range(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckIntegerRange * const check_integer_range = + cast_largest_integral_type_to_pointer(CheckIntegerRange*, + check_value_data); + assert_true(check_integer_range); + return integer_not_in_range_display_error( + value, check_integer_range->minimum, check_integer_range->maximum); +} + + +/* Create the callback data for check_in_range() or check_not_in_range() and + * register a check event. 
*/ +static void expect_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const CheckParameterValue check_function, const int count) { + CheckIntegerRange * const check_integer_range = + malloc(sizeof(*check_integer_range)); + declare_initialize_value_pointer_pointer(check_data, check_integer_range); + check_integer_range->minimum = minimum; + check_integer_range->maximum = maximum; + _expect_check(function, parameter, file, line, check_function, + check_data.value, &check_integer_range->event, count); +} + + +// Add an event to determine whether a parameter is within a range. +void _expect_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const int count) { + expect_range(function, parameter, file, line, minimum, maximum, + check_in_range, count); +} + + +// Add an event to determine whether a parameter is not within a range. +void _expect_not_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, const LargestIntegralType maximum, + const int count) { + expect_range(function, parameter, file, line, minimum, maximum, + check_not_in_range, count); +} + + +/* CheckParameterValue callback to check whether a value is equal to an + * expected value. */ +static int check_value(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return values_equal_display_error(value, check_value_data); +} + + +// Add an event to check a parameter equals an expected value. 
+void _expect_value( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType value, const int count) { + _expect_check(function, parameter, file, line, check_value, value, NULL, + count); +} + + +/* CheckParameterValue callback to check whether a value is not equal to an + * expected value. */ +static int check_not_value(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return values_not_equal_display_error(value, check_value_data); +} + + +// Add an event to check a parameter is not equal to an expected value. +void _expect_not_value( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType value, const int count) { + _expect_check(function, parameter, file, line, check_not_value, value, + NULL, count); +} + + +// CheckParameterValue callback to check whether a parameter equals a string. +static int check_string(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return string_equal_display_error( + cast_largest_integral_type_to_pointer(char*, value), + cast_largest_integral_type_to_pointer(char*, check_value_data)); +} + + +// Add an event to check whether a parameter is equal to a string. +void _expect_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count) { + declare_initialize_value_pointer_pointer(string_pointer, (char*)string); + _expect_check(function, parameter, file, line, check_string, + string_pointer.value, NULL, count); +} + + +/* CheckParameterValue callback to check whether a parameter is not equals to + * a string. 
*/ +static int check_not_string(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return string_not_equal_display_error( + cast_largest_integral_type_to_pointer(char*, value), + cast_largest_integral_type_to_pointer(char*, check_value_data)); +} + + +// Add an event to check whether a parameter is not equal to a string. +void _expect_not_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count) { + declare_initialize_value_pointer_pointer(string_pointer, (char*)string); + _expect_check(function, parameter, file, line, check_not_string, + string_pointer.value, NULL, count); +} + +/* CheckParameterValue callback to check whether a parameter equals an area of + * memory. */ +static int check_memory(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckMemoryData * const check = cast_largest_integral_type_to_pointer( + CheckMemoryData*, check_value_data); + assert_true(check); + return memory_equal_display_error( + cast_largest_integral_type_to_pointer(void*, value), + check->memory, check->size); +} + + +/* Create the callback data for check_memory() or check_not_memory() and + * register a check event. 
*/ +static void expect_memory_setup( + const char* const function, const char* const parameter, + const char* const file, const int line, + const void * const memory, const size_t size, + const CheckParameterValue check_function, const int count) { + CheckMemoryData * const check_data = malloc(sizeof(*check_data) + size); + void * const mem = (void*)(check_data + 1); + declare_initialize_value_pointer_pointer(check_data_pointer, check_data); + assert_true(memory); + assert_true(size); + memcpy(mem, memory, size); + check_data->memory = mem; + check_data->size = size; + _expect_check(function, parameter, file, line, check_function, + check_data_pointer.value, &check_data->event, count); +} + + +// Add an event to check whether a parameter matches an area of memory. +void _expect_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count) { + expect_memory_setup(function, parameter, file, line, memory, size, + check_memory, count); +} + + +/* CheckParameterValue callback to check whether a parameter is not equal to + * an area of memory. */ +static int check_not_memory(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + CheckMemoryData * const check = cast_largest_integral_type_to_pointer( + CheckMemoryData*, check_value_data); + assert_true(check); + return memory_not_equal_display_error( + cast_largest_integral_type_to_pointer(void*, value), check->memory, + check->size); +} + + +// Add an event to check whether a parameter doesn't match an area of memory. +void _expect_not_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count) { + expect_memory_setup(function, parameter, file, line, memory, size, + check_not_memory, count); +} + + +// CheckParameterValue callback that always returns 1. 
+static int check_any(const LargestIntegralType value, + const LargestIntegralType check_value_data) { + return 1; +} + + +// Add an event to allow any value for a parameter. +void _expect_any( + const char* const function, const char* const parameter, + const char* const file, const int line, const int count) { + _expect_check(function, parameter, file, line, check_any, 0, NULL, + count); +} + + +void _check_expected( + const char * const function_name, const char * const parameter_name, + const char* file, const int line, const LargestIntegralType value) { + void *result; + const char* symbols[] = {function_name, parameter_name}; + const int rc = get_symbol_value(&global_function_parameter_map_head, + symbols, 2, &result); + if (rc) { + CheckParameterEvent * const check = (CheckParameterEvent*)result; + int check_succeeded; + global_last_parameter_location = check->location; + check_succeeded = check->check_value(value, check->check_value_data); + if (rc == 1) { + free(check); + } + if (!check_succeeded) { + print_error("ERROR: Check of parameter %s, function %s failed\n" + "Expected parameter declared at " + SOURCE_LOCATION_FORMAT "\n", + parameter_name, function_name, + global_last_parameter_location.file, + global_last_parameter_location.line); + _fail(file, line); + } + } else { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " + "to check parameter %s of function %s\n", file, line, + parameter_name, function_name); + if (source_location_is_set(&global_last_parameter_location)) { + print_error("Previously declared parameter value was declared at " + SOURCE_LOCATION_FORMAT "\n", + global_last_parameter_location.file, + global_last_parameter_location.line); + } else { + print_error("There were no previously declared parameter values " + "for this test.\n"); + } + exit_test(1); + } +} + + + +/* Replacement for assert. 
*/ +void mock_assert(const int result, const char* const expression, + const char* const file, const int line) { + if (!result) { + if (global_expecting_assert) { + global_last_failed_assert = expression; + longjmp(global_expect_assert_env, result); + } else { + print_error("ASSERT: %s\n", expression); + _fail(file, line); + } + } +} + + +void _assert_true(const LargestIntegralType result, + const char * const expression, + const char * const file, const int line) { + if (!result) { + print_error("%s\n", expression); + _fail(file, line); + } +} + +void _assert_int_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line) { + if (!values_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_int_not_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line) { + if (!values_not_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_string_equal(const char * const a, const char * const b, + const char * const file, const int line) { + if (!string_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_string_not_equal(const char * const a, const char * const b, + const char *file, const int line) { + if (!string_not_equal_display_error(a, b)) { + _fail(file, line); + } +} + + +void _assert_memory_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line) { + if (!memory_equal_display_error((const char*)a, (const char*)b, size)) { + _fail(file, line); + } +} + + +void _assert_memory_not_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line) { + if (!memory_not_equal_display_error((const char*)a, (const char*)b, + size)) { + _fail(file, line); + } +} + + +void _assert_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const 
file, + const int line) { + if (!integer_in_range_display_error(value, minimum, maximum)) { + _fail(file, line); + } +} + +void _assert_not_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, + const int line) { + if (!integer_not_in_range_display_error(value, minimum, maximum)) { + _fail(file, line); + } +} + +void _assert_in_set(const LargestIntegralType value, + const LargestIntegralType values[], + const size_t number_of_values, const char* const file, + const int line) { + CheckIntegerSet check_integer_set; + check_integer_set.set = values; + check_integer_set.size_of_set = number_of_values; + if (!value_in_set_display_error(value, &check_integer_set, 0)) { + _fail(file, line); + } +} + +void _assert_not_in_set(const LargestIntegralType value, + const LargestIntegralType values[], + const size_t number_of_values, const char* const file, + const int line) { + CheckIntegerSet check_integer_set; + check_integer_set.set = values; + check_integer_set.size_of_set = number_of_values; + if (!value_in_set_display_error(value, &check_integer_set, 1)) { + _fail(file, line); + } +} + + +// Get the list of allocated blocks. +static ListNode* get_allocated_blocks_list() { + // If it initialized, initialize the list of allocated blocks. + if (!global_allocated_blocks.value) { + list_initialize(&global_allocated_blocks); + global_allocated_blocks.value = (void*)1; + } + return &global_allocated_blocks; +} + +// Use the real malloc in this function. +#undef malloc +void* _test_malloc(const size_t size, const char* file, const int line) { + char* ptr; + MallocBlockInfo *block_info; + ListNode * const block_list = get_allocated_blocks_list(); + const size_t allocate_size = size + (MALLOC_GUARD_SIZE * 2) + + sizeof(*block_info) + MALLOC_ALIGNMENT; + char* const block = (char*)malloc(allocate_size); + assert_true(block); + + // Calculate the returned address. 
+ ptr = (char*)(((size_t)block + MALLOC_GUARD_SIZE + sizeof(*block_info) + + MALLOC_ALIGNMENT) & ~(MALLOC_ALIGNMENT - 1)); + + // Initialize the guard blocks. + memset(ptr - MALLOC_GUARD_SIZE, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); + memset(ptr + size, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); + memset(ptr, MALLOC_ALLOC_PATTERN, size); + + block_info = (MallocBlockInfo*)(ptr - (MALLOC_GUARD_SIZE + + sizeof(*block_info))); + set_source_location(&block_info->location, file, line); + block_info->allocated_size = allocate_size; + block_info->size = size; + block_info->block = block; + block_info->node.value = block_info; + list_add(block_list, &block_info->node); + return ptr; +} +#define malloc test_malloc + + +void* _test_calloc(const size_t number_of_elements, const size_t size, + const char* file, const int line) { + void* const ptr = _test_malloc(number_of_elements * size, file, line); + if (ptr) { + memset(ptr, 0, number_of_elements * size); + } + return ptr; +} + + +// Use the real free in this function. +#undef free +void _test_free(void* const ptr, const char* file, const int line) { + unsigned int i; + char *block = (char*)ptr; + MallocBlockInfo *block_info; + _assert_true((LargestIntegralType)ptr, "ptr", file, line); + block_info = (MallocBlockInfo*)(block - (MALLOC_GUARD_SIZE + + sizeof(*block_info))); + // Check the guard blocks. 
+ { + char *guards[2] = {block - MALLOC_GUARD_SIZE, + block + block_info->size}; + for (i = 0; i < ARRAY_LENGTH(guards); i++) { + unsigned int j; + char * const guard = guards[i]; + for (j = 0; j < MALLOC_GUARD_SIZE; j++) { + const char diff = guard[j] - MALLOC_GUARD_PATTERN; + if (diff) { + print_error( + "Guard block of 0x%08x size=%d allocated by " + SOURCE_LOCATION_FORMAT " at 0x%08x is corrupt\n", + (size_t)ptr, block_info->size, + block_info->location.file, block_info->location.line, + (size_t)&guard[j]); + _fail(file, line); + } + } + } + } + list_remove(&block_info->node, NULL, NULL); + + block = block_info->block; + memset(block, MALLOC_FREE_PATTERN, block_info->allocated_size); + free(block); +} +#define free test_free + + +// Crudely checkpoint the current heap state. +static const ListNode* check_point_allocated_blocks() { + return get_allocated_blocks_list()->prev; +} + + +/* Display the blocks allocated after the specified check point. This + * function returns the number of blocks displayed. */ +static int display_allocated_blocks(const ListNode * const check_point) { + const ListNode * const head = get_allocated_blocks_list(); + const ListNode *node; + int allocated_blocks = 0; + assert_true(check_point); + assert_true(check_point->next); + + for (node = check_point->next; node != head; node = node->next) { + const MallocBlockInfo * const block_info = node->value; + assert_true(block_info); + + if (!allocated_blocks) { + print_error("Blocks allocated...\n"); + } + print_error(" 0x%08x : " SOURCE_LOCATION_FORMAT "\n", + block_info->block, block_info->location.file, + block_info->location.line); + allocated_blocks ++; + } + return allocated_blocks; +} + + +// Free all blocks allocated after the specified check point. 
+static void free_allocated_blocks(const ListNode * const check_point) { + const ListNode * const head = get_allocated_blocks_list(); + const ListNode *node; + assert_true(check_point); + + node = check_point->next; + assert_true(node); + + while (node != head) { + MallocBlockInfo * const block_info = (MallocBlockInfo*)node->value; + node = node->next; + free((char*)block_info + sizeof(*block_info) + MALLOC_GUARD_SIZE); + } +} + + +// Fail if any any blocks are allocated after the specified check point. +static void fail_if_blocks_allocated(const ListNode * const check_point, + const char * const test_name) { + const int allocated_blocks = display_allocated_blocks(check_point); + if (allocated_blocks) { + free_allocated_blocks(check_point); + print_error("ERROR: %s leaked %d block(s)\n", test_name, + allocated_blocks); + exit_test(1); + } +} + + +void _fail(const char * const file, const int line) { + print_error("ERROR: " SOURCE_LOCATION_FORMAT " Failure!\n", file, line); + exit_test(1); +} + + +#ifndef _WIN32 +static void exception_handler(int sig) { + print_error("%s\n", strsignal(sig)); + exit_test(1); +} + +#else // _WIN32 + +static LONG WINAPI exception_filter(EXCEPTION_POINTERS *exception_pointers) { + EXCEPTION_RECORD * const exception_record = + exception_pointers->ExceptionRecord; + const DWORD code = exception_record->ExceptionCode; + unsigned int i; + for (i = 0; i < ARRAY_LENGTH(exception_codes); i++) { + const ExceptionCodeInfo * const code_info = &exception_codes[i]; + if (code == code_info->code) { + static int shown_debug_message = 0; + fflush(stdout); + print_error("%s occurred at 0x%08x.\n", code_info->description, + exception_record->ExceptionAddress); + if (!shown_debug_message) { + print_error( + "\n" + "To debug in Visual Studio...\n" + "1. Select menu item File->Open Project\n" + "2. Change 'Files of type' to 'Executable Files'\n" + "3. Open this executable.\n" + "4. 
Select menu item Debug->Start\n" + "\n" + "Alternatively, set the environment variable \n" + "UNIT_TESTING_DEBUG to 1 and rebuild this executable, \n" + "then click 'Debug' in the popup dialog box.\n" + "\n"); + shown_debug_message = 1; + } + exit_test(0); + return EXCEPTION_EXECUTE_HANDLER; + } + } + return EXCEPTION_CONTINUE_SEARCH; +} +#endif // !_WIN32 + + +// Standard output and error print methods. +void vprint_message(const char* const format, va_list args) { + char buffer[1024]; + vsnprintf(buffer, sizeof(buffer), format, args); + puts(buffer); +#ifdef _WIN32 + OutputDebugString(buffer); +#endif // _WIN32 +} + + +void vprint_error(const char* const format, va_list args) { + char buffer[1024]; + vsnprintf(buffer, sizeof(buffer), format, args); + fputs(buffer, stderr); +#ifdef _WIN32 + OutputDebugString(buffer); +#endif // _WIN32 +} + + +void print_message(const char* const format, ...) { + va_list args; + va_start(args, format); + vprint_message(format, args); + va_end(args); +} + + +void print_error(const char* const format, ...) { + va_list args; + va_start(args, format); + vprint_error(format, args); + va_end(args); +} + + +int _run_test( + const char * const function_name, const UnitTestFunction Function, + void ** const state, const UnitTestFunctionType function_type, + const void* const heap_check_point) { + const ListNode * const check_point = heap_check_point ? 
+ heap_check_point : check_point_allocated_blocks(); + void *current_state = NULL; + int rc = 1; + int handle_exceptions = 1; +#ifdef _WIN32 + handle_exceptions = !IsDebuggerPresent(); +#endif // _WIN32 +#if UNIT_TESTING_DEBUG + handle_exceptions = 0; +#endif // UNIT_TESTING_DEBUG + + if (handle_exceptions) { +#ifndef _WIN32 + unsigned int i; + for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) { + default_signal_functions[i] = signal( + exception_signals[i], exception_handler); + } +#else // _WIN32 + previous_exception_filter = SetUnhandledExceptionFilter( + exception_filter); +#endif // !_WIN32 + } + + if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { + print_message("%s: Starting test\n", function_name); + } + initialize_testing(function_name); + global_running_test = 1; + if (setjmp(global_run_test_env) == 0) { + Function(state ? state : &current_state); + fail_if_leftover_values(function_name); + + /* If this is a setup function then ignore any allocated blocks + * only ensure they're deallocated on tear down. */ + if (function_type != UNIT_TEST_FUNCTION_TYPE_SETUP) { + fail_if_blocks_allocated(check_point, function_name); + } + + global_running_test = 0; + + if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { + print_message("%s: Test completed successfully.\n", function_name); + } + rc = 0; + } else { + global_running_test = 0; + print_message("%s: Test failed.\n", function_name); + } + teardown_testing(function_name); + + if (handle_exceptions) { +#ifndef _WIN32 + unsigned int i; + for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) { + signal(exception_signals[i], default_signal_functions[i]); + } +#else // _WIN32 + if (previous_exception_filter) { + SetUnhandledExceptionFilter(previous_exception_filter); + previous_exception_filter = NULL; + } +#endif // !_WIN32 + } + + return rc; +} + + +int _run_tests(const UnitTest * const tests, const size_t number_of_tests) { + // Whether to execute the next test.
+ int run_next_test = 1; + // Whether the previous test failed. + int previous_test_failed = 0; + // Check point of the heap state. + const ListNode * const check_point = check_point_allocated_blocks(); + // Current test being executed. + size_t current_test = 0; + // Number of tests executed. + size_t tests_executed = 0; + // Number of failed tests. + size_t total_failed = 0; + // Number of setup functions. + size_t setups = 0; + // Number of teardown functions. + size_t teardowns = 0; + /* A stack of test states. A state is pushed on the stack + * when a test setup occurs and popped on tear down. */ + TestState* test_states = malloc(number_of_tests * sizeof(*test_states)); + size_t number_of_test_states = 0; + // Names of the tests that failed. + const char** failed_names = malloc(number_of_tests * + sizeof(*failed_names)); + void **current_state = NULL; + // Make sure LargestIntegralType is at least the size of a pointer. + assert_true(sizeof(LargestIntegralType) >= sizeof(void*)); + + while (current_test < number_of_tests) { + const ListNode *test_check_point = NULL; + TestState *current_TestState; + const UnitTest * const test = &tests[current_test++]; + if (!test->function) { + continue; + } + + switch (test->function_type) { + case UNIT_TEST_FUNCTION_TYPE_TEST: + run_next_test = 1; + break; + case UNIT_TEST_FUNCTION_TYPE_SETUP: { + // Checkpoint the heap before the setup. + current_TestState = &test_states[number_of_test_states++]; + current_TestState->check_point = check_point_allocated_blocks(); + test_check_point = current_TestState->check_point; + current_state = &current_TestState->state; + *current_state = NULL; + run_next_test = 1; + setups ++; + break; + } + case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: + // Check the heap based on the last setup checkpoint.
+ assert_true(number_of_test_states); + current_TestState = &test_states[--number_of_test_states]; + test_check_point = current_TestState->check_point; + current_state = &current_TestState->state; + teardowns ++; + break; + default: + print_error("Invalid unit test function type %d\n", + test->function_type); + exit_test(1); + break; + } + + if (run_next_test) { + int failed = _run_test(test->name, test->function, current_state, + test->function_type, test_check_point); + if (failed) { + failed_names[total_failed] = test->name; + } + + switch (test->function_type) { + case UNIT_TEST_FUNCTION_TYPE_TEST: + previous_test_failed = failed; + total_failed += failed; + tests_executed ++; + break; + + case UNIT_TEST_FUNCTION_TYPE_SETUP: + if (failed) { + total_failed ++; + tests_executed ++; + // Skip forward until the next test or setup function. + run_next_test = 0; + } + previous_test_failed = 0; + break; + + case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: + // If this test failed. + if (failed && !previous_test_failed) { + total_failed ++; + } + break; + default: + assert_false("BUG: shouldn't be here!"); + break; + } + } + } + + if (total_failed) { + size_t i; + print_error("%d out of %d tests failed!\n", total_failed, + tests_executed); + for (i = 0; i < total_failed; i++) { + print_error(" %s\n", failed_names[i]); + } + } else { + print_message("All %d tests passed\n", tests_executed); + } + + if (number_of_test_states) { + print_error("Mismatched number of setup %d and teardown %d " + "functions\n", setups, teardowns); + total_failed = -1; + } + + free(test_states); + free((void*)failed_names); + + fail_if_blocks_allocated(check_point, "run_tests"); + return (int)total_failed; +} diff --git a/tests/cmocka/cmockery.h b/tests/cmocka/cmockery.h new file mode 100755 index 00000000..4d5235cd --- /dev/null +++ b/tests/cmocka/cmockery.h @@ -0,0 +1,484 @@ +/* + * Copyright 2008 Google Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef CMOCKERY_H_ +#define CMOCKERY_H_ +/* + * These headers or their equivalents should be included prior to including + * this header file. + * + * #include + * #include + * #include + * + * This allows test applications to use custom definitions of C standard + * library functions and types. + */ + +// For those who are used to __func__ from gcc. +#ifndef __func__ +#define __func__ __FUNCTION__ +#endif + +/* Largest integral type. This type should be large enough to hold any + * pointer or integer supported by the compiler. */ +#ifndef LargestIntegralType +#define LargestIntegralType unsigned long long +#endif // LargestIntegralType + +// Printf format used to display LargestIntegralType. +#ifndef LargestIntegralTypePrintfFormat +#ifdef _WIN32 +#define LargestIntegralTypePrintfFormat "%I64x" +#else +#define LargestIntegralTypePrintfFormat "%llx" +#endif // _WIN32 +#endif // LargestIntegralTypePrintfFormat + +// Perform an unsigned cast to LargestIntegralType. +#define cast_to_largest_integral_type(value) \ + ((LargestIntegralType)(value)) + +// Retrieves a return value for the current function. +#define mock() _mock(__func__, __FILE__, __LINE__) + +/* Stores a value to be returned by the specified function later. + * The count parameter returns the number of times the value should be returned + * by mock(). If count is set to -1 the value will always be returned. 
+ */ +#define will_return(function, value) \ + _will_return(#function, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), 1) +#define will_return_count(function, value, count) \ + _will_return(#function, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), count) + +/* Add a custom parameter checking function. If the event parameter is NULL + * the event structure is allocated internally by this function. If event + * parameter is provided it must be allocated on the heap and doesn't need to + * be deallocated by the caller. + */ +#define expect_check(function, parameter, check_function, check_data) \ + _expect_check(#function, #parameter, __FILE__, __LINE__, check_function, \ + cast_to_largest_integral_type(check_data), NULL, 0) + +/* Add an event to check a parameter, using check_expected(), against a set of + * values. See will_return() for a description of the count parameter. + */ +#define expect_in_set(function, parameter, value_array) \ + expect_in_set_count(function, parameter, value_array, 1) +#define expect_in_set_count(function, parameter, value_array, count) \ + _expect_in_set(#function, #parameter, __FILE__, __LINE__, value_array, \ + sizeof(value_array) / sizeof((value_array)[0]), count) +#define expect_not_in_set(function, parameter, value_array) \ + expect_not_in_set_count(function, parameter, value_array, 1) +#define expect_not_in_set_count(function, parameter, value_array, count) \ + _expect_not_in_set( \ + #function, #parameter, __FILE__, __LINE__, value_array, \ + sizeof(value_array) / sizeof((value_array)[0]), count) + + +/* Add an event to check a parameter, using check_expected(), against a + * signed range. Where range is minimum <= value <= maximum. + * See will_return() for a description of the count parameter. 
+ */ +#define expect_in_range(function, parameter, minimum, maximum) \ + expect_in_range_count(function, parameter, minimum, maximum, 1) +#define expect_in_range_count(function, parameter, minimum, maximum, count) \ + _expect_in_range(#function, #parameter, __FILE__, __LINE__, minimum, \ + maximum, count) + +/* Add an event to check a parameter, using check_expected(), against a + * signed range. Where range is value < minimum or value > maximum. + * See will_return() for a description of the count parameter. + */ +#define expect_not_in_range(function, parameter, minimum, maximum) \ + expect_not_in_range_count(function, parameter, minimum, maximum, 1) +#define expect_not_in_range_count(function, parameter, minimum, maximum, \ + count) \ + _expect_not_in_range(#function, #parameter, __FILE__, __LINE__, \ + minimum, maximum, count) + +/* Add an event to check whether a parameter, using check_expected(), is or + * isn't a value. See will_return() for a description of the count parameter. + */ +#define expect_value(function, parameter, value) \ + expect_value_count(function, parameter, value, 1) +#define expect_value_count(function, parameter, value, count) \ + _expect_value(#function, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), count) +#define expect_not_value(function, parameter, value) \ + expect_not_value_count(function, parameter, value, 1) +#define expect_not_value_count(function, parameter, value, count) \ + _expect_not_value(#function, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(value), count) + +/* Add an event to check whether a parameter, using check_expected(), + * is or isn't a string. See will_return() for a description of the count + * parameter. 
+ */ +#define expect_string(function, parameter, string) \ + expect_string_count(function, parameter, string, 1) +#define expect_string_count(function, parameter, string, count) \ + _expect_string(#function, #parameter, __FILE__, __LINE__, \ + (const char*)(string), count) +#define expect_not_string(function, parameter, string) \ + expect_not_string_count(function, parameter, string, 1) +#define expect_not_string_count(function, parameter, string, count) \ + _expect_not_string(#function, #parameter, __FILE__, __LINE__, \ + (const char*)(string), count) + +/* Add an event to check whether a parameter, using check_expected() does or + * doesn't match an area of memory. See will_return() for a description of + * the count parameter. + */ +#define expect_memory(function, parameter, memory, size) \ + expect_memory_count(function, parameter, memory, size, 1) +#define expect_memory_count(function, parameter, memory, size, count) \ + _expect_memory(#function, #parameter, __FILE__, __LINE__, \ + (const void*)(memory), size, count) +#define expect_not_memory(function, parameter, memory, size) \ + expect_not_memory_count(function, parameter, memory, size, 1) +#define expect_not_memory_count(function, parameter, memory, size, count) \ + _expect_not_memory(#function, #parameter, __FILE__, __LINE__, \ + (const void*)(memory), size, count) + + +/* Add an event to allow any value for a parameter checked using + * check_expected(). See will_return() for a description of the count + * parameter. + */ +#define expect_any(function, parameter) \ + expect_any_count(function, parameter, 1) +#define expect_any_count(function, parameter, count) \ + _expect_any(#function, #parameter, __FILE__, __LINE__, count) + +/* Determine whether a function parameter is correct. This ensures the next + * value queued by one of the expect_*() macros matches the specified variable. 
+ */ +#define check_expected(parameter) \ + _check_expected(__func__, #parameter, __FILE__, __LINE__, \ + cast_to_largest_integral_type(parameter)) + +// Assert that the given expression is true. +#define assert_true(c) _assert_true(cast_to_largest_integral_type(c), #c, \ + __FILE__, __LINE__) +// Assert that the given expression is false. +#define assert_false(c) _assert_true(!(cast_to_largest_integral_type(c)), #c, \ + __FILE__, __LINE__) + +// Assert that the two given integers are equal, otherwise fail. +#define assert_int_equal(a, b) \ + _assert_int_equal(cast_to_largest_integral_type(a), \ + cast_to_largest_integral_type(b), \ + __FILE__, __LINE__) +// Assert that the two given integers are not equal, otherwise fail. +#define assert_int_not_equal(a, b) \ + _assert_int_not_equal(cast_to_largest_integral_type(a), \ + cast_to_largest_integral_type(b), \ + __FILE__, __LINE__) + +// Assert that the two given strings are equal, otherwise fail. +#define assert_string_equal(a, b) \ + _assert_string_equal((const char*)(a), (const char*)(b), __FILE__, \ + __LINE__) +// Assert that the two given strings are not equal, otherwise fail. +#define assert_string_not_equal(a, b) \ + _assert_string_not_equal((const char*)(a), (const char*)(b), __FILE__, \ + __LINE__) + +// Assert that the two given areas of memory are equal, otherwise fail. +#define assert_memory_equal(a, b, size) \ + _assert_memory_equal((const char*)(a), (const char*)(b), size, __FILE__, \ + __LINE__) +// Assert that the two given areas of memory are not equal, otherwise fail. +#define assert_memory_not_equal(a, b, size) \ + _assert_memory_not_equal((const char*)(a), (const char*)(b), size, \ + __FILE__, __LINE__) + +// Assert that the specified value is >= minimum and <= maximum. 
+#define assert_in_range(value, minimum, maximum) \ + _assert_in_range( \ + cast_to_largest_integral_type(value), \ + cast_to_largest_integral_type(minimum), \ + cast_to_largest_integral_type(maximum), __FILE__, __LINE__) + +// Assert that the specified value is < minimum or > maximum +#define assert_not_in_range(value, minimum, maximum) \ + _assert_not_in_range( \ + cast_to_largest_integral_type(value), \ + cast_to_largest_integral_type(minimum), \ + cast_to_largest_integral_type(maximum), __FILE__, __LINE__) + +// Assert that the specified value is within a set. +#define assert_in_set(value, values, number_of_values) \ + _assert_in_set(value, values, number_of_values, __FILE__, __LINE__) +// Assert that the specified value is not within a set. +#define assert_not_in_set(value, values, number_of_values) \ + _assert_not_in_set(value, values, number_of_values, __FILE__, __LINE__) + + +// Forces the test to fail immediately and quit. +#define fail() _fail(__FILE__, __LINE__) + +// Generic method to kick off testing +#define run_test(f) _run_test(#f, f, NULL, UNIT_TEST_FUNCTION_TYPE_TEST, NULL) + +// Initializes a UnitTest structure. +#define unit_test(f) { #f, f, UNIT_TEST_FUNCTION_TYPE_TEST } +#define unit_test_setup(test, setup) \ + { #test "_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_SETUP } +#define unit_test_teardown(test, teardown) \ + { #test "_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_TEARDOWN } + +/* Initialize an array of UnitTest structures with a setup function for a test + * and a teardown function. Either setup or teardown can be NULL. + */ +#define unit_test_setup_teardown(test, setup, teardown) \ + unit_test_setup(test, setup), \ + unit_test(test), \ + unit_test_teardown(test, teardown) + +/* + * Run tests specified by an array of UnitTest structures. The following + * example illustrates this macro's use with the unit_test macro. 
+ * + * void Test0(); + * void Test1(); + * + * int main(int argc, char* argv[]) { + * const UnitTest tests[] = { + * unit_test(Test0); + * unit_test(Test1); + * }; + * return run_tests(tests); + * } + */ +#define run_tests(tests) _run_tests(tests, sizeof(tests) / sizeof(tests)[0]) + +// Dynamic allocators +#define test_malloc(size) _test_malloc(size, __FILE__, __LINE__) +#define test_calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__) +#define test_free(ptr) _test_free(ptr, __FILE__, __LINE__) + +// Redirect malloc, calloc and free to the unit test allocators. +#if UNIT_TESTING +#define malloc test_malloc +#define calloc test_calloc +#define free test_free +#endif // UNIT_TESTING + +/* + * Ensure mock_assert() is called. If mock_assert() is called the assert + * expression string is returned. + * For example: + * + * #define assert mock_assert + * + * void show_message(const char *message) { + * assert(message); + * } + * + * int main(int argc, const char* argv[]) { + * expect_assert_failure(show_message(NULL)); + * printf("succeeded\n"); + * return 0; + * } + */ +#define expect_assert_failure(function_call) \ + { \ + const int expression = setjmp(global_expect_assert_env); \ + global_expecting_assert = 1; \ + if (expression) { \ + print_message("Expected assertion %s occurred\n", \ + *((const char**)&expression)); \ + global_expecting_assert = 0; \ + } else { \ + function_call ; \ + global_expecting_assert = 0; \ + print_error("Expected assert in %s\n", #function_call); \ + _fail(__FILE__, __LINE__); \ + } \ + } + +// Function prototype for setup, test and teardown functions. +typedef void (*UnitTestFunction)(void **state); + +// Function that determines whether a function parameter value is correct. +typedef int (*CheckParameterValue)(const LargestIntegralType value, + const LargestIntegralType check_value_data); + +// Type of the unit test function. 
+typedef enum UnitTestFunctionType { + UNIT_TEST_FUNCTION_TYPE_TEST = 0, + UNIT_TEST_FUNCTION_TYPE_SETUP, + UNIT_TEST_FUNCTION_TYPE_TEARDOWN, +} UnitTestFunctionType; + +/* Stores a unit test function with its name and type. + * NOTE: Every setup function must be paired with a teardown function. It's + * possible to specify NULL function pointers. + */ +typedef struct UnitTest { + const char* name; + UnitTestFunction function; + UnitTestFunctionType function_type; +} UnitTest; + + +// Location within some source code. +typedef struct SourceLocation { + const char* file; + int line; +} SourceLocation; + +// Event that's called to check a parameter value. +typedef struct CheckParameterEvent { + SourceLocation location; + const char *parameter_name; + CheckParameterValue check_value; + LargestIntegralType check_value_data; +} CheckParameterEvent; + +// Used by expect_assert_failure() and mock_assert(). +extern int global_expecting_assert; +extern jmp_buf global_expect_assert_env; + +// Retrieves a value for the given function, as set by "will_return". 
+LargestIntegralType _mock(const char * const function, const char* const file, + const int line); + +void _expect_check( + const char* const function, const char* const parameter, + const char* const file, const int line, + const CheckParameterValue check_function, + const LargestIntegralType check_data, CheckParameterEvent * const event, + const int count); + +void _expect_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType values[], + const size_t number_of_values, const int count); +void _expect_not_in_set( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType values[], + const size_t number_of_values, const int count); + +void _expect_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, + const LargestIntegralType maximum, const int count); +void _expect_not_in_range( + const char* const function, const char* const parameter, + const char* const file, const int line, + const LargestIntegralType minimum, + const LargestIntegralType maximum, const int count); + +void _expect_value( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType value, + const int count); +void _expect_not_value( + const char* const function, const char* const parameter, + const char* const file, const int line, const LargestIntegralType value, + const int count); + +void _expect_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count); +void _expect_not_string( + const char* const function, const char* const parameter, + const char* const file, const int line, const char* string, + const int count); + +void _expect_memory( + const char* const function, const char* const parameter, 
+ const char* const file, const int line, const void* const memory, + const size_t size, const int count); +void _expect_not_memory( + const char* const function, const char* const parameter, + const char* const file, const int line, const void* const memory, + const size_t size, const int count); + +void _expect_any( + const char* const function, const char* const parameter, + const char* const file, const int line, const int count); + +void _check_expected( + const char * const function_name, const char * const parameter_name, + const char* file, const int line, const LargestIntegralType value); + +// Can be used to replace assert in tested code so that in conjunction with +// check_assert() it's possible to determine whether an assert condition has +// failed without stopping a test. +void mock_assert(const int result, const char* const expression, + const char * const file, const int line); + +void _will_return(const char * const function_name, const char * const file, + const int line, const LargestIntegralType value, + const int count); +void _assert_true(const LargestIntegralType result, + const char* const expression, + const char * const file, const int line); +void _assert_int_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line); +void _assert_int_not_equal( + const LargestIntegralType a, const LargestIntegralType b, + const char * const file, const int line); +void _assert_string_equal(const char * const a, const char * const b, + const char * const file, const int line); +void _assert_string_not_equal(const char * const a, const char * const b, + const char *file, const int line); +void _assert_memory_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line); +void _assert_memory_not_equal(const void * const a, const void * const b, + const size_t size, const char* const file, + const int line); +void _assert_in_range( + const 
LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, const int line); +void _assert_not_in_range( + const LargestIntegralType value, const LargestIntegralType minimum, + const LargestIntegralType maximum, const char* const file, const int line); +void _assert_in_set( + const LargestIntegralType value, const LargestIntegralType values[], + const size_t number_of_values, const char* const file, const int line); +void _assert_not_in_set( + const LargestIntegralType value, const LargestIntegralType values[], + const size_t number_of_values, const char* const file, const int line); + +void* _test_malloc(const size_t size, const char* file, const int line); +void* _test_calloc(const size_t number_of_elements, const size_t size, + const char* file, const int line); +void _test_free(void* const ptr, const char* file, const int line); + +void _fail(const char * const file, const int line); +int _run_test( + const char * const function_name, const UnitTestFunction Function, + void ** const state, const UnitTestFunctionType function_type, + const void* const heap_check_point); +int _run_tests(const UnitTest * const tests, const size_t number_of_tests); + +// Standard output and error print methods. 
+void print_message(const char* const format, ...); +void print_error(const char* const format, ...); +void vprint_message(const char* const format, va_list args); +void vprint_error(const char* const format, va_list args); + +#endif // CMOCKERY_H_ diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 98d8d4d5..589554f9 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -1,9 +1,9 @@ #include #include #include -#include #include "rangeset.h" +#include "cmockery.h" /* for "print" functions */ #include "debug_print.c" @@ -30,18 +30,18 @@ int main(void) { /* Array of test functions */ - const struct CMUnitTest tests[] = + const struct UnitTest tests[] = { - cmocka_unit_test(test_irange_basic), - cmocka_unit_test(test_irange_list_union_merge), - cmocka_unit_test(test_irange_list_union_lossy_cov), - cmocka_unit_test(test_irange_list_union_complete_cov), - cmocka_unit_test(test_irange_list_union_intersecting), - cmocka_unit_test(test_irange_list_intersection), + unit_test(test_irange_basic), + unit_test(test_irange_list_union_merge), + unit_test(test_irange_list_union_lossy_cov), + unit_test(test_irange_list_union_complete_cov), + unit_test(test_irange_list_union_intersecting), + unit_test(test_irange_list_intersection), }; /* Run series of tests */ - return cmocka_run_group_tests(tests, NULL, NULL); + return run_tests(tests); } /* diff --git a/travis/dep-ubuntu-llvm.sh b/travis/dep-ubuntu-llvm.sh deleted file mode 100755 index e640d5b5..00000000 --- a/travis/dep-ubuntu-llvm.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cat ./travis/llvm-snapshot.gpg.key | sudo apt-key add - -echo "deb http://apt.llvm.org/trusty/ llvm-toolchain-$(lsb_release -cs)-$LLVM_VER main" | sudo tee /etc/apt/sources.list.d/llvm.list diff --git a/travis/dep-ubuntu-postgres.sh b/travis/dep-ubuntu-postgres.sh deleted file mode 100755 index 41c7d346..00000000 --- a/travis/dep-ubuntu-postgres.sh +++ /dev/null @@ -1,4 +0,0 
@@ -#!/bin/sh - -cat ./travis/postgresql.gpg.key | sudo apt-key add - -echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PG_VER" | sudo tee /etc/apt/sources.list.d/pgdg.list diff --git a/travis/llvm-snapshot.gpg.key b/travis/llvm-snapshot.gpg.key deleted file mode 100644 index aa6b105a..00000000 --- a/travis/llvm-snapshot.gpg.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.12 (GNU/Linux) - -mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM -EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM -R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2 -B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY -Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT -DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1 -G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/ -ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU -cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq -7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc -Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB -tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz -dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE -FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC -9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR -udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX -wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn -l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv -gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W -R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg -hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx -K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya 
-KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B -MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7 -BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g -zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc -bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC -DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw -F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta -RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/ -21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV -ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+ -M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa -xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ -d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/ -fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X -OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB -pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML -PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL -wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd -oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l -tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG -5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP -LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov -1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3 -krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN -bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw== -=j+4q ------END PGP PUBLIC KEY BLOCK----- diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh deleted file mode 100755 index 890897a4..00000000 --- a/travis/pg-travis-test.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -set -eux - -sudo apt-get update - - -# required packages -apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER 
postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" - -# exit code -status=0 - -# pg_config path -pg_ctl_path=/usr/lib/postgresql/$PG_VER/bin/pg_ctl -initdb_path=/usr/lib/postgresql/$PG_VER/bin/initdb -config_path=/usr/lib/postgresql/$PG_VER/bin/pg_config - - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# ... and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -# install required packages -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages - - -# perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$CC" = "clang" ]; then - sudo apt-get -y install -qq clang-$LLVM_VER - - scan-build-$LLVM_VER --status-bugs make USE_PGXS=1 PG_CONFIG=$config_path || status=$? 
- exit $status - - elif [ "$CC" = "gcc" ]; then - sudo apt-get -y install -qq cppcheck - - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - - -# create cluster 'test' -CLUSTER_PATH=$(pwd)/test_cluster -$initdb_path -D $CLUSTER_PATH -U $USER -A trust - -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 CC=${CC} PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi - -# set permission to write postgres locks -sudo chown $USER /var/run/postgresql/ - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf -echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w - -# run regression tests -PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - - -set +u - -# create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman -source /tmp/envs/pg_pathman/bin/activate - -# install pip packages -pip3 install $pip_packages - -# run python tests -make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? 
- -# deactivate virtual environment -deactivate - -set -u - - -# install cmake for cmocka -sudo apt-get -y install -qq cmake - -# build & install cmocka -CMOCKA_VER=1.1.1 -cd tests/cmocka -tar xf cmocka-$CMOCKA_VER.tar.xz -cd cmocka-$CMOCKA_VER -mkdir build && cd build -cmake .. -make && sudo make install -cd ../../../.. - -# export path to libcmocka.so -LD_LIBRARY_PATH=/usr/local/lib -export LD_LIBRARY_PATH - -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? - -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda - -#generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h - - -exit $status diff --git a/travis/postgresql.gpg.key b/travis/postgresql.gpg.key deleted file mode 100644 index 8480576e..00000000 --- a/travis/postgresql.gpg.key +++ /dev/null @@ -1,77 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja -UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V -G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 -bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi -c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC -IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh -hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U -A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 -RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj -Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 -AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB -tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD -BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A -CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO -xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY 
-kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 -z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ -Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf -Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy -2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 -B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T -7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi -vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b -ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOI -RgQQEQgABgUCTpdI7gAKCRDFr3dKWFELWqaPAKD1TtT5c3sZz92Fj97KYmqbNQZP -+ACfSC6+hfvlj4GxmUjp1aepoVTo3weJAhwEEAEIAAYFAk6XSQsACgkQTFprqxLS -p64F8Q//cCcutwrH50UoRFejg0EIZav6LUKejC6kpLeubbEtuaIH3r2zMblPGc4i -+eMQKo/PqyQrceRXeNNlqO6/exHozYi2meudxa6IudhwJIOn1MQykJbNMSC2sGUp -1W5M1N5EYgt4hy+qhlfnD66LR4G+9t5FscTJSy84SdiOuqgCOpQmPkVRm1HX5X1+ -dmnzMOCk5LHHQuiacV0qeGO7JcBCVEIDr+uhU1H2u5GPFNHm5u15n25tOxVivb94 -xg6NDjouECBH7cCVuW79YcExH/0X3/9G45rjdHlKPH1OIUJiiX47OTxdG3dAbB4Q -fnViRJhjehFscFvYWSqXo3pgWqUsEvv9qJac2ZEMSz9x2mj0ekWxuM6/hGWxJdB+ -+985rIelPmc7VRAXOjIxWknrXnPCZAMlPlDLu6+vZ5BhFX0Be3y38f7GNCxFkJzl -hWZ4Cj3WojMj+0DaC1eKTj3rJ7OJlt9S9xnO7OOPEUTGyzgNIDAyCiu8F4huLPaT -ape6RupxOMHZeoCVlqx3ouWctelB2oNXcxxiQ/8y+21aHfD4n/CiIFwDvIQjl7dg -mT3u5Lr6yxuosR3QJx1P6rP5ZrDTP9khT30t+HZCbvs5Pq+v/9m6XDmi+NlU7Zuh -Ehy97tL3uBDgoL4b/5BpFL5U9nruPlQzGq1P9jj40dxAaDAX/WKJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8ACgkQf8x9RqzM -TPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv4E/M+HPIJ4wd -nBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9OU351gm3YQct -AMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJyX3vkWdJSMwC/ -LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/WB4AIj3VohIG -kWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT26pzTiuApWM3k -/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAypEhaLmXNkg4zD -kH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCSlmgyWsR40EPP 
-YvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lfH65P64dukxeR -GteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMrR910qvwYfd/4 -6rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs+bfiQpJG1p7e -B8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY++JAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEACgkQf8x9RqzM -TPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/ArBECjFTBwi/j9 -NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoSxiVr6GQ3YXMb -OGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXObiiZT38l55pp/ -BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtHvwKcA02wwjLe -LXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+wpu6YwVCicxB -Y59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMAKOLhNFUrSQ2m -+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDeariFF9yC+5bL -tnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5hUWNr+y0i01L -jGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qbiNqCChveIm8m -Yr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7dR8tSyUJ9poDw -gw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0ACgkQf8x9RqzM -TPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWayUIG4Sv6pH6h -m8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0VlkIfg7GUw3Tz -voGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExPZyliUnHdipei -4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0UM4Btgu1Sf3nn -JcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K2+EYJuIBsYUN -orOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307SidEbSnvO5ezNe -mE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2Nm13cmkxYjQ4Z -gMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYeN4D88sLYpFh3 -paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbzoRM3dyGP889a -OyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD5wmrrhN94kby -Gtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3Y= -=DA1T ------END PGP PUBLIC KEY BLOCK----- From 17cc5fb94b802266eda56e03f2915891eb7608fa Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:01:47 
+0300 Subject: [PATCH 0627/1124] Fix condition in Dockerfile.tmpl --- Dockerfile.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index b74538fc..2192600e 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,9 +1,9 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data +ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} RUN apk --no-cache add python3 gcc make musl-dev ${CC} -RUN if ${CHECK_CODE} -eq "true" && ${CC} -eq "gcc"; then \ +RUN if ${CHECK_CODE} -eq "true" && ${SELCC} -eq "gcc"; then \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ apk --no-cache add cppcheck; \ From 1c93a96f448ff69d9d9aca6a5b6aa7c55bc8f846 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:06:58 +0300 Subject: [PATCH 0628/1124] Simplify Dockerfile.tmpl --- Dockerfile.tmpl | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 2192600e..a5013263 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,13 +1,10 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} -RUN apk --no-cache add python3 gcc make musl-dev ${CC} -RUN if ${CHECK_CODE} -eq "true" && ${SELCC} -eq "gcc"; then \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ +RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add cppcheck; \ - fi && \ + apk --no-cache add python3 gcc make musl-dev cppcheck ${CC} && \ pip3 install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ From b2c151b42355f2743bc76545615c4ef112cff21f Mon Sep 17 00:00:00 
2001 From: Ildus K Date: Thu, 29 Jun 2017 13:21:19 +0300 Subject: [PATCH 0629/1124] Fix docker image --- Dockerfile.tmpl | 2 +- run_tests.sh | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a5013263..de22cf8f 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,6 +1,6 @@ FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data SELCC=${CC} +ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ diff --git a/run_tests.sh b/run_tests.sh index b87c00e3..26de354d 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,7 +2,9 @@ set -eux -id +echo CC=$CC +echo CHECK_CODE=$CHECK_CODE +echo PG_VERSION=$PG_VERSION # perform code analysis if necessary if [ $CHECK_CODE = "true" ]; then From aac3bc2dbe97492efdc674a8cabf01af4d0ea616 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:27:00 +0300 Subject: [PATCH 0630/1124] Change variable name --- .travis.yml | 20 ++++++++++---------- Dockerfile.tmpl | 4 ++-- run_tests.sh | 5 +++-- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.travis.yml b/.travis.yml index b498e674..e0212902 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,22 +10,22 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${CC}/'${CC}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${COMPILER}/'${COMPILER}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: - docker-compose run tests env: - - PG_VERSION=10 CHECK_CODE=true CC=clang - - PG_VERSION=9.6 CHECK_CODE=true CC=clang - - PG_VERSION=9.5 CHECK_CODE=true CC=clang - - PG_VERSION=10 CHECK_CODE=true CC=gcc - - PG_VERSION=10 CHECK_CODE=false CC=gcc - - PG_VERSION=9.6 
CHECK_CODE=true CC=gcc - - PG_VERSION=9.6 CHECK_CODE=false CC=gcc - - PG_VERSION=9.5 CHECK_CODE=true CC=gcc - - PG_VERSION=9.5 CHECK_CODE=false CC=gcc + - PG_VERSION=10 CHECK_CODE=true COMPILER=clang + - PG_VERSION=9.6 CHECK_CODE=true COMPILER=clang + - PG_VERSION=9.5 CHECK_CODE=true COMPILER=clang + - PG_VERSION=10 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=10 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=9.6 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=9.6 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=9.5 CHECK_CODE=true COMPILER=gcc + - PG_VERSION=9.5 CHECK_CODE=false COMPILER=gcc after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index de22cf8f..cf2b1aa6 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,7 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add python3 gcc make musl-dev cppcheck ${CC} && \ + apk --no-cache add python3 gcc make musl-dev cppcheck ${COMPILER} && \ pip3 install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ @@ -16,4 +16,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -ENTRYPOINT PGDATA=${PGDATA} CC=${CC} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} COMPILER=${COMPILER} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/run_tests.sh b/run_tests.sh index 26de354d..f5216d7b 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -3,17 +3,18 @@ set -eux echo CC=$CC +echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION # perform code analysis if necessary if [ $CHECK_CODE = "true" ]; then - if [ "$CC" = "clang" ]; then + if [ "$COMPILER" = "clang" ]; then scan-build --status-bugs make USE_PGXS=1 || status=$? 
exit $status - elif [ "$CC" = "gcc" ]; then + elif [ "$COMPILER" = "gcc" ]; then cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ --enable=warning,portability,performance \ --suppress=redundantAssignment \ From ad285a903e9c2c61c4db8feeb95c7283c8c71b16 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 13:29:47 +0300 Subject: [PATCH 0631/1124] Fix variable error --- run_tests.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index f5216d7b..abc2b128 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,7 +2,6 @@ set -eux -echo CC=$CC echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION From 39929932f761585035bc700ce0040d0fa48f0c73 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:07:55 +0300 Subject: [PATCH 0632/1124] Change travis configuration --- .travis.yml | 18 +++++++++--------- Dockerfile.tmpl | 5 +++-- run_tests.sh | 46 +++++++++++++++++++++------------------------- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/.travis.yml b/.travis.yml index e0212902..7e99f5dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,15 +17,15 @@ script: - docker-compose run tests env: - - PG_VERSION=10 CHECK_CODE=true COMPILER=clang - - PG_VERSION=9.6 CHECK_CODE=true COMPILER=clang - - PG_VERSION=9.5 CHECK_CODE=true COMPILER=clang - - PG_VERSION=10 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=10 CHECK_CODE=false COMPILER=gcc - - PG_VERSION=9.6 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=9.6 CHECK_CODE=false COMPILER=gcc - - PG_VERSION=9.5 CHECK_CODE=true COMPILER=gcc - - PG_VERSION=9.5 CHECK_CODE=false COMPILER=gcc + - PG_VERSION=10 CHECK_CODE=clang + - PG_VERSION=9.6 CHECK_CODE=clang + - PG_VERSION=9.5 CHECK_CODE=clang + - PG_VERSION=10 CHECK_CODE=cppcheck + - PG_VERSION=10 CHECK_CODE=false + - PG_VERSION=9.6 CHECK_CODE=cppcheck + - PG_VERSION=9.6 CHECK_CODE=false + - PG_VERSION=9.5 CHECK_CODE=cppcheck + - PG_VERSION=9.5 CHECK_CODE=false after_success: - bash 
<(curl -s https://codecov.io/bash) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index cf2b1aa6..beda726b 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,8 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ - apk --no-cache add python3 gcc make musl-dev cppcheck ${COMPILER} && \ + apk --no-cache add python3 gcc make musl-dev cppcheck && \ + apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main/ && \ pip3 install testgres && \ mkdir -p /pg/data && \ mkdir /pg/pg_pathman && \ @@ -16,4 +17,4 @@ ADD . /pg/pg_pathman WORKDIR /pg/pg_pathman RUN chmod -R go+rwX /pg/pg_pathman USER postgres -ENTRYPOINT PGDATA=${PGDATA} COMPILER=${COMPILER} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh diff --git a/run_tests.sh b/run_tests.sh index abc2b128..ebda8f79 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -2,37 +2,33 @@ set -eux -echo COMPILER=$COMPILER echo CHECK_CODE=$CHECK_CODE echo PG_VERSION=$PG_VERSION # perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$COMPILER" = "clang" ]; then - scan-build --status-bugs make USE_PGXS=1 || status=$? 
- exit $status - - elif [ "$COMPILER" = "gcc" ]; then - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make USE_PGXS=1 clean +if [ "$CHECK_CODE" = "clang" ]; then + scan-build --status-bugs make USE_PGXS=1 || status=$? + exit $status + +elif [ "$CHECK_CODE" = "cppcheck" ]; then + cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ + --enable=warning,portability,performance \ + --suppress=redundantAssignment \ + --suppress=uselessAssignmentPtrArg \ + --suppress=incorrectStringBooleanError \ + --std=c89 src/*.c src/include/*.h 2> cppcheck.log + + if [ -s cppcheck.log ]; then + cat cppcheck.log + status=1 # error + fi + + exit $status fi +# don't forget to "make clean" +make USE_PGXS=1 clean + # initialize database initdb From 7cf3500b6f60c3f89ac90e80a466cd568a41be28 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:24:24 +0300 Subject: [PATCH 0633/1124] Fix few errors in tests --- run_tests.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index ebda8f79..dc1f4114 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -3,7 +3,8 @@ set -eux echo CHECK_CODE=$CHECK_CODE -echo PG_VERSION=$PG_VERSION + +status=0 # perform code analysis if necessary if [ "$CHECK_CODE" = "clang" ]; then @@ -15,6 +16,7 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then --enable=warning,portability,performance \ --suppress=redundantAssignment \ --suppress=uselessAssignmentPtrArg \ + --suppress=literalWithCharPtrCompare \ --suppress=incorrectStringBooleanError \ --std=c89 src/*.c src/include/*.h 2> cppcheck.log From 8de8b0e1c0621cbbd5a307725cb46dd6c583406f Mon Sep 17 
00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 14:29:06 +0300 Subject: [PATCH 0634/1124] Fix .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e99f5dc..576d9efb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${COMPILER}/'${COMPILER}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: From c596089cee4b91f0080e0550d025a377ca8940ac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:12:44 +0300 Subject: [PATCH 0635/1124] Try to optimize tests --- Dockerfile.tmpl | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index beda726b..529324ca 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -2,12 +2,20 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' > /etc/apk/repositories && \ - echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories && \ +RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ + apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + fi + +RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ + apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community \ + fi + +RUN if [ "${CHECK_CODE}" = "false" ] ; then \ apk --no-cache add python3 gcc make musl-dev cppcheck && \ - apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main/ && \ pip3 install testgres && \ - mkdir -p /pg/data && \ + fi + +RUN mkdir -p /pg/data && \ mkdir /pg/pg_pathman && 
\ chown postgres:postgres ${PGDATA} && \ chmod a+rwx /usr/local/lib/postgresql && \ From cc1b0229849b995dae24aa114dcbc2d00cafd87d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:29:46 +0300 Subject: [PATCH 0636/1124] Fix tests --- Dockerfile.tmpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 529324ca..16337fbb 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -7,12 +7,12 @@ RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ - apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community \ + apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community; \ fi RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc make musl-dev cppcheck && \ - pip3 install testgres && \ + apk --no-cache add python3 gcc make musl-dev cppcheck;\ + pip3 install testgres; \ fi RUN mkdir -p /pg/data && \ From 915593e330d897875efc0fcb2cec39f588eeef8c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:42:54 +0300 Subject: [PATCH 0637/1124] Fix tests --- Dockerfile.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 16337fbb..a748df46 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -3,7 +3,7 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - apk --no-cache add clang-analyzer --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + apk --no-cache add clang-analyzer make musl-dev --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ @@ -11,7 +11,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ fi RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc 
make musl-dev cppcheck;\ + apk --no-cache add python3 gcc make musl-dev;\ pip3 install testgres; \ fi From 3cac044e2971dd93474c247aebe2ed77e30ab513 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 16:45:04 +0300 Subject: [PATCH 0638/1124] Fix tests --- Dockerfile.tmpl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a748df46..b5e2f0f2 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -3,7 +3,8 @@ FROM postgres:${PG_VERSION}-alpine ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - apk --no-cache add clang-analyzer make musl-dev --repository http://dl-3.alpinelinux.org/alpine/edge/main; \ + echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ + apk --no-cache add clang-analyzer make musl-dev; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ From f949215de94e305cada522410a3e3ecaca9bf8cf Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 29 Jun 2017 17:33:13 +0300 Subject: [PATCH 0639/1124] Fix tests --- Dockerfile.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index b5e2f0f2..80ede1c0 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -4,7 +4,7 @@ ENV LANG=C.UTF-8 PGDATA=/pg/data RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add clang-analyzer make musl-dev; \ + apk --no-cache add clang-analyzer make musl-dev gcc; \ fi RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ From f60a86572ed31a3c19d8804e834ec4841f0d3c54 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 10 Jul 2017 14:41:05 +0300 Subject: [PATCH 0640/1124] [fix #101] Compatibility fix for Postgres versions 9.5.0--9.5.5 and 9.6.0--9.6.1 --- src/compat/pg_compat.c | 67 ++++++++++++++++++++++++++++++++++ src/include/compat/pg_compat.h | 8 ++++ 
src/pl_funcs.c | 1 - 3 files changed, 75 insertions(+), 1 deletion(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index e9792b3c..0e2b9f05 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -24,6 +24,7 @@ #include "optimizer/prep.h" #include "parser/parse_utilcmd.h" #include "port.h" +#include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -575,3 +576,69 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) rel->tuples = parent_rows; } + +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) \ + || (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +/* + * Return a palloc'd bare attribute map for tuple conversion, matching input + * and output columns by name. (Dropped columns are ignored in both input and + * output.) This is normally a subroutine for convert_tuples_by_name, but can + * be used standalone. + */ +AttrNumber * +convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg) +{ + AttrNumber *attrMap; + int n; + int i; + + n = outdesc->natts; + attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); + for (i = 0; i < n; i++) + { + Form_pg_attribute att = outdesc->attrs[i]; + char *attname; + Oid atttypid; + int32 atttypmod; + int j; + + if (att->attisdropped) + continue; /* attrMap[i] is already 0 */ + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + for (j = 0; j < indesc->natts; j++) + { + att = indesc->attrs[j]; + if (att->attisdropped) + continue; + if (strcmp(attname, NameStr(att->attname)) == 0) + { + /* Found it, check type */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + attrMap[i] = (AttrNumber) (j + 
1); + break; + } + } + if (attrMap[i] == 0) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not exist in type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + } + + return attrMap; +} +#endif diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 33a28339..bb95c153 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -548,6 +548,14 @@ extern void set_rel_consider_parallel(PlannerInfo *root, tlist_member_ignore_relabel((Node *) (expr), (targetlist)) #endif +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) \ + || (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg); +#else +#include "access/tupconvert.h" +#endif /* * ------------- diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f2ca6164..bb66506d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -18,7 +18,6 @@ #include "xact_handling.h" #include "utils.h" -#include "access/tupconvert.h" #include "access/htup_details.h" #include "catalog/dependency.h" #include "catalog/indexing.h" From 85e2b804fcfd69e47a3f2fbada0e11eb55cf33fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Jul 2017 16:20:38 +0300 Subject: [PATCH 0641/1124] use cmocka instead of cmockery --- Dockerfile.tmpl | 3 +- run_tests.sh | 2 +- tests/cmocka/Makefile | 4 +- tests/cmocka/cmockery.c | 1770 --------------------------------- tests/cmocka/cmockery.h | 484 --------- tests/cmocka/rangeset_tests.c | 18 +- 6 files changed, 14 insertions(+), 2267 deletions(-) delete mode 100755 tests/cmocka/cmockery.c delete mode 100755 tests/cmocka/cmockery.h diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 80ede1c0..bd78ba02 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -12,7 +12,8 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ fi RUN if [ 
"${CHECK_CODE}" = "false" ] ; then \ - apk --no-cache add python3 gcc make musl-dev;\ + echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ + apk --no-cache add python3 gcc make musl-dev cmocka-dev;\ pip3 install testgres; \ fi diff --git a/run_tests.sh b/run_tests.sh index dc1f4114..d41e053e 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -61,7 +61,7 @@ if [ $status -ne 0 ]; then exit $status; fi set -u -# run mock tests (using CFLAGS_SL for gcov) +# run cmocka tests (using CFLAGS_SL for gcov) make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? if [ $status -ne 0 ]; then exit $status; fi diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index 2d4d8bff..e31e6d95 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,11 +8,11 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +LDFLAGS += -lcmocka TEST_BIN = rangeset_tests OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ - missing_bitmapset.o rangeset_tests.o cmockery.o \ - $(TOP_SRC_DIR)/rangeset.o + missing_bitmapset.o rangeset_tests.o $(TOP_SRC_DIR)/rangeset.o all: build_extension $(TEST_BIN) diff --git a/tests/cmocka/cmockery.c b/tests/cmocka/cmockery.c deleted file mode 100755 index 5bf212dc..00000000 --- a/tests/cmocka/cmockery.c +++ /dev/null @@ -1,1770 +0,0 @@ -/* - * Copyright 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif -#ifdef HAVE_MALLOC_H -#include -#endif -#include -#ifndef _WIN32 -#include -#endif // !_WIN32 -#include -#include -#include -#include -#include -#ifdef _WIN32 -#include -#endif // _WIN32 -#include - -#ifdef _WIN32 -#define vsnprintf _vsnprintf -#endif // _WIN32 - -/* Backwards compatibility with headers shipped with Visual Studio 2005 and - * earlier. */ -#ifdef _WIN32 -WINBASEAPI BOOL WINAPI IsDebuggerPresent(VOID); -#endif // _WIN32 - -// Size of guard bytes around dynamically allocated blocks. -#define MALLOC_GUARD_SIZE 16 -// Pattern used to initialize guard blocks. -#define MALLOC_GUARD_PATTERN 0xEF -// Pattern used to initialize memory allocated with test_malloc(). -#define MALLOC_ALLOC_PATTERN 0xBA -#define MALLOC_FREE_PATTERN 0xCD -// Alignment of allocated blocks. NOTE: This must be base2. -#define MALLOC_ALIGNMENT sizeof(size_t) - -// Printf formatting for source code locations. -#define SOURCE_LOCATION_FORMAT "%s:%d" - -// Calculates the number of elements in an array. -#define ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) - -// Declare and initialize the pointer member of ValuePointer variable name -// with ptr. -#define declare_initialize_value_pointer_pointer(name, ptr) \ - ValuePointer name ; \ - name.value = 0; \ - name.pointer = (void*)(ptr) - -// Declare and initialize the value member of ValuePointer variable name -// with val. -#define declare_initialize_value_pointer_value(name, val) \ - ValuePointer name ; \ - name.value = val - -// Cast a LargestIntegralType to pointer_type via a ValuePointer. -#define cast_largest_integral_type_to_pointer( \ - pointer_type, largest_integral_type) \ - ((pointer_type)((ValuePointer*)&(largest_integral_type))->pointer) - -// Used to cast LargetIntegralType to void* and vice versa. 
-typedef union ValuePointer { - LargestIntegralType value; - void *pointer; -} ValuePointer; - -// Doubly linked list node. -typedef struct ListNode { - const void *value; - int refcount; - struct ListNode *next; - struct ListNode *prev; -} ListNode; - -// Debug information for malloc(). -typedef struct MallocBlockInfo { - void* block; // Address of the block returned by malloc(). - size_t allocated_size; // Total size of the allocated block. - size_t size; // Request block size. - SourceLocation location; // Where the block was allocated. - ListNode node; // Node within list of all allocated blocks. -} MallocBlockInfo; - -// State of each test. -typedef struct TestState { - const ListNode *check_point; // Check point of the test if there's a - // setup function. - void *state; // State associated with the test. -} TestState; - -// Determines whether two values are the same. -typedef int (*EqualityFunction)(const void *left, const void *right); - -// Value of a symbol and the place it was declared. -typedef struct SymbolValue { - SourceLocation location; - LargestIntegralType value; -} SymbolValue; - -/* Contains a list of values for a symbol. - * NOTE: Each structure referenced by symbol_values_list_head must have a - * SourceLocation as its' first member. - */ -typedef struct SymbolMapValue { - const char *symbol_name; - ListNode symbol_values_list_head; -} SymbolMapValue; - -// Used by list_free() to deallocate values referenced by list nodes. -typedef void (*CleanupListValue)(const void *value, void *cleanup_value_data); - -// Structure used to check the range of integer types. -typedef struct CheckIntegerRange { - CheckParameterEvent event; - LargestIntegralType minimum; - LargestIntegralType maximum; -} CheckIntegerRange; - -// Structure used to check whether an integer value is in a set. 
-typedef struct CheckIntegerSet { - CheckParameterEvent event; - const LargestIntegralType *set; - size_t size_of_set; -} CheckIntegerSet; - -/* Used to check whether a parameter matches the area of memory referenced by - * this structure. */ -typedef struct CheckMemoryData { - CheckParameterEvent event; - const void *memory; - size_t size; -} CheckMemoryData; - -static ListNode* list_initialize(ListNode * const node); -static ListNode* list_add(ListNode * const head, ListNode *new_node); -static ListNode* list_add_value(ListNode * const head, const void *value, - const int count); -static ListNode* list_remove( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data); -static void list_remove_free( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data); -static int list_empty(const ListNode * const head); -static int list_find( - ListNode * const head, const void *value, - const EqualityFunction equal_func, ListNode **output); -static int list_first(ListNode * const head, ListNode **output); -static ListNode* list_free( - ListNode * const head, const CleanupListValue cleanup_value, - void * const cleanup_value_data); - -static void add_symbol_value( - ListNode * const symbol_map_head, const char * const symbol_names[], - const size_t number_of_symbol_names, const void* value, const int count); -static int get_symbol_value( - ListNode * const symbol_map_head, const char * const symbol_names[], - const size_t number_of_symbol_names, void **output); -static void free_value(const void *value, void *cleanup_value_data); -static void free_symbol_map_value( - const void *value, void *cleanup_value_data); -static void remove_always_return_values(ListNode * const map_head, - const size_t number_of_symbol_names); -static int check_for_leftover_values( - const ListNode * const map_head, const char * const error_message, - const size_t number_of_symbol_names); -// This must be called at the 
beginning of a test to initialize some data -// structures. -static void initialize_testing(const char *test_name); -// This must be called at the end of a test to free() allocated structures. -static void teardown_testing(const char *test_name); -static void fail_if_leftover_values(const char *test_name); - - -// Keeps track of the calling context returned by setenv() so that the fail() -// method can jump out of a test. -static jmp_buf global_run_test_env; -static int global_running_test = 0; - -// Keeps track of the calling context returned by setenv() so that -// mock_assert() can optionally jump back to expect_assert_failure(). -jmp_buf global_expect_assert_env; -const char *global_last_failed_assert = NULL; -int global_expecting_assert = 0; - -// Keeps a map of the values that functions will have to return to provide -// mocked interfaces. -static ListNode global_function_result_map_head; -// Location of the last mock value returned was declared. -static SourceLocation global_last_mock_value_location; - -/* Keeps a map of the values that functions expect as parameters to their - * mocked interfaces. */ -static ListNode global_function_parameter_map_head; -// Location of last parameter value checked was declared. -static SourceLocation global_last_parameter_location; - -// List of all currently allocated blocks. -static ListNode global_allocated_blocks; - -#ifndef _WIN32 -// Signals caught by exception_handler(). -static const int exception_signals[] = { - SIGFPE, - SIGILL, - SIGSEGV, - SIGBUS, - SIGSYS, -}; - -// Default signal functions that should be restored after a test is complete. -typedef void (*SignalFunction)(int signal); -static SignalFunction default_signal_functions[ - ARRAY_LENGTH(exception_signals)]; - -#else // _WIN32 - -// The default exception filter. -static LPTOP_LEVEL_EXCEPTION_FILTER previous_exception_filter; - -// Fatal exceptions. 
-typedef struct ExceptionCodeInfo { - DWORD code; - const char* description; -} ExceptionCodeInfo; - -#define EXCEPTION_CODE_INFO(exception_code) {exception_code, #exception_code} - -static const ExceptionCodeInfo exception_codes[] = { - EXCEPTION_CODE_INFO(EXCEPTION_ACCESS_VIOLATION), - EXCEPTION_CODE_INFO(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), - EXCEPTION_CODE_INFO(EXCEPTION_DATATYPE_MISALIGNMENT), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_DENORMAL_OPERAND), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_DIVIDE_BY_ZERO), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_INEXACT_RESULT), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_INVALID_OPERATION), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_OVERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_STACK_CHECK), - EXCEPTION_CODE_INFO(EXCEPTION_FLT_UNDERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_GUARD_PAGE), - EXCEPTION_CODE_INFO(EXCEPTION_ILLEGAL_INSTRUCTION), - EXCEPTION_CODE_INFO(EXCEPTION_INT_DIVIDE_BY_ZERO), - EXCEPTION_CODE_INFO(EXCEPTION_INT_OVERFLOW), - EXCEPTION_CODE_INFO(EXCEPTION_INVALID_DISPOSITION), - EXCEPTION_CODE_INFO(EXCEPTION_INVALID_HANDLE), - EXCEPTION_CODE_INFO(EXCEPTION_IN_PAGE_ERROR), - EXCEPTION_CODE_INFO(EXCEPTION_NONCONTINUABLE_EXCEPTION), - EXCEPTION_CODE_INFO(EXCEPTION_PRIV_INSTRUCTION), - EXCEPTION_CODE_INFO(EXCEPTION_STACK_OVERFLOW), -}; -#endif // !_WIN32 - - -// Exit the currently executing test. -static void exit_test(const int quit_application) { - if (global_running_test) { - longjmp(global_run_test_env, 1); - } else if (quit_application) { - exit(-1); - } -} - - -// Initialize a SourceLocation structure. -static void initialize_source_location(SourceLocation * const location) { - assert_true(location); - location->file = NULL; - location->line = 0; -} - - -// Determine whether a source location is currently set. -static int source_location_is_set(const SourceLocation * const location) { - assert_true(location); - return location->file && location->line; -} - - -// Set a source location. 
-static void set_source_location( - SourceLocation * const location, const char * const file, - const int line) { - assert_true(location); - location->file = file; - location->line = line; -} - - -// Create function results and expected parameter lists. -void initialize_testing(const char *test_name) { - list_initialize(&global_function_result_map_head); - initialize_source_location(&global_last_mock_value_location); - list_initialize(&global_function_parameter_map_head); - initialize_source_location(&global_last_parameter_location); -} - - -static void fail_if_leftover_values(const char *test_name) { - int error_occurred = 0; - remove_always_return_values(&global_function_result_map_head, 1); - if (check_for_leftover_values( - &global_function_result_map_head, - "%s() has remaining non-returned values.\n", 1)) { - error_occurred = 1; - } - - remove_always_return_values(&global_function_parameter_map_head, 2); - if (check_for_leftover_values( - &global_function_parameter_map_head, - "%s parameter still has values that haven't been checked.\n", 2)) { - error_occurred = 1; - } - if (error_occurred) { - exit_test(1); - } -} - - -void teardown_testing(const char *test_name) { - list_free(&global_function_result_map_head, free_symbol_map_value, - (void*)0); - initialize_source_location(&global_last_mock_value_location); - list_free(&global_function_parameter_map_head, free_symbol_map_value, - (void*)1); - initialize_source_location(&global_last_parameter_location); -} - -// Initialize a list node. -static ListNode* list_initialize(ListNode * const node) { - node->value = NULL; - node->next = node; - node->prev = node; - node->refcount = 1; - return node; -} - - -/* Adds a value at the tail of a given list. - * The node referencing the value is allocated from the heap. 
*/ -static ListNode* list_add_value(ListNode * const head, const void *value, - const int refcount) { - ListNode * const new_node = (ListNode*)malloc(sizeof(ListNode)); - assert_true(head); - assert_true(value); - new_node->value = value; - new_node->refcount = refcount; - return list_add(head, new_node); -} - - -// Add new_node to the end of the list. -static ListNode* list_add(ListNode * const head, ListNode *new_node) { - assert_true(head); - assert_true(new_node); - new_node->next = head; - new_node->prev = head->prev; - head->prev->next = new_node; - head->prev = new_node; - return new_node; -} - - -// Remove a node from a list. -static ListNode* list_remove( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(node); - node->prev->next = node->next; - node->next->prev = node->prev; - if (cleanup_value) { - cleanup_value(node->value, cleanup_value_data); - } - return node; -} - - -/* Remove a list node from a list and free the node. */ -static void list_remove_free( - ListNode * const node, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(node); - free(list_remove(node, cleanup_value, cleanup_value_data)); -} - - -/* Frees memory kept by a linked list - * The cleanup_value function is called for every "value" field of nodes in the - * list, except for the head. In addition to each list value, - * cleanup_value_data is passed to each call to cleanup_value. The head - * of the list is not deallocated. - */ -static ListNode* list_free( - ListNode * const head, const CleanupListValue cleanup_value, - void * const cleanup_value_data) { - assert_true(head); - while (!list_empty(head)) { - list_remove_free(head->next, cleanup_value, cleanup_value_data); - } - return head; -} - - -// Determine whether a list is empty. 
-static int list_empty(const ListNode * const head) { - assert_true(head); - return head->next == head; -} - - -/* Find a value in the list using the equal_func to compare each node with the - * value. - */ -static int list_find(ListNode * const head, const void *value, - const EqualityFunction equal_func, ListNode **output) { - ListNode *current; - assert_true(head); - for (current = head->next; current != head; current = current->next) { - if (equal_func(current->value, value)) { - *output = current; - return 1; - } - } - return 0; -} - -// Returns the first node of a list -static int list_first(ListNode * const head, ListNode **output) { - ListNode *target_node; - assert_true(head); - if (list_empty(head)) { - return 0; - } - target_node = head->next; - *output = target_node; - return 1; -} - - -// Deallocate a value referenced by a list. -static void free_value(const void *value, void *cleanup_value_data) { - assert_true(value); - free((void*)value); -} - - -// Releases memory associated to a symbol_map_value. -static void free_symbol_map_value(const void *value, - void *cleanup_value_data) { - SymbolMapValue * const map_value = (SymbolMapValue*)value; - assert_true(value); - list_free(&map_value->symbol_values_list_head, - cleanup_value_data ? free_symbol_map_value : free_value, - (void *)((char *) cleanup_value_data - 1)); - free(map_value); -} - - -/* Determine whether a symbol name referenced by a symbol_map_value - * matches the specified function name. */ -static int symbol_names_match(const void *map_value, const void *symbol) { - return !strcmp(((SymbolMapValue*)map_value)->symbol_name, - (const char*)symbol); -} - - -/* Adds a value to the queue of values associated with the given - * hierarchy of symbols. It's assumed value is allocated from the heap. 
- */ -static void add_symbol_value(ListNode * const symbol_map_head, - const char * const symbol_names[], - const size_t number_of_symbol_names, - const void* value, const int refcount) { - const char* symbol_name; - ListNode *target_node; - SymbolMapValue *target_map_value; - assert_true(symbol_map_head); - assert_true(symbol_names); - assert_true(number_of_symbol_names); - symbol_name = symbol_names[0]; - - if (!list_find(symbol_map_head, symbol_name, symbol_names_match, - &target_node)) { - SymbolMapValue * const new_symbol_map_value = - malloc(sizeof(*new_symbol_map_value)); - new_symbol_map_value->symbol_name = symbol_name; - list_initialize(&new_symbol_map_value->symbol_values_list_head); - target_node = list_add_value(symbol_map_head, new_symbol_map_value, - 1); - } - - target_map_value = (SymbolMapValue*)target_node->value; - if (number_of_symbol_names == 1) { - list_add_value(&target_map_value->symbol_values_list_head, - value, refcount); - } else { - add_symbol_value(&target_map_value->symbol_values_list_head, - &symbol_names[1], number_of_symbol_names - 1, value, - refcount); - } -} - - -/* Gets the next value associated with the given hierarchy of symbols. - * The value is returned as an output parameter with the function returning the - * node's old refcount value if a value is found, 0 otherwise. - * This means that a return value of 1 indicates the node was just removed from - * the list. 
- */ -static int get_symbol_value( - ListNode * const head, const char * const symbol_names[], - const size_t number_of_symbol_names, void **output) { - const char* symbol_name; - ListNode *target_node; - assert_true(head); - assert_true(symbol_names); - assert_true(number_of_symbol_names); - assert_true(output); - symbol_name = symbol_names[0]; - - if (list_find(head, symbol_name, symbol_names_match, &target_node)) { - SymbolMapValue *map_value; - ListNode *child_list; - int return_value = 0; - assert_true(target_node); - assert_true(target_node->value); - - map_value = (SymbolMapValue*)target_node->value; - child_list = &map_value->symbol_values_list_head; - - if (number_of_symbol_names == 1) { - ListNode *value_node = NULL; - return_value = list_first(child_list, &value_node); - assert_true(return_value); - *output = (void*) value_node->value; - return_value = value_node->refcount; - if (--value_node->refcount == 0) { - list_remove_free(value_node, NULL, NULL); - } - } else { - return_value = get_symbol_value( - child_list, &symbol_names[1], number_of_symbol_names - 1, - output); - } - if (list_empty(child_list)) { - list_remove_free(target_node, free_symbol_map_value, (void*)0); - } - return return_value; - } else { - print_error("No entries for symbol %s.\n", symbol_name); - } - return 0; -} - - -/* Traverse down a tree of symbol values and remove the first symbol value - * in each branch that has a refcount < -1 (i.e should always be returned - * and has been returned at least once). 
- */ -static void remove_always_return_values(ListNode * const map_head, - const size_t number_of_symbol_names) { - ListNode *current; - assert_true(map_head); - assert_true(number_of_symbol_names); - current = map_head->next; - while (current != map_head) { - SymbolMapValue * const value = (SymbolMapValue*)current->value; - ListNode * const next = current->next; - ListNode *child_list; - assert_true(value); - child_list = &value->symbol_values_list_head; - - if (!list_empty(child_list)) { - if (number_of_symbol_names == 1) { - ListNode * const child_node = child_list->next; - // If this item has been returned more than once, free it. - if (child_node->refcount < -1) { - list_remove_free(child_node, free_value, NULL); - } - } else { - remove_always_return_values(child_list, - number_of_symbol_names - 1); - } - } - - if (list_empty(child_list)) { - list_remove_free(current, free_value, NULL); - } - current = next; - } -} - -/* Checks if there are any leftover values set up by the test that were never - * retrieved through execution, and fail the test if that is the case. 
- */ -static int check_for_leftover_values( - const ListNode * const map_head, const char * const error_message, - const size_t number_of_symbol_names) { - const ListNode *current; - int symbols_with_leftover_values = 0; - assert_true(map_head); - assert_true(number_of_symbol_names); - - for (current = map_head->next; current != map_head; - current = current->next) { - const SymbolMapValue * const value = - (SymbolMapValue*)current->value; - const ListNode *child_list; - assert_true(value); - child_list = &value->symbol_values_list_head; - - if (!list_empty(child_list)) { - if (number_of_symbol_names == 1) { - const ListNode *child_node; - print_error(error_message, value->symbol_name); - print_error(" Remaining item(s) declared at...\n"); - - for (child_node = child_list->next; child_node != child_list; - child_node = child_node->next) { - const SourceLocation * const location = child_node->value; - print_error(" " SOURCE_LOCATION_FORMAT "\n", - location->file, location->line); - } - } else { - print_error("%s.", value->symbol_name); - check_for_leftover_values(child_list, error_message, - number_of_symbol_names - 1); - } - symbols_with_leftover_values ++; - } - } - return symbols_with_leftover_values; -} - - -// Get the next return value for the specified mock function. 
-LargestIntegralType _mock(const char * const function, const char* const file, - const int line) { - void *result; - const int rc = get_symbol_value(&global_function_result_map_head, - &function, 1, &result); - if (rc) { - SymbolValue * const symbol = (SymbolValue*)result; - const LargestIntegralType value = symbol->value; - global_last_mock_value_location = symbol->location; - if (rc == 1) { - free(symbol); - } - return value; - } else { - print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " - "to mock function %s\n", file, line, function); - if (source_location_is_set(&global_last_mock_value_location)) { - print_error("Previously returned mock value was declared at " - SOURCE_LOCATION_FORMAT "\n", - global_last_mock_value_location.file, - global_last_mock_value_location.line); - } else { - print_error("There were no previously returned mock values for " - "this test.\n"); - } - exit_test(1); - } - return 0; -} - - -// Add a return value for the specified mock function name. -void _will_return(const char * const function_name, const char * const file, - const int line, const LargestIntegralType value, - const int count) { - SymbolValue * const return_value = malloc(sizeof(*return_value)); - assert_true(count > 0 || count == -1); - return_value->value = value; - set_source_location(&return_value->location, file, line); - add_symbol_value(&global_function_result_map_head, &function_name, 1, - return_value, count); -} - - -/* Add a custom parameter checking function. If the event parameter is NULL - * the event structure is allocated internally by this function. If event - * parameter is provided it must be allocated on the heap and doesn't need to - * be deallocated by the caller. 
- */ -void _expect_check( - const char* const function, const char* const parameter, - const char* const file, const int line, - const CheckParameterValue check_function, - const LargestIntegralType check_data, - CheckParameterEvent * const event, const int count) { - CheckParameterEvent * const check = - event ? event : malloc(sizeof(*check)); - const char* symbols[] = {function, parameter}; - check->parameter_name = parameter; - check->check_value = check_function; - check->check_value_data = check_data; - set_source_location(&check->location, file, line); - add_symbol_value(&global_function_parameter_map_head, symbols, 2, check, - count); -} - - -/* Returns 1 if the specified values are equal. If the values are not equal - * an error is displayed and 0 is returned. */ -static int values_equal_display_error(const LargestIntegralType left, - const LargestIntegralType right) { - const int equal = left == right; - if (!equal) { - print_error(LargestIntegralTypePrintfFormat " != " - LargestIntegralTypePrintfFormat "\n", left, right); - } - return equal; -} - -/* Returns 1 if the specified values are not equal. If the values are equal - * an error is displayed and 0 is returned. */ -static int values_not_equal_display_error(const LargestIntegralType left, - const LargestIntegralType right) { - const int not_equal = left != right; - if (!not_equal) { - print_error(LargestIntegralTypePrintfFormat " == " - LargestIntegralTypePrintfFormat "\n", left, right); - } - return not_equal; -} - - -/* Determine whether value is contained within check_integer_set. - * If invert is 0 and the value is in the set 1 is returned, otherwise 0 is - * returned and an error is displayed. If invert is 1 and the value is not - * in the set 1 is returned, otherwise 0 is returned and an error is - * displayed. 
*/ -static int value_in_set_display_error( - const LargestIntegralType value, - const CheckIntegerSet * const check_integer_set, const int invert) { - int succeeded = invert; - assert_true(check_integer_set); - { - const LargestIntegralType * const set = check_integer_set->set; - const size_t size_of_set = check_integer_set->size_of_set; - size_t i; - for (i = 0; i < size_of_set; i++) { - if (set[i] == value) { - // If invert = 0 and item is found, succeeded = 1. - // If invert = 1 and item is found, succeeded = 0. - succeeded = !succeeded; - break; - } - } - if (succeeded) { - return 1; - } - print_error("%d is %sin the set (", value, invert ? "" : "not "); - for (i = 0; i < size_of_set; i++) { - print_error("%d, ", set[i]); - } - print_error(")\n"); - } - return 0; -} - - -/* Determine whether a value is within the specified range. If the value is - * within the specified range 1 is returned. If the value isn't within the - * specified range an error is displayed and 0 is returned. */ -static int integer_in_range_display_error( - const LargestIntegralType value, const LargestIntegralType range_min, - const LargestIntegralType range_max) { - if (value >= range_min && value <= range_max) { - return 1; - } - print_error("%d is not within the range %d-%d\n", value, range_min, - range_max); - return 0; -} - - -/* Determine whether a value is within the specified range. If the value - * is not within the range 1 is returned. If the value is within the - * specified range an error is displayed and zero is returned. */ -static int integer_not_in_range_display_error( - const LargestIntegralType value, const LargestIntegralType range_min, - const LargestIntegralType range_max) { - if (value < range_min || value > range_max) { - return 1; - } - print_error("%d is within the range %d-%d\n", value, range_min, - range_max); - return 0; -} - - -/* Determine whether the specified strings are equal. If the strings are equal - * 1 is returned. 
If they're not equal an error is displayed and 0 is - * returned. */ -static int string_equal_display_error( - const char * const left, const char * const right) { - if (strcmp(left, right) == 0) { - return 1; - } - print_error("\"%s\" != \"%s\"\n", left, right); - return 0; -} - - -/* Determine whether the specified strings are equal. If the strings are not - * equal 1 is returned. If they're not equal an error is displayed and 0 is - * returned */ -static int string_not_equal_display_error( - const char * const left, const char * const right) { - if (strcmp(left, right) != 0) { - return 1; - } - print_error("\"%s\" == \"%s\"\n", left, right); - return 0; -} - - -/* Determine whether the specified areas of memory are equal. If they're equal - * 1 is returned otherwise an error is displayed and 0 is returned. */ -static int memory_equal_display_error(const char* const a, const char* const b, - const size_t size) { - int differences = 0; - size_t i; - for (i = 0; i < size; i++) { - const char l = a[i]; - const char r = b[i]; - if (l != r) { - print_error("difference at offset %d 0x%02x 0x%02x\n", i, l, r); - differences ++; - } - } - if (differences) { - print_error("%d bytes of 0x%08x and 0x%08x differ\n", differences, - a, b); - return 0; - } - return 1; -} - - -/* Determine whether the specified areas of memory are not equal. If they're - * not equal 1 is returned otherwise an error is displayed and 0 is - * returned. */ -static int memory_not_equal_display_error( - const char* const a, const char* const b, const size_t size) { - int same = 0; - size_t i; - for (i = 0; i < size; i++) { - const char l = a[i]; - const char r = b[i]; - if (l == r) { - same ++; - } - } - if (same == size) { - print_error("%d bytes of 0x%08x and 0x%08x the same\n", same, - a, b); - return 0; - } - return 1; -} - - -// CheckParameterValue callback to check whether a value is within a set. 
-static int check_in_set(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return value_in_set_display_error(value, - cast_largest_integral_type_to_pointer(CheckIntegerSet*, - check_value_data), 0); -} - - -// CheckParameterValue callback to check whether a value isn't within a set. -static int check_not_in_set(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return value_in_set_display_error(value, - cast_largest_integral_type_to_pointer(CheckIntegerSet*, - check_value_data), 1); -} - - -/* Create the callback data for check_in_set() or check_not_in_set() and - * register a check event. */ -static void expect_set( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType values[], const size_t number_of_values, - const CheckParameterValue check_function, const int count) { - CheckIntegerSet * const check_integer_set = - malloc(sizeof(*check_integer_set) + - (sizeof(values[0]) * number_of_values)); - LargestIntegralType * const set = (LargestIntegralType*)( - check_integer_set + 1); - declare_initialize_value_pointer_pointer(check_data, check_integer_set); - assert_true(values); - assert_true(number_of_values); - memcpy(set, values, number_of_values * sizeof(values[0])); - check_integer_set->set = set; - _expect_check( - function, parameter, file, line, check_function, - check_data.value, &check_integer_set->event, count); -} - - -// Add an event to check whether a value is in a set. -void _expect_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType values[], const size_t number_of_values, - const int count) { - expect_set(function, parameter, file, line, values, number_of_values, - check_in_set, count); -} - - -// Add an event to check whether a value isn't in a set. 
-void _expect_not_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType values[], const size_t number_of_values, - const int count) { - expect_set(function, parameter, file, line, values, number_of_values, - check_not_in_set, count); -} - - -// CheckParameterValue callback to check whether a value is within a range. -static int check_in_range(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckIntegerRange * const check_integer_range = - cast_largest_integral_type_to_pointer(CheckIntegerRange*, - check_value_data); - assert_true(check_integer_range); - return integer_in_range_display_error(value, check_integer_range->minimum, - check_integer_range->maximum); -} - - -// CheckParameterValue callback to check whether a value is not within a range. -static int check_not_in_range(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckIntegerRange * const check_integer_range = - cast_largest_integral_type_to_pointer(CheckIntegerRange*, - check_value_data); - assert_true(check_integer_range); - return integer_not_in_range_display_error( - value, check_integer_range->minimum, check_integer_range->maximum); -} - - -/* Create the callback data for check_in_range() or check_not_in_range() and - * register a check event. 
*/ -static void expect_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const CheckParameterValue check_function, const int count) { - CheckIntegerRange * const check_integer_range = - malloc(sizeof(*check_integer_range)); - declare_initialize_value_pointer_pointer(check_data, check_integer_range); - check_integer_range->minimum = minimum; - check_integer_range->maximum = maximum; - _expect_check(function, parameter, file, line, check_function, - check_data.value, &check_integer_range->event, count); -} - - -// Add an event to determine whether a parameter is within a range. -void _expect_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const int count) { - expect_range(function, parameter, file, line, minimum, maximum, - check_in_range, count); -} - - -// Add an event to determine whether a parameter is not within a range. -void _expect_not_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, const LargestIntegralType maximum, - const int count) { - expect_range(function, parameter, file, line, minimum, maximum, - check_not_in_range, count); -} - - -/* CheckParameterValue callback to check whether a value is equal to an - * expected value. */ -static int check_value(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return values_equal_display_error(value, check_value_data); -} - - -// Add an event to check a parameter equals an expected value. 
-void _expect_value( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType value, const int count) { - _expect_check(function, parameter, file, line, check_value, value, NULL, - count); -} - - -/* CheckParameterValue callback to check whether a value is not equal to an - * expected value. */ -static int check_not_value(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return values_not_equal_display_error(value, check_value_data); -} - - -// Add an event to check a parameter is not equal to an expected value. -void _expect_not_value( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType value, const int count) { - _expect_check(function, parameter, file, line, check_not_value, value, - NULL, count); -} - - -// CheckParameterValue callback to check whether a parameter equals a string. -static int check_string(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return string_equal_display_error( - cast_largest_integral_type_to_pointer(char*, value), - cast_largest_integral_type_to_pointer(char*, check_value_data)); -} - - -// Add an event to check whether a parameter is equal to a string. -void _expect_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count) { - declare_initialize_value_pointer_pointer(string_pointer, (char*)string); - _expect_check(function, parameter, file, line, check_string, - string_pointer.value, NULL, count); -} - - -/* CheckParameterValue callback to check whether a parameter is not equals to - * a string. 
*/ -static int check_not_string(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return string_not_equal_display_error( - cast_largest_integral_type_to_pointer(char*, value), - cast_largest_integral_type_to_pointer(char*, check_value_data)); -} - - -// Add an event to check whether a parameter is not equal to a string. -void _expect_not_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count) { - declare_initialize_value_pointer_pointer(string_pointer, (char*)string); - _expect_check(function, parameter, file, line, check_not_string, - string_pointer.value, NULL, count); -} - -/* CheckParameterValue callback to check whether a parameter equals an area of - * memory. */ -static int check_memory(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckMemoryData * const check = cast_largest_integral_type_to_pointer( - CheckMemoryData*, check_value_data); - assert_true(check); - return memory_equal_display_error( - cast_largest_integral_type_to_pointer(void*, value), - check->memory, check->size); -} - - -/* Create the callback data for check_memory() or check_not_memory() and - * register a check event. 
*/ -static void expect_memory_setup( - const char* const function, const char* const parameter, - const char* const file, const int line, - const void * const memory, const size_t size, - const CheckParameterValue check_function, const int count) { - CheckMemoryData * const check_data = malloc(sizeof(*check_data) + size); - void * const mem = (void*)(check_data + 1); - declare_initialize_value_pointer_pointer(check_data_pointer, check_data); - assert_true(memory); - assert_true(size); - memcpy(mem, memory, size); - check_data->memory = mem; - check_data->size = size; - _expect_check(function, parameter, file, line, check_function, - check_data_pointer.value, &check_data->event, count); -} - - -// Add an event to check whether a parameter matches an area of memory. -void _expect_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count) { - expect_memory_setup(function, parameter, file, line, memory, size, - check_memory, count); -} - - -/* CheckParameterValue callback to check whether a parameter is not equal to - * an area of memory. */ -static int check_not_memory(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - CheckMemoryData * const check = cast_largest_integral_type_to_pointer( - CheckMemoryData*, check_value_data); - assert_true(check); - return memory_not_equal_display_error( - cast_largest_integral_type_to_pointer(void*, value), check->memory, - check->size); -} - - -// Add an event to check whether a parameter doesn't match an area of memory. -void _expect_not_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count) { - expect_memory_setup(function, parameter, file, line, memory, size, - check_not_memory, count); -} - - -// CheckParameterValue callback that always returns 1. 
-static int check_any(const LargestIntegralType value, - const LargestIntegralType check_value_data) { - return 1; -} - - -// Add an event to allow any value for a parameter. -void _expect_any( - const char* const function, const char* const parameter, - const char* const file, const int line, const int count) { - _expect_check(function, parameter, file, line, check_any, 0, NULL, - count); -} - - -void _check_expected( - const char * const function_name, const char * const parameter_name, - const char* file, const int line, const LargestIntegralType value) { - void *result; - const char* symbols[] = {function_name, parameter_name}; - const int rc = get_symbol_value(&global_function_parameter_map_head, - symbols, 2, &result); - if (rc) { - CheckParameterEvent * const check = (CheckParameterEvent*)result; - int check_succeeded; - global_last_parameter_location = check->location; - check_succeeded = check->check_value(value, check->check_value_data); - if (rc == 1) { - free(check); - } - if (!check_succeeded) { - print_error("ERROR: Check of parameter %s, function %s failed\n" - "Expected parameter declared at " - SOURCE_LOCATION_FORMAT "\n", - parameter_name, function_name, - global_last_parameter_location.file, - global_last_parameter_location.line); - _fail(file, line); - } - } else { - print_error("ERROR: " SOURCE_LOCATION_FORMAT " - Could not get value " - "to check parameter %s of function %s\n", file, line, - parameter_name, function_name); - if (source_location_is_set(&global_last_parameter_location)) { - print_error("Previously declared parameter value was declared at " - SOURCE_LOCATION_FORMAT "\n", - global_last_parameter_location.file, - global_last_parameter_location.line); - } else { - print_error("There were no previously declared parameter values " - "for this test.\n"); - } - exit_test(1); - } -} - - - -/* Replacement for assert. 
*/ -void mock_assert(const int result, const char* const expression, - const char* const file, const int line) { - if (!result) { - if (global_expecting_assert) { - global_last_failed_assert = expression; - longjmp(global_expect_assert_env, result); - } else { - print_error("ASSERT: %s\n", expression); - _fail(file, line); - } - } -} - - -void _assert_true(const LargestIntegralType result, - const char * const expression, - const char * const file, const int line) { - if (!result) { - print_error("%s\n", expression); - _fail(file, line); - } -} - -void _assert_int_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line) { - if (!values_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_int_not_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line) { - if (!values_not_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_string_equal(const char * const a, const char * const b, - const char * const file, const int line) { - if (!string_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_string_not_equal(const char * const a, const char * const b, - const char *file, const int line) { - if (!string_not_equal_display_error(a, b)) { - _fail(file, line); - } -} - - -void _assert_memory_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line) { - if (!memory_equal_display_error((const char*)a, (const char*)b, size)) { - _fail(file, line); - } -} - - -void _assert_memory_not_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line) { - if (!memory_not_equal_display_error((const char*)a, (const char*)b, - size)) { - _fail(file, line); - } -} - - -void _assert_in_range( - const LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const 
file, - const int line) { - if (!integer_in_range_display_error(value, minimum, maximum)) { - _fail(file, line); - } -} - -void _assert_not_in_range( - const LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const file, - const int line) { - if (!integer_not_in_range_display_error(value, minimum, maximum)) { - _fail(file, line); - } -} - -void _assert_in_set(const LargestIntegralType value, - const LargestIntegralType values[], - const size_t number_of_values, const char* const file, - const int line) { - CheckIntegerSet check_integer_set; - check_integer_set.set = values; - check_integer_set.size_of_set = number_of_values; - if (!value_in_set_display_error(value, &check_integer_set, 0)) { - _fail(file, line); - } -} - -void _assert_not_in_set(const LargestIntegralType value, - const LargestIntegralType values[], - const size_t number_of_values, const char* const file, - const int line) { - CheckIntegerSet check_integer_set; - check_integer_set.set = values; - check_integer_set.size_of_set = number_of_values; - if (!value_in_set_display_error(value, &check_integer_set, 1)) { - _fail(file, line); - } -} - - -// Get the list of allocated blocks. -static ListNode* get_allocated_blocks_list() { - // If it initialized, initialize the list of allocated blocks. - if (!global_allocated_blocks.value) { - list_initialize(&global_allocated_blocks); - global_allocated_blocks.value = (void*)1; - } - return &global_allocated_blocks; -} - -// Use the real malloc in this function. -#undef malloc -void* _test_malloc(const size_t size, const char* file, const int line) { - char* ptr; - MallocBlockInfo *block_info; - ListNode * const block_list = get_allocated_blocks_list(); - const size_t allocate_size = size + (MALLOC_GUARD_SIZE * 2) + - sizeof(*block_info) + MALLOC_ALIGNMENT; - char* const block = (char*)malloc(allocate_size); - assert_true(block); - - // Calculate the returned address. 
- ptr = (char*)(((size_t)block + MALLOC_GUARD_SIZE + sizeof(*block_info) + - MALLOC_ALIGNMENT) & ~(MALLOC_ALIGNMENT - 1)); - - // Initialize the guard blocks. - memset(ptr - MALLOC_GUARD_SIZE, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); - memset(ptr + size, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); - memset(ptr, MALLOC_ALLOC_PATTERN, size); - - block_info = (MallocBlockInfo*)(ptr - (MALLOC_GUARD_SIZE + - sizeof(*block_info))); - set_source_location(&block_info->location, file, line); - block_info->allocated_size = allocate_size; - block_info->size = size; - block_info->block = block; - block_info->node.value = block_info; - list_add(block_list, &block_info->node); - return ptr; -} -#define malloc test_malloc - - -void* _test_calloc(const size_t number_of_elements, const size_t size, - const char* file, const int line) { - void* const ptr = _test_malloc(number_of_elements * size, file, line); - if (ptr) { - memset(ptr, 0, number_of_elements * size); - } - return ptr; -} - - -// Use the real free in this function. -#undef free -void _test_free(void* const ptr, const char* file, const int line) { - unsigned int i; - char *block = (char*)ptr; - MallocBlockInfo *block_info; - _assert_true((LargestIntegralType)ptr, "ptr", file, line); - block_info = (MallocBlockInfo*)(block - (MALLOC_GUARD_SIZE + - sizeof(*block_info))); - // Check the guard blocks. 
- { - char *guards[2] = {block - MALLOC_GUARD_SIZE, - block + block_info->size}; - for (i = 0; i < ARRAY_LENGTH(guards); i++) { - unsigned int j; - char * const guard = guards[i]; - for (j = 0; j < MALLOC_GUARD_SIZE; j++) { - const char diff = guard[j] - MALLOC_GUARD_PATTERN; - if (diff) { - print_error( - "Guard block of 0x%08x size=%d allocated by " - SOURCE_LOCATION_FORMAT " at 0x%08x is corrupt\n", - (size_t)ptr, block_info->size, - block_info->location.file, block_info->location.line, - (size_t)&guard[j]); - _fail(file, line); - } - } - } - } - list_remove(&block_info->node, NULL, NULL); - - block = block_info->block; - memset(block, MALLOC_FREE_PATTERN, block_info->allocated_size); - free(block); -} -#define free test_free - - -// Crudely checkpoint the current heap state. -static const ListNode* check_point_allocated_blocks() { - return get_allocated_blocks_list()->prev; -} - - -/* Display the blocks allocated after the specified check point. This - * function returns the number of blocks displayed. */ -static int display_allocated_blocks(const ListNode * const check_point) { - const ListNode * const head = get_allocated_blocks_list(); - const ListNode *node; - int allocated_blocks = 0; - assert_true(check_point); - assert_true(check_point->next); - - for (node = check_point->next; node != head; node = node->next) { - const MallocBlockInfo * const block_info = node->value; - assert_true(block_info); - - if (!allocated_blocks) { - print_error("Blocks allocated...\n"); - } - print_error(" 0x%08x : " SOURCE_LOCATION_FORMAT "\n", - block_info->block, block_info->location.file, - block_info->location.line); - allocated_blocks ++; - } - return allocated_blocks; -} - - -// Free all blocks allocated after the specified check point. 
-static void free_allocated_blocks(const ListNode * const check_point) { - const ListNode * const head = get_allocated_blocks_list(); - const ListNode *node; - assert_true(check_point); - - node = check_point->next; - assert_true(node); - - while (node != head) { - MallocBlockInfo * const block_info = (MallocBlockInfo*)node->value; - node = node->next; - free((char*)block_info + sizeof(*block_info) + MALLOC_GUARD_SIZE); - } -} - - -// Fail if any any blocks are allocated after the specified check point. -static void fail_if_blocks_allocated(const ListNode * const check_point, - const char * const test_name) { - const int allocated_blocks = display_allocated_blocks(check_point); - if (allocated_blocks) { - free_allocated_blocks(check_point); - print_error("ERROR: %s leaked %d block(s)\n", test_name, - allocated_blocks); - exit_test(1); - } -} - - -void _fail(const char * const file, const int line) { - print_error("ERROR: " SOURCE_LOCATION_FORMAT " Failure!\n", file, line); - exit_test(1); -} - - -#ifndef _WIN32 -static void exception_handler(int sig) { - print_error("%s\n", strsignal(sig)); - exit_test(1); -} - -#else // _WIN32 - -static LONG WINAPI exception_filter(EXCEPTION_POINTERS *exception_pointers) { - EXCEPTION_RECORD * const exception_record = - exception_pointers->ExceptionRecord; - const DWORD code = exception_record->ExceptionCode; - unsigned int i; - for (i = 0; i < ARRAY_LENGTH(exception_codes); i++) { - const ExceptionCodeInfo * const code_info = &exception_codes[i]; - if (code == code_info->code) { - static int shown_debug_message = 0; - fflush(stdout); - print_error("%s occurred at 0x%08x.\n", code_info->description, - exception_record->ExceptionAddress); - if (!shown_debug_message) { - print_error( - "\n" - "To debug in Visual Studio...\n" - "1. Select menu item File->Open Project\n" - "2. Change 'Files of type' to 'Executable Files'\n" - "3. Open this executable.\n" - "4. 
Select menu item Debug->Start\n" - "\n" - "Alternatively, set the environment variable \n" - "UNIT_TESTING_DEBUG to 1 and rebuild this executable, \n" - "then click 'Debug' in the popup dialog box.\n" - "\n"); - shown_debug_message = 1; - } - exit_test(0); - return EXCEPTION_EXECUTE_HANDLER; - } - } - return EXCEPTION_CONTINUE_SEARCH; -} -#endif // !_WIN32 - - -// Standard output and error print methods. -void vprint_message(const char* const format, va_list args) { - char buffer[1024]; - vsnprintf(buffer, sizeof(buffer), format, args); - puts(buffer); -#ifdef _WIN32 - OutputDebugString(buffer); -#endif // _WIN32 -} - - -void vprint_error(const char* const format, va_list args) { - char buffer[1024]; - vsnprintf(buffer, sizeof(buffer), format, args); - fputs(buffer, stderr); -#ifdef _WIN32 - OutputDebugString(buffer); -#endif // _WIN32 -} - - -void print_message(const char* const format, ...) { - va_list args; - va_start(args, format); - vprint_message(format, args); - va_end(args); -} - - -void print_error(const char* const format, ...) { - va_list args; - va_start(args, format); - vprint_error(format, args); - va_end(args); -} - - -int _run_test( - const char * const function_name, const UnitTestFunction Function, - void ** const state, const UnitTestFunctionType function_type, - const void* const heap_check_point) { - const ListNode * const check_point = heap_check_point ? 
- heap_check_point : check_point_allocated_blocks(); - void *current_state = NULL; - int rc = 1; - int handle_exceptions = 1; -#ifdef _WIN32 - handle_exceptions = !IsDebuggerPresent(); -#endif // _WIN32 -#if UNIT_TESTING_DEBUG - handle_exceptions = 0; -#endif // UNIT_TESTING_DEBUG - - if (handle_exceptions) { -#ifndef _WIN32 - unsigned int i; - for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) { - default_signal_functions[i] = signal( - exception_signals[i], exception_handler); - } -#else // _WIN32 - previous_exception_filter = SetUnhandledExceptionFilter( - exception_filter); -#endif // !_WIN32 - } - - if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { - print_message("%s: Starting test\n", function_name); - } - initialize_testing(function_name); - global_running_test = 1; - if (setjmp(global_run_test_env) == 0) { - Function(state ? state : ¤t_state); - fail_if_leftover_values(function_name); - - /* If this is a setup function then ignore any allocated blocks - * only ensure they're deallocated on tear down. */ - if (function_type != UNIT_TEST_FUNCTION_TYPE_SETUP) { - fail_if_blocks_allocated(check_point, function_name); - } - - global_running_test = 0; - - if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { - print_message("%s: Test completed successfully.\n", function_name); - } - rc = 0; - } else { - global_running_test = 0; - print_message("%s: Test failed.\n", function_name); - } - teardown_testing(function_name); - - if (handle_exceptions) { -#ifndef _WIN32 - unsigned int i; - for (i = 0; i < ARRAY_LENGTH(exception_signals); i++) { - signal(exception_signals[i], default_signal_functions[i]); - } -#else // _WIN32 - if (previous_exception_filter) { - SetUnhandledExceptionFilter(previous_exception_filter); - previous_exception_filter = NULL; - } -#endif // !_WIN32 - } - - return rc; -} - - -int _run_tests(const UnitTest * const tests, const size_t number_of_tests) { - // Whether to execute the next test. 
- int run_next_test = 1; - // Whether the previous test failed. - int previous_test_failed = 0; - // Check point of the heap state. - const ListNode * const check_point = check_point_allocated_blocks(); - // Current test being executed. - size_t current_test = 0; - // Number of tests executed. - size_t tests_executed = 0; - // Number of failed tests. - size_t total_failed = 0; - // Number of setup functions. - size_t setups = 0; - // Number of teardown functions. - size_t teardowns = 0; - /* A stack of test states. A state is pushed on the stack - * when a test setup occurs and popped on tear down. */ - TestState* test_states = malloc(number_of_tests * sizeof(*test_states)); - size_t number_of_test_states = 0; - // Names of the tests that failed. - const char** failed_names = malloc(number_of_tests * - sizeof(*failed_names)); - void **current_state = NULL; - // Make sure LargestIntegralType is at least the size of a pointer. - assert_true(sizeof(LargestIntegralType) >= sizeof(void*)); - - while (current_test < number_of_tests) { - const ListNode *test_check_point = NULL; - TestState *current_TestState; - const UnitTest * const test = &tests[current_test++]; - if (!test->function) { - continue; - } - - switch (test->function_type) { - case UNIT_TEST_FUNCTION_TYPE_TEST: - run_next_test = 1; - break; - case UNIT_TEST_FUNCTION_TYPE_SETUP: { - // Checkpoint the heap before the setup. - current_TestState = &test_states[number_of_test_states++]; - current_TestState->check_point = check_point_allocated_blocks(); - test_check_point = current_TestState->check_point; - current_state = ¤t_TestState->state; - *current_state = NULL; - run_next_test = 1; - setups ++; - break; - } - case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: - // Check the heap based on the last setup checkpoint. 
- assert_true(number_of_test_states); - current_TestState = &test_states[--number_of_test_states]; - test_check_point = current_TestState->check_point; - current_state = ¤t_TestState->state; - teardowns ++; - break; - default: - print_error("Invalid unit test function type %d\n", - test->function_type); - exit_test(1); - break; - } - - if (run_next_test) { - int failed = _run_test(test->name, test->function, current_state, - test->function_type, test_check_point); - if (failed) { - failed_names[total_failed] = test->name; - } - - switch (test->function_type) { - case UNIT_TEST_FUNCTION_TYPE_TEST: - previous_test_failed = failed; - total_failed += failed; - tests_executed ++; - break; - - case UNIT_TEST_FUNCTION_TYPE_SETUP: - if (failed) { - total_failed ++; - tests_executed ++; - // Skip forward until the next test or setup function. - run_next_test = 0; - } - previous_test_failed = 0; - break; - - case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: - // If this test failed. - if (failed && !previous_test_failed) { - total_failed ++; - } - break; - default: - assert_false("BUG: shouldn't be here!"); - break; - } - } - } - - if (total_failed) { - size_t i; - print_error("%d out of %d tests failed!\n", total_failed, - tests_executed); - for (i = 0; i < total_failed; i++) { - print_error(" %s\n", failed_names[i]); - } - } else { - print_message("All %d tests passed\n", tests_executed); - } - - if (number_of_test_states) { - print_error("Mismatched number of setup %d and teardown %d " - "functions\n", setups, teardowns); - total_failed = -1; - } - - free(test_states); - free((void*)failed_names); - - fail_if_blocks_allocated(check_point, "run_tests"); - return (int)total_failed; -} diff --git a/tests/cmocka/cmockery.h b/tests/cmocka/cmockery.h deleted file mode 100755 index 4d5235cd..00000000 --- a/tests/cmocka/cmockery.h +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright 2008 Google Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef CMOCKERY_H_ -#define CMOCKERY_H_ -/* - * These headers or their equivalents should be included prior to including - * this header file. - * - * #include - * #include - * #include - * - * This allows test applications to use custom definitions of C standard - * library functions and types. - */ - -// For those who are used to __func__ from gcc. -#ifndef __func__ -#define __func__ __FUNCTION__ -#endif - -/* Largest integral type. This type should be large enough to hold any - * pointer or integer supported by the compiler. */ -#ifndef LargestIntegralType -#define LargestIntegralType unsigned long long -#endif // LargestIntegralType - -// Printf format used to display LargestIntegralType. -#ifndef LargestIntegralTypePrintfFormat -#ifdef _WIN32 -#define LargestIntegralTypePrintfFormat "%I64x" -#else -#define LargestIntegralTypePrintfFormat "%llx" -#endif // _WIN32 -#endif // LargestIntegralTypePrintfFormat - -// Perform an unsigned cast to LargestIntegralType. -#define cast_to_largest_integral_type(value) \ - ((LargestIntegralType)(value)) - -// Retrieves a return value for the current function. -#define mock() _mock(__func__, __FILE__, __LINE__) - -/* Stores a value to be returned by the specified function later. - * The count parameter returns the number of times the value should be returned - * by mock(). If count is set to -1 the value will always be returned. 
- */ -#define will_return(function, value) \ - _will_return(#function, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), 1) -#define will_return_count(function, value, count) \ - _will_return(#function, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), count) - -/* Add a custom parameter checking function. If the event parameter is NULL - * the event structure is allocated internally by this function. If event - * parameter is provided it must be allocated on the heap and doesn't need to - * be deallocated by the caller. - */ -#define expect_check(function, parameter, check_function, check_data) \ - _expect_check(#function, #parameter, __FILE__, __LINE__, check_function, \ - cast_to_largest_integral_type(check_data), NULL, 0) - -/* Add an event to check a parameter, using check_expected(), against a set of - * values. See will_return() for a description of the count parameter. - */ -#define expect_in_set(function, parameter, value_array) \ - expect_in_set_count(function, parameter, value_array, 1) -#define expect_in_set_count(function, parameter, value_array, count) \ - _expect_in_set(#function, #parameter, __FILE__, __LINE__, value_array, \ - sizeof(value_array) / sizeof((value_array)[0]), count) -#define expect_not_in_set(function, parameter, value_array) \ - expect_not_in_set_count(function, parameter, value_array, 1) -#define expect_not_in_set_count(function, parameter, value_array, count) \ - _expect_not_in_set( \ - #function, #parameter, __FILE__, __LINE__, value_array, \ - sizeof(value_array) / sizeof((value_array)[0]), count) - - -/* Add an event to check a parameter, using check_expected(), against a - * signed range. Where range is minimum <= value <= maximum. - * See will_return() for a description of the count parameter. 
- */ -#define expect_in_range(function, parameter, minimum, maximum) \ - expect_in_range_count(function, parameter, minimum, maximum, 1) -#define expect_in_range_count(function, parameter, minimum, maximum, count) \ - _expect_in_range(#function, #parameter, __FILE__, __LINE__, minimum, \ - maximum, count) - -/* Add an event to check a parameter, using check_expected(), against a - * signed range. Where range is value < minimum or value > maximum. - * See will_return() for a description of the count parameter. - */ -#define expect_not_in_range(function, parameter, minimum, maximum) \ - expect_not_in_range_count(function, parameter, minimum, maximum, 1) -#define expect_not_in_range_count(function, parameter, minimum, maximum, \ - count) \ - _expect_not_in_range(#function, #parameter, __FILE__, __LINE__, \ - minimum, maximum, count) - -/* Add an event to check whether a parameter, using check_expected(), is or - * isn't a value. See will_return() for a description of the count parameter. - */ -#define expect_value(function, parameter, value) \ - expect_value_count(function, parameter, value, 1) -#define expect_value_count(function, parameter, value, count) \ - _expect_value(#function, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), count) -#define expect_not_value(function, parameter, value) \ - expect_not_value_count(function, parameter, value, 1) -#define expect_not_value_count(function, parameter, value, count) \ - _expect_not_value(#function, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(value), count) - -/* Add an event to check whether a parameter, using check_expected(), - * is or isn't a string. See will_return() for a description of the count - * parameter. 
- */ -#define expect_string(function, parameter, string) \ - expect_string_count(function, parameter, string, 1) -#define expect_string_count(function, parameter, string, count) \ - _expect_string(#function, #parameter, __FILE__, __LINE__, \ - (const char*)(string), count) -#define expect_not_string(function, parameter, string) \ - expect_not_string_count(function, parameter, string, 1) -#define expect_not_string_count(function, parameter, string, count) \ - _expect_not_string(#function, #parameter, __FILE__, __LINE__, \ - (const char*)(string), count) - -/* Add an event to check whether a parameter, using check_expected() does or - * doesn't match an area of memory. See will_return() for a description of - * the count parameter. - */ -#define expect_memory(function, parameter, memory, size) \ - expect_memory_count(function, parameter, memory, size, 1) -#define expect_memory_count(function, parameter, memory, size, count) \ - _expect_memory(#function, #parameter, __FILE__, __LINE__, \ - (const void*)(memory), size, count) -#define expect_not_memory(function, parameter, memory, size) \ - expect_not_memory_count(function, parameter, memory, size, 1) -#define expect_not_memory_count(function, parameter, memory, size, count) \ - _expect_not_memory(#function, #parameter, __FILE__, __LINE__, \ - (const void*)(memory), size, count) - - -/* Add an event to allow any value for a parameter checked using - * check_expected(). See will_return() for a description of the count - * parameter. - */ -#define expect_any(function, parameter) \ - expect_any_count(function, parameter, 1) -#define expect_any_count(function, parameter, count) \ - _expect_any(#function, #parameter, __FILE__, __LINE__, count) - -/* Determine whether a function parameter is correct. This ensures the next - * value queued by one of the expect_*() macros matches the specified variable. 
- */ -#define check_expected(parameter) \ - _check_expected(__func__, #parameter, __FILE__, __LINE__, \ - cast_to_largest_integral_type(parameter)) - -// Assert that the given expression is true. -#define assert_true(c) _assert_true(cast_to_largest_integral_type(c), #c, \ - __FILE__, __LINE__) -// Assert that the given expression is false. -#define assert_false(c) _assert_true(!(cast_to_largest_integral_type(c)), #c, \ - __FILE__, __LINE__) - -// Assert that the two given integers are equal, otherwise fail. -#define assert_int_equal(a, b) \ - _assert_int_equal(cast_to_largest_integral_type(a), \ - cast_to_largest_integral_type(b), \ - __FILE__, __LINE__) -// Assert that the two given integers are not equal, otherwise fail. -#define assert_int_not_equal(a, b) \ - _assert_int_not_equal(cast_to_largest_integral_type(a), \ - cast_to_largest_integral_type(b), \ - __FILE__, __LINE__) - -// Assert that the two given strings are equal, otherwise fail. -#define assert_string_equal(a, b) \ - _assert_string_equal((const char*)(a), (const char*)(b), __FILE__, \ - __LINE__) -// Assert that the two given strings are not equal, otherwise fail. -#define assert_string_not_equal(a, b) \ - _assert_string_not_equal((const char*)(a), (const char*)(b), __FILE__, \ - __LINE__) - -// Assert that the two given areas of memory are equal, otherwise fail. -#define assert_memory_equal(a, b, size) \ - _assert_memory_equal((const char*)(a), (const char*)(b), size, __FILE__, \ - __LINE__) -// Assert that the two given areas of memory are not equal, otherwise fail. -#define assert_memory_not_equal(a, b, size) \ - _assert_memory_not_equal((const char*)(a), (const char*)(b), size, \ - __FILE__, __LINE__) - -// Assert that the specified value is >= minimum and <= maximum. 
-#define assert_in_range(value, minimum, maximum) \ - _assert_in_range( \ - cast_to_largest_integral_type(value), \ - cast_to_largest_integral_type(minimum), \ - cast_to_largest_integral_type(maximum), __FILE__, __LINE__) - -// Assert that the specified value is < minumum or > maximum -#define assert_not_in_range(value, minimum, maximum) \ - _assert_not_in_range( \ - cast_to_largest_integral_type(value), \ - cast_to_largest_integral_type(minimum), \ - cast_to_largest_integral_type(maximum), __FILE__, __LINE__) - -// Assert that the specified value is within a set. -#define assert_in_set(value, values, number_of_values) \ - _assert_in_set(value, values, number_of_values, __FILE__, __LINE__) -// Assert that the specified value is not within a set. -#define assert_not_in_set(value, values, number_of_values) \ - _assert_not_in_set(value, values, number_of_values, __FILE__, __LINE__) - - -// Forces the test to fail immediately and quit. -#define fail() _fail(__FILE__, __LINE__) - -// Generic method to kick off testing -#define run_test(f) _run_test(#f, f, NULL, UNIT_TEST_FUNCTION_TYPE_TEST, NULL) - -// Initializes a UnitTest structure. -#define unit_test(f) { #f, f, UNIT_TEST_FUNCTION_TYPE_TEST } -#define unit_test_setup(test, setup) \ - { #test "_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_SETUP } -#define unit_test_teardown(test, teardown) \ - { #test "_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_TEARDOWN } - -/* Initialize an array of UnitTest structures with a setup function for a test - * and a teardown function. Either setup or teardown can be NULL. - */ -#define unit_test_setup_teardown(test, setup, teardown) \ - unit_test_setup(test, setup), \ - unit_test(test), \ - unit_test_teardown(test, teardown) - -/* - * Run tests specified by an array of UnitTest structures. The following - * example illustrates this macro's use with the unit_test macro. 
- * - * void Test0(); - * void Test1(); - * - * int main(int argc, char* argv[]) { - * const UnitTest tests[] = { - * unit_test(Test0); - * unit_test(Test1); - * }; - * return run_tests(tests); - * } - */ -#define run_tests(tests) _run_tests(tests, sizeof(tests) / sizeof(tests)[0]) - -// Dynamic allocators -#define test_malloc(size) _test_malloc(size, __FILE__, __LINE__) -#define test_calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__) -#define test_free(ptr) _test_free(ptr, __FILE__, __LINE__) - -// Redirect malloc, calloc and free to the unit test allocators. -#if UNIT_TESTING -#define malloc test_malloc -#define calloc test_calloc -#define free test_free -#endif // UNIT_TESTING - -/* - * Ensure mock_assert() is called. If mock_assert() is called the assert - * expression string is returned. - * For example: - * - * #define assert mock_assert - * - * void showmessage(const char *message) { - * assert(message); - * } - * - * int main(int argc, const char* argv[]) { - * expect_assert_failure(show_message(NULL)); - * printf("succeeded\n"); - * return 0; - * } - */ -#define expect_assert_failure(function_call) \ - { \ - const int expression = setjmp(global_expect_assert_env); \ - global_expecting_assert = 1; \ - if (expression) { \ - print_message("Expected assertion %s occurred\n", \ - *((const char**)&expression)); \ - global_expecting_assert = 0; \ - } else { \ - function_call ; \ - global_expecting_assert = 0; \ - print_error("Expected assert in %s\n", #function_call); \ - _fail(__FILE__, __LINE__); \ - } \ - } - -// Function prototype for setup, test and teardown functions. -typedef void (*UnitTestFunction)(void **state); - -// Function that determines whether a function parameter value is correct. -typedef int (*CheckParameterValue)(const LargestIntegralType value, - const LargestIntegralType check_value_data); - -// Type of the unit test function. 
-typedef enum UnitTestFunctionType { - UNIT_TEST_FUNCTION_TYPE_TEST = 0, - UNIT_TEST_FUNCTION_TYPE_SETUP, - UNIT_TEST_FUNCTION_TYPE_TEARDOWN, -} UnitTestFunctionType; - -/* Stores a unit test function with its name and type. - * NOTE: Every setup function must be paired with a teardown function. It's - * possible to specify NULL function pointers. - */ -typedef struct UnitTest { - const char* name; - UnitTestFunction function; - UnitTestFunctionType function_type; -} UnitTest; - - -// Location within some source code. -typedef struct SourceLocation { - const char* file; - int line; -} SourceLocation; - -// Event that's called to check a parameter value. -typedef struct CheckParameterEvent { - SourceLocation location; - const char *parameter_name; - CheckParameterValue check_value; - LargestIntegralType check_value_data; -} CheckParameterEvent; - -// Used by expect_assert_failure() and mock_assert(). -extern int global_expecting_assert; -extern jmp_buf global_expect_assert_env; - -// Retrieves a value for the given function, as set by "will_return". 
-LargestIntegralType _mock(const char * const function, const char* const file, - const int line); - -void _expect_check( - const char* const function, const char* const parameter, - const char* const file, const int line, - const CheckParameterValue check_function, - const LargestIntegralType check_data, CheckParameterEvent * const event, - const int count); - -void _expect_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType values[], - const size_t number_of_values, const int count); -void _expect_not_in_set( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType values[], - const size_t number_of_values, const int count); - -void _expect_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, - const LargestIntegralType maximum, const int count); -void _expect_not_in_range( - const char* const function, const char* const parameter, - const char* const file, const int line, - const LargestIntegralType minimum, - const LargestIntegralType maximum, const int count); - -void _expect_value( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType value, - const int count); -void _expect_not_value( - const char* const function, const char* const parameter, - const char* const file, const int line, const LargestIntegralType value, - const int count); - -void _expect_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count); -void _expect_not_string( - const char* const function, const char* const parameter, - const char* const file, const int line, const char* string, - const int count); - -void _expect_memory( - const char* const function, const char* const parameter, 
- const char* const file, const int line, const void* const memory, - const size_t size, const int count); -void _expect_not_memory( - const char* const function, const char* const parameter, - const char* const file, const int line, const void* const memory, - const size_t size, const int count); - -void _expect_any( - const char* const function, const char* const parameter, - const char* const file, const int line, const int count); - -void _check_expected( - const char * const function_name, const char * const parameter_name, - const char* file, const int line, const LargestIntegralType value); - -// Can be used to replace assert in tested code so that in conjuction with -// check_assert() it's possible to determine whether an assert condition has -// failed without stopping a test. -void mock_assert(const int result, const char* const expression, - const char * const file, const int line); - -void _will_return(const char * const function_name, const char * const file, - const int line, const LargestIntegralType value, - const int count); -void _assert_true(const LargestIntegralType result, - const char* const expression, - const char * const file, const int line); -void _assert_int_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line); -void _assert_int_not_equal( - const LargestIntegralType a, const LargestIntegralType b, - const char * const file, const int line); -void _assert_string_equal(const char * const a, const char * const b, - const char * const file, const int line); -void _assert_string_not_equal(const char * const a, const char * const b, - const char *file, const int line); -void _assert_memory_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line); -void _assert_memory_not_equal(const void * const a, const void * const b, - const size_t size, const char* const file, - const int line); -void _assert_in_range( - const 
LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const file, const int line); -void _assert_not_in_range( - const LargestIntegralType value, const LargestIntegralType minimum, - const LargestIntegralType maximum, const char* const file, const int line); -void _assert_in_set( - const LargestIntegralType value, const LargestIntegralType values[], - const size_t number_of_values, const char* const file, const int line); -void _assert_not_in_set( - const LargestIntegralType value, const LargestIntegralType values[], - const size_t number_of_values, const char* const file, const int line); - -void* _test_malloc(const size_t size, const char* file, const int line); -void* _test_calloc(const size_t number_of_elements, const size_t size, - const char* file, const int line); -void _test_free(void* const ptr, const char* file, const int line); - -void _fail(const char * const file, const int line); -int _run_test( - const char * const function_name, const UnitTestFunction Function, - void ** const state, const UnitTestFunctionType function_type, - const void* const heap_check_point); -int _run_tests(const UnitTest * const tests, const size_t number_of_tests); - -// Standard output and error print methods. 
-void print_message(const char* const format, ...); -void print_error(const char* const format, ...); -void vprint_message(const char* const format, va_list args); -void vprint_error(const char* const format, va_list args); - -#endif // CMOCKERY_H_ diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 589554f9..98d8d4d5 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -1,9 +1,9 @@ #include #include #include +#include #include "rangeset.h" -#include "cmockery.h" /* for "print" functions */ #include "debug_print.c" @@ -30,18 +30,18 @@ int main(void) { /* Array of test functions */ - const struct UnitTest tests[] = + const struct CMUnitTest tests[] = { - unit_test(test_irange_basic), - unit_test(test_irange_list_union_merge), - unit_test(test_irange_list_union_lossy_cov), - unit_test(test_irange_list_union_complete_cov), - unit_test(test_irange_list_union_intersecting), - unit_test(test_irange_list_intersection), + cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_list_union_merge), + cmocka_unit_test(test_irange_list_union_lossy_cov), + cmocka_unit_test(test_irange_list_union_complete_cov), + cmocka_unit_test(test_irange_list_union_intersecting), + cmocka_unit_test(test_irange_list_intersection), }; /* Run series of tests */ - return run_tests(tests); + return cmocka_run_group_tests(tests, NULL, NULL); } /* From 2092dffaff7ec5171e9d4a184ef27afdf5fcbc9d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Jul 2017 17:49:38 +0300 Subject: [PATCH 0642/1124] make Codecov work in Docker --- .travis.yml | 13 +++++-------- Makefile | 2 +- run_tests.sh | 18 ++++++++++++++---- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 576d9efb..cf4a4fec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ os: - - linux + - linux sudo: required dist: trusty @@ -10,22 +10,19 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 
's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile - docker-compose build script: - - docker-compose run tests + - docker-compose run tests $(bash <(curl -s https://codecov.io/env)) env: - PG_VERSION=10 CHECK_CODE=clang - PG_VERSION=9.6 CHECK_CODE=clang - PG_VERSION=9.5 CHECK_CODE=clang - PG_VERSION=10 CHECK_CODE=cppcheck - - PG_VERSION=10 CHECK_CODE=false - PG_VERSION=9.6 CHECK_CODE=cppcheck - - PG_VERSION=9.6 CHECK_CODE=false - PG_VERSION=9.5 CHECK_CODE=cppcheck + - PG_VERSION=10 CHECK_CODE=false + - PG_VERSION=9.6 CHECK_CODE=false - PG_VERSION=9.5 CHECK_CODE=false - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/Makefile b/Makefile index cec002ce..2de0874d 100644 --- a/Makefile +++ b/Makefile @@ -83,4 +83,4 @@ python_tests: $(MAKE) -C tests/python partitioning_tests cmocka_tests: - $(MAKE) -C tests/cmocka check + $(MAKE) -C tests/cmocka clean check diff --git a/run_tests.sh b/run_tests.sh index d41e053e..5859fefd 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,5 +1,11 @@ #!/bin/bash +# This is a main testing script for: +# * regression tests +# * testgres-based tests +# * cmocka-based tests +# Copyright (c) 2017, Postgres Professional + set -eux echo CHECK_CODE=$CHECK_CODE @@ -12,7 +18,8 @@ if [ "$CHECK_CODE" = "clang" ]; then exit $status elif [ "$CHECK_CODE" = "cppcheck" ]; then - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ + cppcheck \ + --template "{file} ({line}): {severity} ({id}): {message}" \ --enable=warning,portability,performance \ --suppress=redundantAssignment \ --suppress=uselessAssignmentPtrArg \ @@ -34,8 +41,8 @@ make USE_PGXS=1 clean # initialize database initdb -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 CFLAGS_SL="$(pg_config --cflags_sl) -coverage" +# build pg_pathman (using PG_CPPFLAGS for gcov) +make 
USE_PGXS=1 PG_CPPFLAGS="-coverage" make USE_PGXS=1 install # check build @@ -69,7 +76,10 @@ if [ $status -ne 0 ]; then exit $status; fi rm -f tests/cmocka/*.gcno rm -f tests/cmocka/*.gcda -#generate *.gcov files +# generate *.gcov files gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h +# send coverage stats to Coveralls +bash <(curl -s https://codecov.io/bash) + exit $status From 53bb75252972a6e6910170db4578af1c7c6541e5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 00:02:04 +0300 Subject: [PATCH 0643/1124] show postgres server log if startup failed --- run_tests.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/run_tests.sh b/run_tests.sh index 5859fefd..a84c4dea 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -54,6 +54,10 @@ echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf echo "port = 55435" >> $PGDATA/postgresql.conf pg_ctl start -l /tmp/postgres.log -w +# check startup +status=$? +if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi + # run regression tests PGPORT=55435 make USE_PGXS=1 installcheck || status=$? 
From 0c9e29269590832d17d1949d38dc84c6fd160bd0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:01:35 +0300 Subject: [PATCH 0644/1124] make use of SHLIB_LINK --- run_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index a84c4dea..353033e4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -41,8 +41,8 @@ make USE_PGXS=1 clean # initialize database initdb -# build pg_pathman (using PG_CPPFLAGS for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" +# build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install # check build From ecd13bca16462594a14b3dc6fca4d291bd7d219f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:15:41 +0300 Subject: [PATCH 0645/1124] Travis CI: install curl for Codecov --- Dockerfile.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index bd78ba02..31358464 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -13,7 +13,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add python3 gcc make musl-dev cmocka-dev;\ + apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ pip3 install testgres; \ fi From a8b197f107cf0257c3ef37b50f870ac25eaabf45 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Jul 2017 01:27:40 +0300 Subject: [PATCH 0646/1124] fix .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index cf4a4fec..1c7d2bc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ install: - docker-compose build script: - - docker-compose run tests $(bash <(curl -s https://codecov.io/env)) + - docker-compose run $(bash <(curl -s 
https://codecov.io/env)) tests env: - PG_VERSION=10 CHECK_CODE=clang From b841f7f08c571056dd895125c6f4d5a6dd059aa2 Mon Sep 17 00:00:00 2001 From: Michel Pelletier Date: Tue, 11 Jul 2017 14:59:50 -0700 Subject: [PATCH 0647/1124] dot editorconfig --- .editorconfig | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..a54d21c5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,3 @@ +[*] +indent_style = tab +indent_size = 4 From 4a621103f2de85317c55475b790e0c59fdba6c53 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 14:34:15 +0300 Subject: [PATCH 0648/1124] small style fixes --- src/compat/pg_compat.c | 117 +++++++++++++++++---------------- src/include/compat/pg_compat.h | 14 ++-- 2 files changed, 69 insertions(+), 62 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 0e2b9f05..71f93a1e 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -521,64 +521,8 @@ get_rel_persistence(Oid relid) #endif - -/* - * ------------- - * Common code - * ------------- - */ - -void -set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) -{ - double parent_rows = 0; - double parent_size = 0; - ListCell *l; - - foreach(l, root->append_rel_list) - { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex, - parentRTindex = rti; - RelOptInfo *childrel; - - /* append_rel_list contains all append rels; ignore others */ - if (appinfo->parent_relid != parentRTindex) - continue; - - childRTindex = appinfo->child_relid; - - childrel = find_base_rel(root, childRTindex); - Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); - - /* - * Accumulate size information from each live child. 
- */ - Assert(childrel->rows > 0); - - parent_rows += childrel->rows; - -#if PG_VERSION_NUM >= 90600 - parent_size += childrel->reltarget->width * childrel->rows; -#else - parent_size += childrel->width * childrel->rows; -#endif - } - - /* Set 'rows' for append relation */ - rel->rows = parent_rows; - -#if PG_VERSION_NUM >= 90600 - rel->reltarget->width = rint(parent_size / parent_rows); -#else - rel->width = rint(parent_size / parent_rows); -#endif - - rel->tuples = parent_rows; -} - -#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) \ - || (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) /* * Return a palloc'd bare attribute map for tuple conversion, matching input * and output columns by name. (Dropped columns are ignored in both input and @@ -642,3 +586,60 @@ convert_tuples_by_name_map(TupleDesc indesc, return attrMap; } #endif + + + +/* + * ------------- + * Common code + * ------------- + */ + +void +set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) +{ + double parent_rows = 0; + double parent_size = 0; + ListCell *l; + + foreach(l, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index childRTindex, + parentRTindex = rti; + RelOptInfo *childrel; + + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + childRTindex = appinfo->child_relid; + + childrel = find_base_rel(root, childRTindex); + Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); + + /* + * Accumulate size information from each live child. 
+ */ + Assert(childrel->rows > 0); + + parent_rows += childrel->rows; + +#if PG_VERSION_NUM >= 90600 + parent_size += childrel->reltarget->width * childrel->rows; +#else + parent_size += childrel->width * childrel->rows; +#endif + } + + /* Set 'rows' for append relation */ + rel->rows = parent_rows; + +#if PG_VERSION_NUM >= 90600 + rel->reltarget->width = rint(parent_size / parent_rows); +#else + rel->width = rint(parent_size / parent_rows); +#endif + + rel->tuples = parent_rows; +} diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index bb95c153..6b80fcaf 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -548,15 +548,21 @@ extern void set_rel_consider_parallel(PlannerInfo *root, tlist_member_ignore_relabel((Node *) (expr), (targetlist)) #endif -#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) \ - || (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) + +/* + * convert_tuples_by_name_map() + */ +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, - TupleDesc outdesc, - const char *msg); + TupleDesc outdesc, + const char *msg); #else #include "access/tupconvert.h" #endif + + /* * ------------- * Common code From 2bc4adaa9ea2a65e9b7b7da3e0b67dea692ed162 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 16:57:10 +0300 Subject: [PATCH 0649/1124] add new test (test_irange_change_lossiness) --- tests/cmocka/rangeset_tests.c | 72 ++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 98d8d4d5..1f700bc3 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -16,6 +16,7 @@ */ static void test_irange_basic(void **state); +static void test_irange_change_lossiness(void **state); static void test_irange_list_union_merge(void 
**state); static void test_irange_list_union_lossy_cov(void **state); @@ -33,6 +34,7 @@ main(void) const struct CMUnitTest tests[] = { cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_change_lossiness), cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), @@ -75,10 +77,76 @@ test_irange_basic(void **state) assert_true(is_irange_valid(irange)); /* test allocation */ - irange_list = NIL; - irange_list = lappend_irange(irange_list, irange); + irange = make_irange(100, 200, IR_LOSSY); + irange_list = lappend_irange(NIL, irange); assert_memory_equal(&irange, &linitial_irange(irange_list), sizeof(IndexRange)); assert_memory_equal(&irange, &llast_irange(irange_list), sizeof(IndexRange)); + + /* test length */ + irange_list = NIL; + assert_int_equal(irange_list_length(irange_list), 0); + irange_list = lappend_irange(irange_list, make_irange(10, 20, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 11); + irange_list = lappend_irange(irange_list, make_irange(21, 30, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 21); +} + + +/* Test lossiness switcher */ +static void +test_irange_change_lossiness(void **state) +{ + List *irange_list; + + /* test lossiness change (NIL) */ + irange_list = irange_list_set_lossiness(NIL, IR_LOSSY); + assert_ptr_equal(irange_list, NIL); + irange_list = irange_list_set_lossiness(NIL, IR_COMPLETE); + assert_ptr_equal(irange_list, NIL); + + /* test lossiness change (no-op) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-20]L"); + + /* test lossiness change (no-op) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), 
"[30-40]C"); + + /* test lossiness change (single element) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-20]C"); + + /* test lossiness change (single element) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[30-40]L"); + + /* test lossiness change (multiple elements, adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-40]C"); + + /* test lossiness change (multiple elements, adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-40]L"); + + /* test lossiness change (multiple elements, non-adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-15]C, [21-40]C"); + + /* test lossiness change (multiple elements, non-adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-15]L, [21-40]L"); } From 6288849ef76c465bf7741e19e10ceeabb321e7e1 Mon Sep 17 00:00:00 2001 From: Dmitry 
Ivanov Date: Wed, 12 Jul 2017 18:38:09 +0300 Subject: [PATCH 0650/1124] add new rule to Makefile --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 2de0874d..6796be3b 100644 --- a/Makefile +++ b/Makefile @@ -84,3 +84,9 @@ python_tests: cmocka_tests: $(MAKE) -C tests/cmocka clean check + +clean_gcov: + find . \ + -name "*.gcda" -delete -o \ + -name "*.gcno" -delete -o \ + -name "*.gcov" -delete From ea98072a0972a891c2df1aa036e4690b4ab51b30 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Jul 2017 22:24:06 +0300 Subject: [PATCH 0651/1124] Makefile: change rule cmocka_tests for Codecov --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6796be3b..2b69fd73 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ python_tests: $(MAKE) -C tests/python partitioning_tests cmocka_tests: - $(MAKE) -C tests/cmocka clean check + $(MAKE) -C tests/cmocka check clean_gcov: find . \ From c70a71a2b12b3458562bbaa49615c746a4f89d6d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 13 Jul 2017 16:47:43 +0300 Subject: [PATCH 0652/1124] acquire suitable lock in append_child_relation() --- src/hooks.c | 24 ++++++++++++++++-------- src/include/pathman.h | 8 ++++++-- src/pg_pathman.c | 36 +++++++++++++++++++++++++++++------- 3 files changed, 51 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index fbd72231..7b6c587c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -29,6 +29,7 @@ #include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" +#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "rewrite/rewriteManip.h" #include "utils/typcache.h" @@ -290,6 +291,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { Relation parent_rel; /* parent's relation (heap) */ + PlanRowMark *parent_rowmark; /* parent's rowmark */ Oid *children; /* selected children oids */ List 
*ranges, /* a list of IndexRanges */ *wrappers; /* a list of WrapperNodes */ @@ -305,6 +307,9 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Make copy of partitioning expression and fix Var's varno attributes */ part_expr = PrelExpressionForRelid(prel, rti); + /* Get partitioning-related clauses (do this before append_child_relation()) */ + part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); + if (prel->parttype == PT_RANGE) { /* @@ -382,19 +387,25 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* Parent has already been locked by rewriter */ parent_rel = heap_open(rte->relid, NoLock); - /* Add parent if asked to */ - if (prel->enable_parent) - append_child_relation(root, parent_rel, rti, 0, rte->relid, NULL); + parent_rowmark = get_plan_rowmark(root->rowMarks, rti); /* - * Iterate all indexes in rangeset and append corresponding child relations. + * WARNING: 'prel' might become invalid after append_child_relation(). */ + + /* Add parent if asked to */ + if (prel->enable_parent) + append_child_relation(root, parent_rel, parent_rowmark, + rti, 0, rte->relid, NULL); + + /* Iterate all indexes in rangeset and append child relations */ foreach(lc, ranges) { IndexRange irange = lfirst_irange(lc); for (i = irange_lower(irange); i <= irange_upper(irange); i++) - append_child_relation(root, parent_rel, rti, i, children[i], wrappers); + append_child_relation(root, parent_rel, parent_rowmark, + rti, i, children[i], wrappers); } /* Now close parent relation */ @@ -424,9 +435,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pg_pathman_enable_runtime_merge_append)) return; - /* Get partitioning-related clauses */ - part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); - /* Skip if there's no PARAMs in partitioning-related clauses */ if (!clause_contains_params((Node *) part_clauses)) return; diff --git a/src/include/pathman.h b/src/include/pathman.h index 9bcd26f0..d1ebb583 100644 --- a/src/include/pathman.h +++ 
b/src/include/pathman.h @@ -105,8 +105,12 @@ Oid get_pathman_config_params_relid(bool invalid_is_ok); /* * Create RelOptInfo & RTE for a selected partition. */ -Index append_child_relation(PlannerInfo *root, Relation parent_relation, - Index parent_rti, int ir_index, Oid child_oid, +Index append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, List *wrappers); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4f61effc..4a8c4ff5 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -30,8 +30,9 @@ #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" -#include "utils/lsyscache.h" #include "utils/rel.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" #include "utils/selfuncs.h" #include "utils/typcache.h" @@ -364,8 +365,12 @@ get_pathman_config_params_relid(bool invalid_is_ok) * NOTE: partially based on the expand_inherited_rtentry() function. 
*/ Index -append_child_relation(PlannerInfo *root, Relation parent_relation, - Index parent_rti, int ir_index, Oid child_oid, +append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, List *wrappers) { RangeTblEntry *parent_rte, @@ -375,17 +380,35 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, Relation child_relation; AppendRelInfo *appinfo; Index childRTindex; - PlanRowMark *parent_rowmark, - *child_rowmark; + PlanRowMark *child_rowmark; Node *childqual; List *childquals; ListCell *lc1, *lc2; + LOCKMODE lockmode; + + /* Choose a correct lock mode */ + if (parent_rti == root->parse->resultRelation) + lockmode = RowExclusiveLock; + else if (parent_rowmark && RowMarkRequiresRowShareLock(parent_rowmark->markType)) + lockmode = RowShareLock; + else + lockmode = AccessShareLock; + + /* Acquire a suitable lock on partition */ + LockRelationOid(child_oid, lockmode); + + /* Check that partition exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(child_oid))) + { + UnlockRelationOid(child_oid, lockmode); + return 0; + } parent_rel = root->simple_rel_array[parent_rti]; parent_rte = root->simple_rte_array[parent_rti]; - /* FIXME: acquire a suitable lock on partition */ + /* Open child relation (we've just locked it) */ child_relation = heap_open(child_oid, NoLock); /* Create RangeTblEntry for child relation */ @@ -408,7 +431,6 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, /* Create rowmarks required for child rels */ - parent_rowmark = get_plan_rowmark(root->rowMarks, parent_rti); if (parent_rowmark) { child_rowmark = makeNode(PlanRowMark); From e98dd7b46406989ab11751e2c8b06c24be892b60 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 13 Jul 2017 17:45:04 +0300 Subject: [PATCH 0653/1124] fixes for REL_10_BETA2 --- src/include/compat/pg_compat.h | 28 ++++++++++++++++++++++++++++ src/utility_stmt_hooking.c | 10 
+++++----- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 6b80fcaf..b79f9192 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -22,6 +22,7 @@ #include "compat/debug_compat_features.h" #include "postgres.h" +#include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" #include "nodes/relation.h" @@ -562,6 +563,33 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * ExecARInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), \ + (recheck_indexes), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), (recheck_indexes)) +#endif + + +/* + * ExecASInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo)) +#endif + + /* * ------------- diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f05aae27..31d39bc2 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -722,9 +722,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); - /* AFTER ROW INSERT Triggers */ - ExecARInsertTriggers(estate, child_result_rel, tuple, - recheckIndexes); + /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ + ExecARInsertTriggersCompat(estate, child_result_rel, tuple, + 
recheckIndexes, NULL); list_free(recheckIndexes); @@ -746,8 +746,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (old_protocol) pq_endmsgread(); - /* Execute AFTER STATEMENT insertion triggers */ - ExecASInsertTriggers(estate, parent_result_rel); + /* Execute AFTER STATEMENT insertion triggers (FIXME: NULL transition) */ + ExecASInsertTriggersCompat(estate, parent_result_rel, NULL); /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); From fcfd134ffedbfc4a85bf2fa7ce106a3d67ce5092 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 17 Jul 2017 16:43:36 +0300 Subject: [PATCH 0654/1124] improve update/delete on a missing key (mentioned in issue #104) --- expected/pathman_basic.out | 32 ++++++++++++++++++++++++++++++-- sql/pathman_basic.sql | 17 +++++++++++++++-- src/planner_tree_modification.c | 16 +++++++++++----- 3 files changed, 56 insertions(+), 9 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 7128532c..7d83372e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1657,7 +1657,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); /* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ QUERY PLAN -------------------------------------------------------------------------------- Update on range_rel_6 @@ -1672,7 +1672,7 @@ SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; 166 | Tue Jun 15 00:00:00 2010 | 111 (1 row) -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ QUERY PLAN 
-------------------------------------------------------------------------------- Delete on range_rel_6 @@ -1686,6 +1686,34 @@ SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; ----+----+------- (0 rows) +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 297c4097..8fed0b32 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -503,18 +503,31 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); + /* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* 
have partitions for this 'dt' */ UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ DELETE FROM test.range_rel WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; + /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 32ed3f46..464530c7 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -249,6 +249,7 @@ handle_modification_query(Query *parse, ParamListInfo params) Expr *expr; WalkerContext context; Index result_rel; + int num_selected; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -291,12 +292,10 @@ handle_modification_query(Query *parse, ParamListInfo params) wrap = walk_expr_tree(expr, &context); ranges = irange_list_intersection(ranges, 
wrap->rangeset); + num_selected = irange_list_length(ranges); - /* - * If only one partition is affected, - * substitute parent table with the partition. - */ - if (irange_list_length(ranges) == 1) + /* Special case #1: only one partition is affected */ + if (num_selected == 1) { IndexRange irange = linitial_irange(ranges); @@ -361,6 +360,13 @@ handle_modification_query(Query *parse, ParamListInfo params) rte->inh = false; } } + + /* Special case #2: no partitions are affected */ + else if (num_selected == 0) + { + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + } } From 215aa2e591a71ac9792a793e32cf21fa148ce1a8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:37:17 +0300 Subject: [PATCH 0655/1124] refactoring, extract function copy_rel_attributes() --- src/partition_creation.c | 121 +++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 57 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 669295c5..a0ea93ab 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,7 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); -static void copy_relation_attributes(Oid partition_relid, Datum reloptions); +static void copy_rel_attributes(Oid parent_relid, Oid partition_relid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -672,9 +672,6 @@ create_single_partition_internal(Oid parent_relid, RangeVar *partition_rv, char *tablespace) { - HeapTuple tuple = NULL; - Relation parentrel; - /* Value to be returned */ Oid partition_relid = InvalidOid; /* safety */ @@ -695,7 +692,6 @@ create_single_partition_internal(Oid parent_relid, Oid save_userid; int save_sec_context; bool need_priv_escalation = !superuser(); /* we might be a SU */ - Datum reloptions = (Datum) 0; /* Lock 
parent and check if it exists */ LockRelationOid(parent_relid, ShareUpdateExclusiveLock); @@ -736,24 +732,6 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - /* Copy attributes */ - parentrel = heap_open(parent_relid, NoLock); - newrel_rv->relpersistence = parentrel->rd_rel->relpersistence; - if (parentrel->rd_options) - { - bool isNull; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", parent_relid); - - reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, - &isNull); - if (isNull) - reloptions = (Datum) 0; - } - heap_close(parentrel, NoLock); - /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); @@ -804,8 +782,7 @@ create_single_partition_internal(Oid parent_relid, child_relowner).objectId; /* Copy attributes to partition */ - if (reloptions) - copy_relation_attributes(partition_relid, reloptions); + copy_rel_attributes(parent_relid, partition_relid); /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -843,9 +820,6 @@ create_single_partition_internal(Oid parent_relid, if (need_priv_escalation) SetUserIdAndSecContext(save_userid, save_sec_context); - if (tuple != NULL) - ReleaseSysCache(tuple); - return partition_relid; } @@ -1104,7 +1078,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) heap_close(pg_attribute_rel, RowExclusiveLock); } -/* Copy foreign keys of parent table */ +/* Copy foreign keys of parent table (updates pg_class) */ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid) { @@ -1135,38 +1109,71 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Invoke the callback */ FunctionCallInvoke(©_fkeys_proc_fcinfo); + + /* Make changes visible */ + 
CommandCounterIncrement(); } -/* Copy attributes to partition. Updates partition's tuple in pg_class */ +/* Copy reloptions of foreign table (updates pg_class) */ static void -copy_relation_attributes(Oid partition_relid, Datum reloptions) +copy_rel_attributes(Oid parent_relid, Oid partition_relid) { - Relation classRel; - HeapTuple tuple, - newtuple; - Datum new_val[Natts_pg_class]; - bool new_null[Natts_pg_class], - new_repl[Natts_pg_class]; - - classRel = heap_open(RelationRelationId, RowExclusiveLock); - tuple = SearchSysCacheCopy1(RELOID, - ObjectIdGetDatum(partition_relid)); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", - partition_relid); - - /* Fill in relpartbound value */ - memset(new_val, 0, sizeof(new_val)); - memset(new_null, false, sizeof(new_null)); - memset(new_repl, false, sizeof(new_repl)); - new_val[Anum_pg_class_reloptions - 1] = reloptions; - new_null[Anum_pg_class_reloptions - 1] = false; - new_repl[Anum_pg_class_reloptions - 1] = true; - newtuple = heap_modify_tuple(tuple, RelationGetDescr(classRel), - new_val, new_null, new_repl); - CatalogTupleUpdate(classRel, &newtuple->t_self, newtuple); - heap_freetuple(newtuple); - heap_close(classRel, RowExclusiveLock); + Relation pg_class_rel; + + HeapTuple parent_htup, + partition_htup, + new_htup; + + Datum reloptions; + bool reloptions_null; + Datum relpersistence; + + Datum values[Natts_pg_class]; + bool isnull[Natts_pg_class], + replace[Natts_pg_class] = { false }; + + pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + + parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); + + if (!HeapTupleIsValid(parent_htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + if (!HeapTupleIsValid(partition_htup)) + elog(ERROR, "cache lookup failed for relation %u", partition_relid); + + /* Extract parent's reloptions */ + reloptions = 
SysCacheGetAttr(RELOID, parent_htup, + Anum_pg_class_reloptions, + &reloptions_null); + + /* Extract parent's relpersistence */ + relpersistence = ((Form_pg_class) GETSTRUCT(parent_htup))->relpersistence; + + /* Fill in reloptions */ + values[Anum_pg_class_reloptions - 1] = reloptions; + isnull[Anum_pg_class_reloptions - 1] = reloptions_null; + replace[Anum_pg_class_reloptions - 1] = true; + + /* Fill in relpersistence */ + values[Anum_pg_class_relpersistence - 1] = relpersistence; + isnull[Anum_pg_class_relpersistence - 1] = false; + replace[Anum_pg_class_relpersistence - 1] = true; + + new_htup = heap_modify_tuple(partition_htup, + RelationGetDescr(pg_class_rel), + values, isnull, replace); + CatalogTupleUpdate(pg_class_rel, &new_htup->t_self, new_htup); + heap_freetuple(new_htup); + + ReleaseSysCache(parent_htup); + ReleaseSysCache(partition_htup); + + heap_close(pg_class_rel, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); } From 5afbc305bb0e14f477f22aa0eb03bb308a24a578 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:43:38 +0300 Subject: [PATCH 0656/1124] minor fixes (free tuples etc) --- src/init.c | 1 + src/partition_creation.c | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/init.c b/src/init.c index 3219b1fa..e1a1b5bf 100644 --- a/src/init.c +++ b/src/init.c @@ -706,6 +706,7 @@ pathman_config_invalidate_parsed_expression(Oid relid) /* Form new tuple and perform an update */ new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls); CatalogTupleUpdate(rel, &iptr, new_htup); + heap_freetuple(new_htup); heap_close(rel, RowExclusiveLock); } diff --git a/src/partition_creation.c b/src/partition_creation.c index a0ea93ab..53837ee1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -681,8 +681,7 @@ create_single_partition_internal(Oid parent_relid, *parent_nsp_name; /* Elements of the "CREATE TABLE" query tree */ - RangeVar *parent_rv, - *newrel_rv = 
copyObject(partition_rv); + RangeVar *parent_rv; TableLikeClause like_clause; CreateStmt create_stmt; List *create_stmts; @@ -745,7 +744,7 @@ create_single_partition_internal(Oid parent_relid, /* Initialize CreateStmt structure */ NodeSetTag(&create_stmt, T_CreateStmt); - create_stmt.relation = newrel_rv; + create_stmt.relation = copyObject(partition_rv); create_stmt.tableElts = list_make1(copyObject(&like_clause)); create_stmt.inhRelations = list_make1(copyObject(parent_rv)); create_stmt.ofTypename = NULL; From 44e180d2d2976acb1fad74ca4ab7893fde12f18c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 18 Jul 2017 18:45:57 +0300 Subject: [PATCH 0657/1124] rename copy_rel_attributes() -> copy_rel_options() --- src/partition_creation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 53837ee1..3d64d676 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -74,7 +74,7 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); -static void copy_rel_attributes(Oid parent_relid, Oid partition_relid); +static void copy_rel_options(Oid parent_relid, Oid partition_relid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); @@ -781,7 +781,7 @@ create_single_partition_internal(Oid parent_relid, child_relowner).objectId; /* Copy attributes to partition */ - copy_rel_attributes(parent_relid, partition_relid); + copy_rel_options(parent_relid, partition_relid); /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -1115,7 +1115,7 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Copy reloptions of foreign table (updates pg_class) */ static void -copy_rel_attributes(Oid parent_relid, Oid partition_relid) +copy_rel_options(Oid parent_relid, Oid 
partition_relid) { Relation pg_class_rel; From 5d34026d723407230a7a27f4d51d61acedd5cc7a Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:16:34 +0300 Subject: [PATCH 0658/1124] Add first version of script that creates docker containers and uploads them to docker hub --- .travis.yml | 12 ++---------- Dockerfile.tmpl | 10 +++++----- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1c7d2bc8..86c6a175 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,19 +10,11 @@ services: - docker install: - - sed -e 's/${CHECK_CODE}/'${CHECK_CODE}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile + - echo "FROM ${DOCKER_IMAGE}" > Dockerfile - docker-compose build script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests env: - - PG_VERSION=10 CHECK_CODE=clang - - PG_VERSION=9.6 CHECK_CODE=clang - - PG_VERSION=9.5 CHECK_CODE=clang - - PG_VERSION=10 CHECK_CODE=cppcheck - - PG_VERSION=9.6 CHECK_CODE=cppcheck - - PG_VERSION=9.5 CHECK_CODE=cppcheck - - PG_VERSION=10 CHECK_CODE=false - - PG_VERSION=9.6 CHECK_CODE=false - - PG_VERSION=9.5 CHECK_CODE=false + - DOCKER_IMAGE=pg95_clang_check_code diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 31358464..c7b0fab5 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -23,8 +23,8 @@ RUN mkdir -p /pg/data && \ chmod a+rwx /usr/local/lib/postgresql && \ chmod a+rwx /usr/local/share/postgresql/extension -ADD . /pg/pg_pathman -WORKDIR /pg/pg_pathman -RUN chmod -R go+rwX /pg/pg_pathman -USER postgres -ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +ONBUILD ADD . 
/pg/pg_pathman +ONBUILD WORKDIR /pg/pg_pathman +ONBUILD RUN chmod -R go+rwX /pg/pg_pathman +ONBUILD USER postgres +ONBUILD ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh From 22f6d4d38fa08090c1708038d9298c5ea547cefb Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:16:51 +0300 Subject: [PATCH 0659/1124] Add forgotten file --- make_images.py | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 make_images.py diff --git a/make_images.py b/make_images.py new file mode 100755 index 00000000..e2968f2a --- /dev/null +++ b/make_images.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +import subprocess + +DOCKER_ID = 'pathman' +pg_versions = ['9.5','9.6','10'] + +image_types = { + 'clang_check_code': { + 'CHECK_CODE': 'clang', + }, + 'cppcheck': { + 'CHECK_CODE': 'cppcheck', + }, + 'pathman_tests': { + 'CHECK_CODE': 'false', + } +} + +stopline = '###STOP' + +password = input("Enter password for `docker login`: ") +subprocess.check_output([ + 'docker', + 'login', + '-u', DOCKER_ID, + '-p', password]) + +for pg_version in pg_versions: + pgname = 'pg%s' % pg_version.replace('.', '') + for key, variables in image_types.items(): + image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) + with open('Dockerfile', 'w') as out: + with open('Dockerfile.tmpl', 'r') as f: + for line in f: + if line.startswith(stopline): + break + + line = line + line = line.replace('${PG_VERSION}', pg_version) + for key, value in variables.items(): + varname = '${%s}' % key + line = line.replace(varname, value) + + out.write(line) + + args = [ + 'docker', + 'build', + '-t', image_name, + '.' 
+ ] + subprocess.check_output(args, stderr=subprocess.STDOUT) + print("build ok:", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + exit() From 3a8bcf873083cd086da648178b885cc20b5b7131 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:19:12 +0300 Subject: [PATCH 0660/1124] Fix container name in tests --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 86c6a175..b04085ec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,4 +17,4 @@ script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests env: - - DOCKER_IMAGE=pg95_clang_check_code + - DOCKER_IMAGE=pathman/pg95_clang_check_code From f633bce04e5802a566f0d30a6a242e095c4fea59 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 14:55:59 +0300 Subject: [PATCH 0661/1124] Add all images to travis --- .travis.yml | 8 ++++++++ make_images.py | 15 +++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index b04085ec..cd41b73e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,3 +18,11 @@ script: env: - DOCKER_IMAGE=pathman/pg95_clang_check_code + - DOCKER_IMAGE=pathman/pg95_cppcheck + - DOCKER_IMAGE=pathman/pg95_pathman_tests + - DOCKER_IMAGE=pathman/pg96_clang_check_code + - DOCKER_IMAGE=pathman/pg96_cppcheck + - DOCKER_IMAGE=pathman/pg96_pathman_tests + - DOCKER_IMAGE=pathman/pg10_clang_check_code + - DOCKER_IMAGE=pathman/pg10_cppcheck + - DOCKER_IMAGE=pathman/pg10_pathman_tests diff --git a/make_images.py b/make_images.py index e2968f2a..77a49fac 100755 --- a/make_images.py +++ b/make_images.py @@ -17,15 +17,16 @@ } } -stopline = '###STOP' - -password = input("Enter password for `docker login`: ") +password = input("Enter password for `docker login` for user `%s`: " % DOCKER_ID) subprocess.check_output([ 'docker', 'login', '-u', DOCKER_ID, '-p', password]) 
+travis_conf_line = '- DOCKER_IMAGE=%s' +travis_conf = [] + for pg_version in pg_versions: pgname = 'pg%s' % pg_version.replace('.', '') for key, variables in image_types.items(): @@ -33,10 +34,6 @@ with open('Dockerfile', 'w') as out: with open('Dockerfile.tmpl', 'r') as f: for line in f: - if line.startswith(stopline): - break - - line = line line = line.replace('${PG_VERSION}', pg_version) for key, value in variables.items(): varname = '${%s}' % key @@ -55,4 +52,6 @@ subprocess.check_output(['docker', 'push', image_name], stderr=subprocess.STDOUT) print("upload ok:", image_name) - exit() + travis_conf.append(travis_conf_line % image_name) + +print('\n'.join(travis_conf)) From 64b78a54adbab3c0b9a56dcee8267f80d3e8e12b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 19 Jul 2017 15:04:07 +0300 Subject: [PATCH 0662/1124] Change make_images output a little bit --- make_images.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/make_images.py b/make_images.py index 77a49fac..6859cd3c 100755 --- a/make_images.py +++ b/make_images.py @@ -26,6 +26,7 @@ travis_conf_line = '- DOCKER_IMAGE=%s' travis_conf = [] +print("") for pg_version in pg_versions: pgname = 'pg%s' % pg_version.replace('.', '') @@ -54,4 +55,5 @@ print("upload ok:", image_name) travis_conf.append(travis_conf_line % image_name) +print("\ntravis configuration") print('\n'.join(travis_conf)) From beb3471f3a8862f4351c00ada1c4045d5f73c7fb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 19 Jul 2017 18:50:17 +0300 Subject: [PATCH 0663/1124] improve regression tests, fix rare crash in create_partitions_for_value_internal() --- expected/pathman_bgw.out | 38 ++++++++++++++++++++++++++++++++++ expected/pathman_callbacks.out | 26 ++++++++++------------- sql/pathman_bgw.sql | 20 ++++++++++++++++++ sql/pathman_callbacks.sql | 37 ++++++++++++++++++--------------- src/partition_creation.c | 9 ++++---- 5 files changed, 93 insertions(+), 37 deletions(-) diff --git a/expected/pathman_bgw.out 
b/expected/pathman_bgw.out index 3c955c05..2356c1fc 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -105,5 +105,43 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP TABLE test_bgw.test_4 CASCADE; NOTICE: drop cascades to 4 other objects +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; +SELECT set_spawn_using_bgw('test_bgw.test_5', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO test_bgw.test_5 VALUES (-100); +ERROR: Attempt to spawn new partitions of relation "test_5" failed +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_5 | test_bgw.test_5_1 | 2 | val | 1 | 11 + test_bgw.test_5 | test_bgw.test_5_2 | 2 | val | 11 | 21 +(2 rows) + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 2f8e0166..aaa9f82b 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -1,14 +1,14 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; -/* Check callbacks */ +/* callback #1 */ CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE 
WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -/* callback is in public namespace, must be schema-qualified */ +/* callback #2 */ CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) RETURNS VOID AS $$ BEGIN @@ -184,15 +184,11 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} +BEGIN; DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) -RETURNS VOID AS $$ -BEGIN - RAISE WARNING 'callback arg: %', args::TEXT; -END -$$ language plpgsql; +ROLLBACK; INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP TABLE callbacks.abc CASCADE; @@ -211,22 +207,22 @@ CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) RETURNS VOID AS $$ DECLARE - relation regclass; + relation regclass; parent_rel regclass; BEGIN parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; - -- drop "old" partitions - FOR relation IN (SELECT partition FROM + -- drop "old" partitions + FOR relation IN (SELECT partition FROM (SELECT partition, range_min::INT4 FROM pathman_partition_list WHERE parent = parent_rel ORDER BY range_min::INT4 DESC OFFSET 4) t -- remain 4 last partitions ORDER BY range_min) - LOOP - RAISE NOTICE 'dropping partition %', relation; - PERFORM drop_range_partition(relation); - END LOOP; + LOOP + RAISE 
NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; END $$ LANGUAGE plpgsql; SELECT * FROM pathman_partition_list diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 90165f4c..7eedaff2 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -53,6 +53,26 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP TABLE test_bgw.test_4 CASCADE; +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; + +SELECT set_spawn_using_bgw('test_bgw.test_5', true); +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); +INSERT INTO test_bgw.test_5 VALUES (-100); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; + + DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 79325a2c..f435e1c7 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -3,8 +3,9 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; -/* Check callbacks */ + +/* callback #1 */ CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN @@ -12,15 +13,15 @@ BEGIN END $$ language plpgsql; - - -/* callback is in public namespace, must be schema-qualified */ +/* callback #2 */ CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) RETURNS VOID AS $$ BEGIN END $$ language plpgsql; + + CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); @@ -78,6 +79,7 @@ SELECT create_hash_partitions('callbacks.abc', 'a', 5); DROP TABLE callbacks.abc CASCADE; 
+ /* test the temprary deletion of callback function */ CREATE TABLE callbacks.abc(a serial, b int); SELECT set_init_callback('callbacks.abc', @@ -85,18 +87,17 @@ SELECT set_init_callback('callbacks.abc', SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ + +BEGIN; DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) -RETURNS VOID AS $$ -BEGIN - RAISE WARNING 'callback arg: %', args::TEXT; -END -$$ language plpgsql; +ROLLBACK; + INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ DROP TABLE callbacks.abc CASCADE; + /* more complex test using rotation of tables */ CREATE TABLE callbacks.abc(a INT4 NOT NULL); INSERT INTO callbacks.abc @@ -107,22 +108,22 @@ CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) RETURNS VOID AS $$ DECLARE - relation regclass; + relation regclass; parent_rel regclass; BEGIN parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; - -- drop "old" partitions - FOR relation IN (SELECT partition FROM + -- drop "old" partitions + FOR relation IN (SELECT partition FROM (SELECT partition, range_min::INT4 FROM pathman_partition_list WHERE parent = parent_rel ORDER BY range_min::INT4 DESC OFFSET 4) t -- remain 4 last partitions ORDER BY range_min) - LOOP - RAISE NOTICE 'dropping partition %', relation; - PERFORM drop_range_partition(relation); - END LOOP; + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; END $$ LANGUAGE plpgsql; @@ -140,6 +141,8 @@ SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS ORDER BY range_min::INT4; + + DROP TABLE callbacks.abc CASCADE; DROP SCHEMA callbacks CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/partition_creation.c 
b/src/partition_creation.c index 412b3f36..95e3c045 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -451,11 +451,10 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, FlushErrorState(); /* Produce log message if we're in BGW */ - error->elevel = LOG; - error->message = psprintf(CppAsString(create_partitions_for_value_internal) - ": %s [%u]", error->message, MyProcPid); - - ReThrowError(error); + elog(LOG, + CppAsString(create_partitions_for_value_internal) ": %s [%u]", + error->message, + MyProcPid); /* Reset 'partid' in case of error */ partid = InvalidOid; From 5d91fc55a755b0a47fdc30382f43398bcae86821 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 20 Jul 2017 14:51:09 +0300 Subject: [PATCH 0664/1124] Advice to configure pathman_ddl_trigger ENABLE ALWAYS on LR replica --- README.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/README.md b/README.md index d53ad374..3b37273f 100644 --- a/README.md +++ b/README.md @@ -251,6 +251,25 @@ drop_partitions(parent REGCLASS, ``` Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`. +To remove partitioned table along with all partitions fully, use conventional +`DROP TABLE relation CASCADE`. However, care should be taken in somewhat rare +case when you are running logical replication and `DROP` was executed by +replication apply worker, e.g. via trigger on replicated table. `pg_pathman` +uses `pathman_ddl_trigger` event trigger to remove the record about dropped +table from `pathman_config`, and this trigger by default won't fire on replica, +leading to inconsistent state when `pg_pathman` thinks that the table still +exists, but in fact it doesn't. 
If this is the case, configure this trigger to +fire on replica too: + +```plpgsql +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE ALWAYS; +``` + +Physical replication doesn't have this problem since DDL as well as +`pathman_config` table is replicated too; master and slave PostgreSQL instances +are basically identical, and it is only harmful to keep this trigger in `ALWAYS` +mode. + ### Additional parameters From 215234e5ca1d8bf56844ae45403d8f5839986bdb Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:20:59 +0300 Subject: [PATCH 0665/1124] Ask user login in make_images script. Remove testgres installation from container --- Dockerfile.tmpl | 1 - make_images.py | 6 ++++-- run_tests.sh | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index c7b0fab5..a1c662c8 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -14,7 +14,6 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ - pip3 install testgres; \ fi RUN mkdir -p /pg/data && \ diff --git a/make_images.py b/make_images.py index 6859cd3c..b63b3fbe 100755 --- a/make_images.py +++ b/make_images.py @@ -1,6 +1,7 @@ #!/usr/bin/env python import subprocess +import getpass DOCKER_ID = 'pathman' pg_versions = ['9.5','9.6','10'] @@ -17,11 +18,12 @@ } } -password = input("Enter password for `docker login` for user `%s`: " % DOCKER_ID) +user = input("Enter username for `docker login`: ") +password = getpass.getpass() subprocess.check_output([ 'docker', 'login', - '-u', DOCKER_ID, + '-u', user, '-p', password]) travis_conf_line = '- DOCKER_IMAGE=%s' diff --git a/run_tests.sh b/run_tests.sh index 353033e4..0bb36105 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -35,6 +35,10 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then exit $status fi +# we need 
testgres for pathman tests +pip3 install testgres +pip3 freeze | grep testgres + # don't forget to "make clean" make USE_PGXS=1 clean From 9a693d4f3b10b792aa0c64475be3d07f8504ce4c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:25:57 +0300 Subject: [PATCH 0666/1124] Remove printlog function --- tests/python/partitioning_test.py | 299 ++++++++++++++---------------- 1 file changed, 141 insertions(+), 158 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0d05c458..6d79dd96 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -81,115 +81,102 @@ def catchup_replica(self, master, replica): % replica.name master.poll_query_until('postgres', wait_lsn_query) - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - def test_concurrent(self): """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - node.psql( + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql( + 'postgres', + 'select partition_table_concurrently(\'abc\')') + + while True: + # update some rows to check for deadlocks + node.safe_psql( 'postgres', - 'select partition_table_concurrently(\'abc\')') + ''' + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + ''') - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 
'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e + count = node.execute( + 'postgres', + 'select count(*) from pathman_concurrent_part_tasks') + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() def test_replication(self): """Tests how pg_pathman works with replication""" node = get_new_node('master') replica = get_new_node('repl') - try: # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set 
enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 300000 + ) + + # check that direct UPDATE in pathman_config_params invalidates + # cache + node.psql( + 'postgres', + 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * 
from abc') + ) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc') + ) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], + 0 + ) def test_locks(self): """Test that a session trying to create new partitions waits for other @@ -225,71 +212,67 @@ def add_partition(node, flag, query): # Initialize master server node = get_new_node('master') - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' + ) + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + 
target=add_partition, + args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) + self.assertEqual(flags[i].get(), False) - # Commit transaction. Since then other sessions can create - # partitions - con.commit() + # Commit transaction. Since then other sessions can create + # partitions + con.commit() - # Now wait until each thread finishes - for thread in threads: - thread.join() + # Now wait until each thread finishes + for thread in threads: + thread.join() - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' + ), + b'6\n' + ) def test_tablespace(self): """Check tablespace support""" From 1126cc2cc84e6bce495861ef751824be6ff24436 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:35:40 +0300 Subject: [PATCH 0667/1124] Use virtualenv in tests --- .gitignore | 1 + Dockerfile.tmpl | 1 + run_tests.sh | 2 ++ 3 files changed, 4 insertions(+) diff --git a/.gitignore 
b/.gitignore index 3eb50e54..06aa9a65 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ pg_pathman--*.sql tags cscope* Dockerfile +testgres diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index a1c662c8..e5663156 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -14,6 +14,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ + pip3 install virtualenv;\ fi RUN mkdir -p /pg/data && \ diff --git a/run_tests.sh b/run_tests.sh index 0bb36105..5074be7a 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -36,6 +36,8 @@ elif [ "$CHECK_CODE" = "cppcheck" ]; then fi # we need testgres for pathman tests +virtualenv env +source env/bin/activate pip3 install testgres pip3 freeze | grep testgres From 63dccb6b6d7b2d7940e16710326ef98ea12a5a3f Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 21 Jul 2017 13:42:00 +0300 Subject: [PATCH 0668/1124] Fix error in virtualenv activation for tests --- run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_tests.sh b/run_tests.sh index 5074be7a..1b9d7a70 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -37,6 +37,7 @@ fi # we need testgres for pathman tests virtualenv env +export VIRTUAL_ENV_DISABLE_PROMPT=1 source env/bin/activate pip3 install testgres pip3 freeze | grep testgres From c85f96af05bf0648d5dde080e8a882efb917da38 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 21 Jul 2017 20:41:11 +0300 Subject: [PATCH 0669/1124] reformat + fix python-based tests --- tests/python/.style.yapf | 5 + tests/python/partitioning_test.py | 2111 +++++++++++++++-------------- 2 files changed, 1075 insertions(+), 1041 deletions(-) create mode 100644 tests/python/.style.yapf diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf new file mode 100644 index 00000000..e2ca7ba3 --- /dev/null +++ 
b/tests/python/.style.yapf @@ -0,0 +1,5 @@ +[style] +based_on_style = pep8 +spaces_before_comment = 4 +split_before_logical_operator = false +column_limit=90 diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 6d79dd96..52b96d87 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ import unittest @@ -20,1051 +19,1081 @@ version = get_config().get("VERSION_NUM") + # Helper function for json equality def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper + """ To run tests with FDW support, set environment variable TEST_FDW=1 """ + def wrapper(*args, **kwargs): + if os.environ.get('FDW_DISABLED') != '1': + func(*args, **kwargs) + else: + print('Warning: FDW features tests are disabled, skipping...') -class PartitioningTests(unittest.TestCase): + return wrapper - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data 
:= false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def test_concurrent(self): - """Tests concurrent partitioning""" - - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = 
get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, 
value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. 
Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - 
master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by 
id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - 
node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": 
"range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove 
all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - 
self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check 
number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select 
set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - 
[node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar 
to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - 
con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +class PartitioningTests(unittest.TestCase): + def setUp(self): + self.setup_cmd = [ + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ] + + def tearDown(self): + stop_all() + + def start_new_pathman_cluster(self, name='test', allows_streaming=False): + node = get_new_node(name) + node.init(allows_streaming=allows_streaming) + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('postgres', 'create extension pg_pathman') + return node + + def init_test_data(self, node): + """ Initialize pg_pathman extension and test data """ + for cmd in self.setup_cmd: + node.safe_psql('postgres', cmd) + + def catchup_replica(self, master, replica): + 
""" Wait until replica synchronizes with master """ + if version >= 100000: + wait_lsn_query = \ + 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + else: + wait_lsn_query = \ + 'SELECT pg_current_xlog_location() <= replay_location ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + master.poll_query_until('postgres', wait_lsn_query) + + def test_concurrent(self): + """ Test concurrent partitioning """ + + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql('postgres', "select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql('postgres', """ + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute('postgres', """ + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + node = get_new_node('master') + replica = get_new_node('repl') + + # initialize master server + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + 
replica.psql('postgres', 'explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('postgres', 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + import threading + import time + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql('postgres', query) + with lock: + flag.set(True) + + # Initialize master server + node = get_new_node('master') + + node.init() + node.append_conf("postgresql.conf", 
"shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);') + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. 
Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute('postgres', + 'select get_tablespace(\'{}\')'.format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + node = get_new_node('master') + node.init() + node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql('postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @if_fdw_enabled + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + master = get_new_node('test') + 
master.init() + master.append_conf('postgresql.conf', """ + shared_preload_libraries='pg_pathman, postgres_fdw'\n + """) + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql('postgres', """ + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + # Start foreign server + fserv = get_new_node('fserv') + fserv.init().start() + fserv.safe_psql('postgres', "create table ftable(id serial, name text)") + fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql('postgres', """ + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql('postgres', """ + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql('postgres', """ + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + 'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable 
order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") + + @if_fdw_enabled + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + import json + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < 90600: + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + 
+ alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') + + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= 100000: + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + 
self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + 
node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + 
self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + 
self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_pg_dump(self): + """ + Test using dump and restore of partitioned table through pg_dump and pg_restore tools. + + Test strategy: + - test range and hash partitioned tables; + - for each partitioned table check on restorable side the following quantities: + * constraints related to partitioning; + * init callback function and enable parent flag; + * number of rows in parent and child tables; + * plan validity of simple SELECT query under partitioned table; + - check dumping using the following parameters of pg_dump: + * format = plain | custom; + * using of inserts and copy. + - all test cases are carried out on tables half-full with data located in parent part, + the rest of data - in child tables. + """ + + import subprocess + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + """) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute( + 'insert into range_partitioned select i from generate_series(1, 500) i' + ) + con.execute('create table hash_partitioned (i integer not null)') + con.execute( + 'insert into hash_partitioned select i from generate_series(1, 500) i' + ) + + # partition table keeping data in base table + # 
enable_parent parameter automatically becames true + con.execute( + 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)' + ) + con.execute( + 'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)' + ) + + # fillin child tables with remain data + con.execute( + 'insert into range_partitioned select i from generate_series(501, 1000) i' + ) + con.execute( + 'insert into hash_partitioned select i from generate_series(501, 1000) i' + ) + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute( + 'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + con.execute( + 'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + + # turn off enable_parent option + con.execute( + 'select set_enable_parent(\'range_partitioned\', false)') + con.execute('select set_enable_parent(\'hash_partitioned\', false)') + + con.commit() + + # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) + + def cmp_full(con1, con2): + """ + Compare selection partitions in plan + and contents in partitioned tables + """ + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', 'only range_partitioned', + 'hash_partitioned', 'only hash_partitioned' + ] + for table_ref in table_refs: + plan_initial = con1.execute( + plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute( + plan_query % table_ref)[0][0][0]['Plan'] + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH + + content_initial = [ + x[0] for x in con1.execute(content_query % table_ref) + ] + content_copy = [ + x[0] for x in con2.execute(content_query % table_ref) + ] + if content_initial != content_copy: + return 
CONTENTS_MISMATCH + + return CMP_OK + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', + 'alter system set pg_pathman.override_copy to off') + node.psql('copy', + 'alter system set pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, turnon_pathman, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--inserts", "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via INSERTs + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--format=custom", "initial" + ], [ + node.get_bin_path("pg_restore"), "-p {}".format(node.port), + "--dbname=copy" + ], cmp_full), # dump in archive format + ] + + try: + FNULL = open(os.devnull, 'w') + + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), + ' '.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen( + pg_restore_params, + stdin=subprocess.PIPE, + stdout=FNULL, + stderr=FNULL) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # validate data + with node.connect('initial') as con1, \ + node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = 
cmp_dbs(con1, con2) + self.assertNotEqual( + cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" + % dump_restore_cmd) + self.assertNotEqual( + cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" + % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + except: + raise + finally: + FNULL.close() + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive 
inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + FNULL = open(os.devnull, 'w') + + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=FNULL, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=FNULL, + stderr=FNULL, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = 
inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + # Stop instance and finish work + node.stop() + node.cleanup() + FNULL.close() -if __name__ == "__main__": - unittest.main() +if __name__ == "__main__": + unittest.main() From c2ed28a6bdebdf743774ca3d1c1763c99b652f12 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Jul 2017 15:36:49 +0300 Subject: [PATCH 0670/1124] new test test_conc_part_drop_runtime_append() --- tests/python/partitioning_test.py | 100 ++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 52b96d87..207fd664 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -629,6 +629,106 @@ def test_parallel_nodes(self): node.stop() node.cleanup() + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + 
""") # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # Stop instance and finish work + node.stop() + node.cleanup() + def test_conc_part_creation_insert(self): """ Test concurrent partition creation on INSERT """ From 61c01f603ce6938ea89e5ae5aa944fb4f6f2cdf1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Jul 2017 16:22:58 +0300 Subject: [PATCH 0671/1124] improve test_conc_part_drop_runtime_append() --- tests/python/partitioning_test.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 
deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 207fd664..32c30492 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -647,6 +647,14 @@ def test_conc_part_drop_runtime_append(self): # Create two separate connections for this test with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + # Thread for connection #2 (it has to wait) def con2_thread(): con1.begin() @@ -678,9 +686,9 @@ def con2_thread(): has_drop_test_4 = True continue - self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) - self.assertTrue(has_drop_test_4) + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + # Step 1: cache partitioned table in con1 con1.begin() @@ -725,6 +733,12 @@ def con2_thread(): # check number of partitions self.assertEqual(len(rows), 99) + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + # Stop instance and finish work node.stop() node.cleanup() From 526fe9d808e2a97ceeb074013bd78de1e433b5c1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 24 Jul 2017 16:28:16 +0300 Subject: [PATCH 0672/1124] bump lib version to 1.4.2 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 267f5f80..63110d68 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.1", + "version": "1.4.2", "maintainer": [ "Ildar Musin ", "Dmitry 
Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.1", + "version": "1.4.2", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 9ae638b5..4fd11dcf 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10401 + 10402 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 262d48a0..27989803 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010401 +#define CURRENT_LIB_VERSION 0x010402 void *pathman_cache_search_relid(HTAB *cache_table, From 90b4f5770a1bc7d587ee72619482306a3ad9fd1d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 24 Jul 2017 19:36:30 +0300 Subject: [PATCH 0673/1124] Make special container for postgres --with-cassert --- Dockerfile.tmpl | 2 +- make_images.py | 110 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 83 insertions(+), 29 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index e5663156..0504dd5a 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,4 +1,4 @@ -FROM postgres:${PG_VERSION}-alpine +FROM ${PG_IMAGE} ENV LANG=C.UTF-8 PGDATA=/pg/data diff --git a/make_images.py b/make_images.py index b63b3fbe..a8ba8112 100755 --- a/make_images.py +++ b/make_images.py @@ -1,10 +1,64 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 +import os import subprocess import getpass +import requests +import tempfile + +from urllib.parse import urljoin +from urllib.request import urlopen DOCKER_ID = 'pathman' -pg_versions = ['9.5','9.6','10'] +ALPINE_BASE_URL = 
'https://raw.githubusercontent.com/docker-library/postgres/master/9.6/alpine/' +ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' +ALPINE_PATCH = b''' +diff --git a/Dockerfile b/Dockerfile +index 9878023..ba215bc 100644 +--- a/Dockerfile ++++ b/Dockerfile +@@ -80,6 +80,7 @@ RUN set -ex \\ + # configure options taken from: + # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 + && ./configure \\ ++ --enable-cassert \\ + --build="$gnuArch" \\ + # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" + # --enable-nls \\ +''' +CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID + +def make_alpine_image(image_name): + dockerfile = urlopen(urljoin(ALPINE_BASE_URL, 'Dockerfile')).read() + entrypoint_sh = urlopen(urljoin(ALPINE_BASE_URL, ALPINE_ENTRYPOINT)).read() + + with tempfile.TemporaryDirectory() as tmpdir: + print("Creating build in %s" % tmpdir) + with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: + f.write(dockerfile.decode()) + + with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: + f.write(entrypoint_sh.decode()) + + with open(os.path.join(tmpdir, 'cassert.patch'), 'w') as f: + f.write(ALPINE_PATCH.decode()) + + subprocess.check_output(["git", "apply", "cassert.patch"], cwd=tmpdir) + print("patch applied") + subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) + print("build ok: ", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + +make_alpine_image(CUSTOM_IMAGE_NAME) + +pg_containers = [ + ('pg95', 'postgres:9.5-alpine'), + ('pg96', 'postgres:9.6-alpine'), + ('pg10', 'postgres:10-alpine'), + ('pg96_ca', CUSTOM_IMAGE_NAME), +] image_types = { 'clang_check_code': { @@ -30,32 +84,32 @@ travis_conf = [] print("") -for pg_version in pg_versions: - pgname = 'pg%s' % pg_version.replace('.', '') - for key, variables in 
image_types.items(): - image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) - with open('Dockerfile', 'w') as out: - with open('Dockerfile.tmpl', 'r') as f: - for line in f: - line = line.replace('${PG_VERSION}', pg_version) - for key, value in variables.items(): - varname = '${%s}' % key - line = line.replace(varname, value) - - out.write(line) - - args = [ - 'docker', - 'build', - '-t', image_name, - '.' - ] - subprocess.check_output(args, stderr=subprocess.STDOUT) - print("build ok:", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - travis_conf.append(travis_conf_line % image_name) +if __name__ == '__main__': + for pgname, container in pg_containers: + for key, variables in image_types.items(): + image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) + with open('Dockerfile', 'w') as out: + with open('Dockerfile.tmpl', 'r') as f: + for line in f: + line = line.replace('${PG_IMAGE}', container) + for key, value in variables.items(): + varname = '${%s}' % key + line = line.replace(varname, value) + + out.write(line) + + args = [ + 'docker', + 'build', + '-t', image_name, + '.' 
+ ] + subprocess.check_output(args, stderr=subprocess.STDOUT) + print("build ok:", image_name) + subprocess.check_output(['docker', 'push', image_name], + stderr=subprocess.STDOUT) + print("upload ok:", image_name) + travis_conf.append(travis_conf_line % image_name) print("\ntravis configuration") print('\n'.join(travis_conf)) From 0967d410c0ef4a9c7c95bcd6743a4232fa6023fc Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Jul 2017 10:43:17 +0300 Subject: [PATCH 0674/1124] Add cassert containers to travis --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index cd41b73e..29f4dff5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,3 +26,6 @@ env: - DOCKER_IMAGE=pathman/pg10_clang_check_code - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests + - DOCKER_IMAGE=pathman/pg96_ca_clang_check_code + - DOCKER_IMAGE=pathman/pg96_ca_cppcheck + - DOCKER_IMAGE=pathman/pg96_ca_pathman_tests From b0d084f0cce2c2ce8c2970ff345244c208085fdd Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 25 Jul 2017 14:03:01 +0300 Subject: [PATCH 0675/1124] Use pg10 for custom container --- .travis.yml | 6 +++--- make_images.py | 33 ++++++++++++++++++--------------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/.travis.yml b/.travis.yml index 29f4dff5..3ca602c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,6 +26,6 @@ env: - DOCKER_IMAGE=pathman/pg10_clang_check_code - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests - - DOCKER_IMAGE=pathman/pg96_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg96_ca_cppcheck - - DOCKER_IMAGE=pathman/pg96_ca_pathman_tests + - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code + - DOCKER_IMAGE=pathman/pg10_ca_cppcheck + - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests diff --git a/make_images.py b/make_images.py index a8ba8112..dc01407e 100755 --- a/make_images.py +++ b/make_images.py @@ -10,21 +10,20 @@ from urllib.request import urlopen DOCKER_ID = 
'pathman' -ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/9.6/alpine/' +ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' ALPINE_PATCH = b''' -diff --git a/Dockerfile b/Dockerfile -index 9878023..ba215bc 100644 ---- a/Dockerfile -+++ b/Dockerfile -@@ -80,6 +80,7 @@ RUN set -ex \\ - # configure options taken from: - # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 - && ./configure \\ +--- Dockerfile 2017-07-25 12:43:20.424984422 +0300 ++++ Dockerfile 2017-07-25 12:46:10.279267520 +0300 +@@ -86,6 +86,7 @@ + --enable-integer-datetimes \\ + --enable-thread-safety \\ + --enable-tap-tests \\ + --enable-cassert \\ - --build="$gnuArch" \\ - # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" - # --enable-nls \\ + # skip debugging info -- we want tiny size instead + # --enable-debug \\ + --disable-rpath \\ + ''' CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID @@ -34,16 +33,20 @@ def make_alpine_image(image_name): with tempfile.TemporaryDirectory() as tmpdir: print("Creating build in %s" % tmpdir) + patch_name = os.path.join(tmpdir, "cassert.patch") + with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: f.write(dockerfile.decode()) with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: f.write(entrypoint_sh.decode()) - with open(os.path.join(tmpdir, 'cassert.patch'), 'w') as f: + with open(patch_name, 'w') as f: f.write(ALPINE_PATCH.decode()) - subprocess.check_output(["git", "apply", "cassert.patch"], cwd=tmpdir) + with open(patch_name, 'r') as f: + p = subprocess.Popen(["patch", "-p0"], cwd=tmpdir, stdin=subprocess.PIPE) + p.communicate(str.encode(f.read())) print("patch applied") subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) print("build 
ok: ", image_name) @@ -57,7 +60,7 @@ def make_alpine_image(image_name): ('pg95', 'postgres:9.5-alpine'), ('pg96', 'postgres:9.6-alpine'), ('pg10', 'postgres:10-alpine'), - ('pg96_ca', CUSTOM_IMAGE_NAME), + ('pg10_ca', CUSTOM_IMAGE_NAME), ] image_types = { From f01f9883ca71ae0702392b9d96be49308f56d067 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 15:52:02 +0300 Subject: [PATCH 0676/1124] fix compatibility issues for postgres 10 --- src/partition_creation.c | 14 ++++++++++++++ src/partition_filter.c | 11 ++++++----- src/pl_funcs.c | 6 ++++-- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 6cfadc0e..73b48dfc 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -47,6 +47,9 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif static Oid spawn_partitions_val(Oid parent_relid, const Bound *range_bound_min, @@ -1955,18 +1958,29 @@ drop_single_update_trigger_internal(Oid relid, * To avoid warning message about missing trigger we check it beforehand. 
* and quit if it doesn't */ +#if PG_VERSION_NUM >= 100000 + address = get_object_address(OBJECT_TRIGGER, + (Node *) namelist, + &relation, + AccessExclusiveLock, + true); +#else address = get_object_address(OBJECT_TRIGGER, namelist, NIL, &relation, AccessExclusiveLock, true); +#endif + if (!OidIsValid(address.objectId)) return; /* Actually remove trigger */ n->removeType = OBJECT_TRIGGER; n->objects = list_make1(namelist); +#if PG_VERSION_NUM < 100000 n->arguments = NIL; +#endif n->behavior = DROP_RESTRICT; /* default behavior */ n->missing_ok = true; n->concurrent = false; diff --git a/src/partition_filter.c b/src/partition_filter.c index f6a19f74..668ca32e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -436,6 +436,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, int nparts; bool isnull; Datum value; + Oid parent = PrelParentRelid(prel); /* Execute expression */ value = ExecEvalExprCompat(expr_state, econtext, &isnull, @@ -453,11 +454,11 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - selected_partid = create_partitions_for_value(PrelParentRelid(prel), + selected_partid = create_partitions_for_value(parent, value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + invalidate_pathman_relation_info(parent, NULL); } else selected_partid = parts[0]; @@ -469,15 +470,15 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, if (rri_holder == NULL) { /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(PrelParentRelid(prel), NULL); + invalidate_pathman_relation_info(parent, NULL); /* Get a fresh PartRelationInfo */ - prel = get_pathman_relation_info(PrelParentRelid(prel)); + prel = get_pathman_relation_info(parent); /* Paranoid check (all partitions have vanished) */ if (!prel) 
elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(PrelParentRelid(prel))); + get_rel_name_or_relid(parent)); } /* If partition has subpartitions */ else if (rri_holder->has_subpartitions) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fa0a982c..ba286020 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -671,8 +671,10 @@ is_tuple_convertible(PG_FUNCTION_ARGS) map = convert_tuples_by_name(RelationGetDescr(rel1), RelationGetDescr(rel2), ERR_PART_DESC_CONVERT); - /* Now free map */ - pfree(map); + + /* Now free map. Note that map can be NULL if conversion isn't needed */ + if (map) + pfree(map); } PG_CATCH(); { From 618ab2ffef561fd8e4db9e50b5e159cef6602ff4 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 15:56:55 +0300 Subject: [PATCH 0677/1124] bring partitioning_test.py from rel_future_beta branch --- tests/python/partitioning_test.py | 2225 +++++++++++++++-------------- 1 file changed, 1184 insertions(+), 1041 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 6d79dd96..32c30492 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ import unittest @@ -20,1051 +19,1195 @@ version = get_config().get("VERSION_NUM") + # Helper function for json equality def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + 
else: + return obj def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper + """ To run tests with FDW support, set environment variable TEST_FDW=1 """ + def wrapper(*args, **kwargs): + if os.environ.get('FDW_DISABLED') != '1': + func(*args, **kwargs) + else: + print('Warning: FDW features tests are disabled, skipping...') -class PartitioningTests(unittest.TestCase): + return wrapper - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def test_concurrent(self): - """Tests concurrent partitioning""" - - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - 
node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that 
direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 
'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - 
master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by 
id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - 
node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": 
"range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove 
all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - 
self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check 
number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select 
set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - 
[node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar 
to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - 
con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +class PartitioningTests(unittest.TestCase): + def setUp(self): + self.setup_cmd = [ + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ] + + def tearDown(self): + stop_all() + + def start_new_pathman_cluster(self, name='test', allows_streaming=False): + node = get_new_node(name) + node.init(allows_streaming=allows_streaming) + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('postgres', 'create extension pg_pathman') + return node + + def init_test_data(self, node): + """ Initialize pg_pathman extension and test data """ + for cmd in self.setup_cmd: + node.safe_psql('postgres', cmd) + + def catchup_replica(self, master, replica): + 
""" Wait until replica synchronizes with master """ + if version >= 100000: + wait_lsn_query = \ + 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + else: + wait_lsn_query = \ + 'SELECT pg_current_xlog_location() <= replay_location ' \ + 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ + % replica.name + master.poll_query_until('postgres', wait_lsn_query) + + def test_concurrent(self): + """ Test concurrent partitioning """ + + node = self.start_new_pathman_cluster() + self.init_test_data(node) + + node.psql('postgres', "select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql('postgres', """ + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute('postgres', """ + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + node = get_new_node('master') + replica = get_new_node('repl') + + # initialize master server + node = self.start_new_pathman_cluster(allows_streaming=True) + node.backup('my_backup') + + # initialize replica from backup + replica.init_from_backup(node, 'my_backup', has_streaming=True) + replica.start() + + # initialize pg_pathman extension and some test data + self.init_test_data(node) + + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + 
replica.psql('postgres', 'explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('postgres', 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + import threading + import time + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql('postgres', query) + with lock: + flag.set(True) + + # Initialize master server + node = get_new_node('master') + + node.init() + node.append_conf("postgresql.conf", 
"shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql( + 'postgres', + 'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' + + 'insert into abc select generate_series(1, 100000); ' + + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);') + + # Start transaction that will create partition + con = node.connect() + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = [ + 'select prepend_range_partition(\'abc\')', + 'select append_range_partition(\'abc\')', + 'select add_range_partition(\'abc\', 500000, 550000)', + ] + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. 
Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute('postgres', + 'select get_tablespace(\'{}\')'.format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + node = get_new_node('master') + node.init() + node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql('postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @if_fdw_enabled + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + master = get_new_node('test') + 
master.init() + master.append_conf('postgresql.conf', """ + shared_preload_libraries='pg_pathman, postgres_fdw'\n + """) + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql('postgres', """ + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + # Start foreign server + fserv = get_new_node('fserv') + fserv.init().start() + fserv.safe_psql('postgres', "create table ftable(id serial, name text)") + fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql('postgres', """ + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql('postgres', """ + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql('postgres', """ + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + 'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable 
order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") + + @if_fdw_enabled + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + import json + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < 90600: + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + 
+ alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') + + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= 100000: + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + 
self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + 
node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in 
con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') 
# load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + 
con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_pg_dump(self): + """ + Test using dump and restore of partitioned table through pg_dump and pg_restore tools. + + Test strategy: + - test range and hash partitioned tables; + - for each partitioned table check on restorable side the following quantities: + * constraints related to partitioning; + * init callback function and enable parent flag; + * number of rows in parent and child tables; + * plan validity of simple SELECT query under partitioned table; + - check dumping using the following parameters of pg_dump: + * format = plain | custom; + * using of inserts and copy. + - all test cases are carried out on tables half-full with data located in parent part, + the rest of data - in child tables. 
+ """ + + import subprocess + + # Init and start postgres instance with preload pg_pathman module + node = get_new_node('test') + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + """) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute( + 'insert into range_partitioned select i from generate_series(1, 500) i' + ) + con.execute('create table hash_partitioned (i integer not null)') + con.execute( + 'insert into hash_partitioned select i from generate_series(1, 500) i' + ) + + # partition table keeping data in base table + # enable_parent parameter automatically becames true + con.execute( + 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)' + ) + con.execute( + 'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)' + ) + + # fillin child tables with remain data + con.execute( + 'insert into range_partitioned select i from generate_series(501, 1000) i' + ) + con.execute( + 'insert into hash_partitioned select i from generate_series(501, 1000) i' + ) + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute( + 'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + con.execute( + 'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')' + ) + + # turn off enable_parent option + con.execute( + 'select set_enable_parent(\'range_partitioned\', false)') 
+ con.execute('select set_enable_parent(\'hash_partitioned\', false)') + + con.commit() + + # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) + + def cmp_full(con1, con2): + """ + Compare selection partitions in plan + and contents in partitioned tables + """ + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', 'only range_partitioned', + 'hash_partitioned', 'only hash_partitioned' + ] + for table_ref in table_refs: + plan_initial = con1.execute( + plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute( + plan_query % table_ref)[0][0][0]['Plan'] + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH + + content_initial = [ + x[0] for x in con1.execute(content_query % table_ref) + ] + content_copy = [ + x[0] for x in con2.execute(content_query % table_ref) + ] + if content_initial != content_copy: + return CONTENTS_MISMATCH + + return CMP_OK + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', + 'alter system set pg_pathman.override_copy to off') + node.psql('copy', + 'alter system set pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, turnon_pathman, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--inserts", "initial" + ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via 
INSERTs + (None, None, [ + node.get_bin_path("pg_dump"), "-p {}".format(node.port), + "--format=custom", "initial" + ], [ + node.get_bin_path("pg_restore"), "-p {}".format(node.port), + "--dbname=copy" + ], cmp_full), # dump in archive format + ] + + try: + FNULL = open(os.devnull, 'w') + + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), + ' '.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen( + pg_restore_params, + stdin=subprocess.PIPE, + stdout=FNULL, + stderr=FNULL) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # validate data + with node.connect('initial') as con1, \ + node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual( + cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" + % dump_restore_cmd) + self.assertNotEqual( + cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" + % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on 
c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') + + except: + raise + finally: + FNULL.close() + + # Stop instance and finish work + node.stop() + node.cleanup() + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + node = self.start_new_pathman_cluster(allows_streaming=False) + + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 
'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + FNULL = open(os.devnull, 'w') + + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=FNULL, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=FNULL, + stderr=FNULL, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + # Stop instance and finish work + node.stop() + node.cleanup() + FNULL.close() -if __name__ == "__main__": - unittest.main() +if __name__ == "__main__": + unittest.main() From ef953a337e3146ba75e644481a52710114286226 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 25 Jul 2017 17:28:02 +0300 Subject: [PATCH 0678/1124] minor fixes --- src/pl_funcs.c | 11 +++++------ src/planner_tree_modification.c | 1 - 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ba286020..6672f124 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -668,13 +668,12 @@ is_tuple_convertible(PG_FUNCTION_ARGS) void *map; /* we don't actually need it */ /* Try to build a conversion map */ - map = 
convert_tuples_by_name(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); - /* Now free map. Note that map can be NULL if conversion isn't needed */ - if (map) - pfree(map); + /* Now free map */ + pfree(map); } PG_CATCH(); { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2f82f7f6..246436e3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -367,7 +367,6 @@ find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) return FP_NON_SINGULAR_RESULT; /* Exit if there's no quals (no use) */ - /* TODO: What if there is only one partition? */ if (!quals) return FP_NON_SINGULAR_RESULT; From 063ea8e363550680afa45fffd9255f8c459b5eb0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 26 Jul 2017 17:58:59 +0300 Subject: [PATCH 0679/1124] reduce diff with rel_future_beta --- src/partition_filter.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 668ca32e..323e6a8f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -431,12 +431,12 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; - Oid selected_partid = InvalidOid; + Oid parent_relid = PrelParentRelid(prel), + partition_relid = InvalidOid; Oid *parts; int nparts; bool isnull; Datum value; - Oid parent = PrelParentRelid(prel); /* Execute expression */ value = ExecEvalExprCompat(expr_state, econtext, &isnull, @@ -454,31 +454,31 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - selected_partid = create_partitions_for_value(parent, + partition_relid = create_partitions_for_value(parent_relid, value, prel->ev_type); /* 
get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); + invalidate_pathman_relation_info(parent_relid, NULL); } - else selected_partid = parts[0]; + else partition_relid = parts[0]; old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder = scan_result_parts_storage(selected_partid, parts_storage); + rri_holder = scan_result_parts_storage(partition_relid, parts_storage); MemoryContextSwitchTo(old_mcxt); - /* Could not find suitable partition */ + /* This partition has been dropped, repeat with a new 'prel' */ if (rri_holder == NULL) { /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); + invalidate_pathman_relation_info(parent_relid, NULL); /* Get a fresh PartRelationInfo */ - prel = get_pathman_relation_info(parent); + prel = get_pathman_relation_info(parent_relid); /* Paranoid check (all partitions have vanished) */ if (!prel) elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(parent)); + get_rel_name_or_relid(parent_relid)); } /* If partition has subpartitions */ else if (rri_holder->has_subpartitions) @@ -486,7 +486,7 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, const PartRelationInfo *subprel; /* Fetch PartRelationInfo for this partitioned relation */ - subprel = get_pathman_relation_info(selected_partid); + subprel = get_pathman_relation_info(partition_relid); Assert(subprel != NULL); /* Build an expression state if not yet */ @@ -685,7 +685,10 @@ partition_filter_exec(CustomScanState *node) tmp_slot = econtext->ecxt_scantuple; econtext->ecxt_scantuple = slot; - /* Search for a matching partition */ + /* + * Search for a matching partition. + * WARNING: 'prel' might change after this call! 
+ */ rri_holder = select_partition_for_insert(econtext, state->expr_state, prel, &state->result_parts, estate); From 2dd61661fc17ae4a05ef397509af021a06149d1d Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 27 Jul 2017 16:19:05 +0300 Subject: [PATCH 0680/1124] Change cassert container --- make_images.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/make_images.py b/make_images.py index dc01407e..4de7d40e 100755 --- a/make_images.py +++ b/make_images.py @@ -12,18 +12,36 @@ DOCKER_ID = 'pathman' ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' + +''' +How to create this patch: + 1) put `import ipdb; ipdb.set_trace()` in make_alpine_image, after `open(patch_name)..` + 2) run the script + 3) in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1 && diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` + 4) contents of cassert.patch put to variable below + 5) change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` +''' ALPINE_PATCH = b''' ---- Dockerfile 2017-07-25 12:43:20.424984422 +0300 -+++ Dockerfile 2017-07-25 12:46:10.279267520 +0300 -@@ -86,6 +86,7 @@ - --enable-integer-datetimes \\ +--- Dockerfile 2017-07-27 14:54:10.403971867 +0300 ++++ Dockerfile 2017-07-27 14:56:01.132503106 +0300 +@@ -79,7 +79,7 @@ + && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\ + # configure options taken from: + # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 +- && ./configure \\ ++ && CFLAGS="-O0" ./configure \\ + --build="$gnuArch" \\ + # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" + # --enable-nls \\ +@@ -87,7 +87,7 @@ --enable-thread-safety \\ 
--enable-tap-tests \\ -+ --enable-cassert \\ # skip debugging info -- we want tiny size instead - # --enable-debug \\ +-# --enable-debug \\ ++ --enable-debug \\ --disable-rpath \\ - + --with-uuid=e2fs \\ + --with-gnu-ld \\ ''' CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID From 075d00cf9e446605f773fd48ec167069496be733 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 16:20:16 +0300 Subject: [PATCH 0681/1124] reorder args of select_partition_for_insert(), eliminate tmp_slot --- src/include/partition_filter.h | 7 ++++--- src/partition_filter.c | 25 +++++++++++-------------- src/utility_stmt_hooking.c | 21 ++++++++++++--------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index e053d2a5..00294050 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -135,10 +135,11 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder *select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, +ResultRelInfoHolder *select_partition_for_insert(ExprState *expr_state, + ExprContext *econtext, + EState *estate, const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); + ResultPartsStorage *parts_storage); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_filter.c b/src/partition_filter.c index 323e6a8f..a046cd2b 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -424,10 +424,11 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
*/ ResultRelInfoHolder * -select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, +select_partition_for_insert(ExprState *expr_state, + ExprContext *econtext, + EState *estate, const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate) + ResultPartsStorage *parts_storage) { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; @@ -496,11 +497,9 @@ select_partition_for_insert(ExprContext *econtext, ExprState *expr_state, Assert(rri_holder->expr_state != NULL); /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(econtext, - rri_holder->expr_state, - subprel, - parts_storage, - estate); + rri_holder = select_partition_for_insert(rri_holder->expr_state, + econtext, estate, + subprel, parts_storage); } } /* Loop until we get some result */ @@ -665,7 +664,6 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - TupleTableSlot *tmp_slot; /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); @@ -682,17 +680,16 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - tmp_slot = econtext->ecxt_scantuple; + /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; /* * Search for a matching partition. * WARNING: 'prel' might change after this call! 
*/ - rri_holder = select_partition_for_insert(econtext, state->expr_state, prel, - &state->result_parts, estate); - - econtext->ecxt_scantuple = tmp_slot; + rri_holder = select_partition_for_insert(state->expr_state, + econtext, estate, + prel, &state->result_parts); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 14bfb800..b9ae406e 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -608,8 +608,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, for (;;) { - TupleTableSlot *slot, - *tmp_slot; + TupleTableSlot *slot; bool skip_tuple; Oid tuple_oid = InvalidOid; @@ -637,7 +636,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) break; - /* We can form the input tuple. */ + /* We can form the input tuple */ tuple = heap_form_tuple(tupDesc, values, nulls); if (tuple_oid != InvalidOid) @@ -648,15 +647,19 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecSetSlotDescriptor(slot, tupDesc); ExecStoreTuple(tuple, slot, InvalidBuffer, false); - /* Execute expression */ - tmp_slot = econtext->ecxt_scantuple; + /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; - /* Search for a matching partition */ - rri_holder = select_partition_for_insert(econtext, expr_state, prel, - &parts_storage, estate); - econtext->ecxt_scantuple = tmp_slot; + /* + * Search for a matching partition. + * WARNING: 'prel' might change after this call! 
+ */ + rri_holder = select_partition_for_insert(expr_state, econtext, estate, + prel, &parts_storage); + child_result_rel = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = child_result_rel; /* From c2e09f2f09a212428773898ad6fe781d9f08720d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 16:59:30 +0300 Subject: [PATCH 0682/1124] small adjustments in append_child_relation() --- src/pg_pathman.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b3058fe2..a45e6e4d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -379,7 +379,7 @@ append_child_relation(PlannerInfo *root, *child_rel; Relation child_relation; AppendRelInfo *appinfo; - Index childRTindex; + Index child_rti; PlanRowMark *child_rowmark; Node *childqual; List *childquals; @@ -415,22 +415,20 @@ append_child_relation(PlannerInfo *root, child_rte = copyObject(parent_rte); child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->requiredPerms = 0; /* perform all checks on parent */ - /* - * If it is the parent relation, then set inh flag to false to prevent - * further recursive unrolling. Else if relation is a child and has subclass - * then we will need to check if there are subpartitions - */ - child_rte->inh = (child_oid != parent_rte->relid) ? - child_relation->rd_rel->relhassubclass : false; + child_rte->requiredPerms = 0; /* perform all checks on parent */ + + /* Does this child have subpartitions? */ + child_rte->inh = (child_oid == parent_rte->relid) ? 
+ false : /* it's a parent, skip */ + child_relation->rd_rel->relhassubclass; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); - childRTindex = list_length(root->parse->rtable); - root->simple_rte_array[childRTindex] = child_rte; + child_rti = list_length(root->parse->rtable); + root->simple_rte_array[child_rti] = child_rte; /* Create RelOptInfo for this child (and make some estimates as well) */ - child_rel = build_simple_rel_compat(root, childRTindex, parent_rel); + child_rel = build_simple_rel_compat(root, child_rti, parent_rel); /* Increase total_table_pages using the 'child_rel' */ root->total_table_pages += (double) child_rel->pages; @@ -441,7 +439,7 @@ append_child_relation(PlannerInfo *root, { child_rowmark = makeNode(PlanRowMark); - child_rowmark->rti = childRTindex; + child_rowmark->rti = child_rti; child_rowmark->prti = parent_rti; child_rowmark->rowmarkId = parent_rowmark->rowmarkId; /* Reselect rowmark type, because relkind might not match parent */ @@ -469,14 +467,14 @@ append_child_relation(PlannerInfo *root, /* Build an AppendRelInfo for this child */ appinfo = makeNode(AppendRelInfo); appinfo->parent_relid = parent_rti; - appinfo->child_relid = childRTindex; + appinfo->child_relid = child_rti; appinfo->parent_reloid = parent_rte->relid; /* Store table row types for wholerow references */ appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - make_inh_translation_list(parent_relation, child_relation, childRTindex, + make_inh_translation_list(parent_relation, child_relation, child_rti, &appinfo->translated_vars); /* Now append 'appinfo' to 'root->append_rel_list' */ @@ -575,18 +573,18 @@ append_child_relation(PlannerInfo *root, /* Close child relations, but keep locks */ heap_close(child_relation, NoLock); - /* - * Recursively expand child partition if it has subpartitions - */ + /* 
Recursively expand child partition if it has subpartitions */ if (child_rte->inh) { + child_rte->inh = false; + pathman_rel_pathlist_hook(root, child_rel, - childRTindex, + child_rti, child_rte); } - return childRTindex; + return child_rti; } From c44284b8f40edbcd139ac5eb6dd490397ba573c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 17:54:21 +0300 Subject: [PATCH 0683/1124] fixes in scan_result_parts_storage(), imporve pathman_subpartitions test --- expected/pathman_subpartitions.out | 233 +++++++++++++++-------------- sql/pathman_subpartitions.sql | 122 ++++++++------- src/include/partition_filter.h | 6 +- src/partition_filter.c | 22 +-- 4 files changed, 202 insertions(+), 181 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index f4a2620d..e103b3d5 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -1,89 +1,90 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; /* Create two level partitioning structure */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('abc', 'a', 0, 100, 2); +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_hash_partitions('abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); create_hash_partitions ------------------------ 3 (1 row) -SELECT create_hash_partitions('abc_2', 'b', 2); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); create_hash_partitions ------------------------ 2 (1 row) SELECT * FROM pathman_partition_list; - parent | partition | parttype | expr | range_min | range_max 
---------+-----------+----------+------+-----------+----------- - abc | abc_1 | 2 | a | 0 | 100 - abc | abc_2 | 2 | a | 100 | 200 - abc_1 | abc_1_0 | 1 | a | | - abc_1 | abc_1_1 | 1 | a | | - abc_1 | abc_1_2 | 1 | a | | - abc_2 | abc_2_0 | 1 | b | | - abc_2 | abc_2_1 | 1 | b | | + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | (7 rows) -SELECT tableoid::regclass, * FROM abc; - tableoid | a | b -----------+-----+----- - abc_1_0 | 21 | 21 - abc_1_0 | 61 | 61 - abc_1_1 | 41 | 41 - abc_1_2 | 1 | 1 - abc_1_2 | 81 | 81 - abc_2_0 | 101 | 101 - abc_2_0 | 141 | 141 - abc_2_1 | 121 | 121 - abc_2_1 | 161 | 161 - abc_2_1 | 181 | 181 +SELECT tableoid::regclass, * FROM subpartitions.abc; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 (10 rows) -/* Insert should result in creating of new subpartition */ -SELECT append_range_partition('abc', 'abc_3'); +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); append_range_partition ------------------------ - abc_3 + subpartitions.abc_3 (1 row) -SELECT create_range_partitions('abc_3', 'b', 200, 10, 
2); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; - parent | partition | parttype | expr | range_min | range_max ---------+-----------+----------+------+-----------+----------- - abc_3 | abc_3_1 | 2 | b | 200 | 210 - abc_3 | abc_3_2 | 2 | b | 210 | 220 +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -INSERT INTO abc VALUES (215, 215); -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; - parent | partition | parttype | expr | range_min | range_max ---------+-----------+----------+------+-----------+----------- - abc_3 | abc_3_1 | 2 | b | 200 | 210 - abc_3 | abc_3_2 | 2 | b | 210 | 220 +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; - tableoid | a | b -----------+-----+----- - abc_3_2 | 215 | 215 +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 (1 row) /* Pruning tests */ -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; QUERY PLAN 
--------------------------------- Append @@ -98,7 +99,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; Filter: (a < 150) (10 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; QUERY PLAN --------------------------------- Append @@ -117,7 +118,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; Filter: (b = 215) (14 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; QUERY PLAN ------------------------------------------------- Append @@ -126,7 +127,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; Filter: ((a = 215) AND (b = 215)) (4 rows) -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; QUERY PLAN ---------------------------------- Append @@ -136,7 +137,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; (4 rows) /* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ DECLARE @@ -151,14 +152,14 @@ BEGIN FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) LOOP RETURN NEXT subpartition; END LOOP; END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) RETURNS SETOF TEXT AS $$ DECLARE @@ -172,124 +173,128 @@ BEGIN RETURN; END; $$ LANGUAGE plpgsql; -SELECT create_update_triggers('abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ ERROR: Parent 
table must have an update trigger -SELECT create_update_triggers('abc'); /* Only on parent */ +SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ create_update_triggers ------------------------ (1 row) -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; - p | get_triggers ----------+----------------------------------------------------------------------------------------------------------------------------- - abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3 FOR EACH ROW EXECUTE PROCEDURE 
pathman_update_trigger_func() - abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + p | get_triggers +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------- + subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE 
pathman_update_trigger_func() + subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (14 rows) -SELECT append_range_partition('abc', 'abc_4'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); append_range_partition ------------------------ - abc_4 + subpartitions.abc_4 (1 row) -SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); create_hash_partitions ------------------------ 2 (1 row) - * be created on subpartitions */ -SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; - p | get_triggers ----------+----------------------------------------------------------------------------------------------------------------------------- - abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON abc_4_1 FOR EACH ROW 
EXECUTE PROCEDURE pathman_update_trigger_func() +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; + p | get_triggers +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------- + subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (4 rows) -SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ ERROR: Parent table must not have an update trigger -SELECT drop_triggers('abc'); /* Only on parent */ +SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ drop_triggers --------------- (1 row) -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; p | get_triggers ---+-------------- (0 rows) -DROP TABLE abc CASCADE; +DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects -/* Test that update trigger words correclty */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -SELECT create_range_partitions('abc', 'a', 0, 100, 2); +/* Test that update trigger works correctly */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT 
create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); create_range_partitions ------------------------- 2 (1 row) -SELECT create_update_triggers('abc'); +SELECT create_update_triggers('subpartitions.abc'); create_update_triggers ------------------------ (1 row) -INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ -SELECT tableoid::regclass, * FROM abc; - tableoid | a | b -----------+----+---- - abc_1_1 | 25 | 25 +INSERT INTO subpartitions.abc VALUES (25, 25); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 (1 row) -UPDATE abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ - tableoid | a | b -----------+-----+---- - abc_2_1 | 125 | 25 +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 (1 row) -UPDATE abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ - tableoid | a | b -----------+-----+---- - abc_2_2 | 125 | 75 +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 (1 row) -UPDATE abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * 
FROM abc; /* Should create partition abc_2_3 */ - tableoid | a | b -----------+-----+----- - abc_2_3 | 125 | 125 +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 (1 row) -DROP TABLE abc CASCADE; +DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 10 other objects +DROP SCHEMA subpartitions CASCADE; +NOTICE: drop cascades to 2 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 4cf5d1a1..6f8d035c 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -1,32 +1,37 @@ \set VERBOSITY terse CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; + + /* Create two level partitioning structure */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -INSERT INTO abc SELECT i, i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('abc', 'a', 0, 100, 2); -SELECT create_hash_partitions('abc_1', 'a', 3); -SELECT create_hash_partitions('abc_2', 'b', 2); +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); SELECT * FROM pathman_partition_list; -SELECT tableoid::regclass, * FROM abc; +SELECT tableoid::regclass, * FROM subpartitions.abc; -/* Insert should result in creating of new subpartition */ -SELECT append_range_partition('abc', 'abc_3'); -SELECT create_range_partitions('abc_3', 'b', 200, 10, 2); -SELECT * FROM pathman_partition_list WHERE parent = 'abc_3'::regclass; -INSERT INTO abc VALUES (215, 215); -SELECT * FROM pathman_partition_list WHERE 
parent = 'abc_3'::regclass; -SELECT tableoid::regclass, * FROM abc WHERE a = 215 AND b = 215; +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; /* Pruning tests */ -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a < 150; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE b = 215; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a = 215 AND b = 215; -EXPLAIN (COSTS OFF) SELECT * FROM abc WHERE a >= 210 and b >= 210; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + + /* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION partitions_tree(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ DECLARE @@ -41,7 +46,7 @@ BEGIN FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) LOOP RETURN NEXT subpartition; END LOOP; @@ -49,7 +54,7 @@ BEGIN END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION get_triggers(rel REGCLASS) +CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) RETURNS SETOF TEXT AS $$ DECLARE @@ -64,36 +69,47 @@ BEGIN END; $$ LANGUAGE plpgsql; -SELECT create_update_triggers('abc_1'); /* Cannot perform on 
partition */ -SELECT create_update_triggers('abc'); /* Only on parent */ -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; - -SELECT append_range_partition('abc', 'abc_4'); -SELECT create_hash_partitions('abc_4', 'b', 2); /* Triggers should automatically - * be created on subpartitions */ -SELECT p, get_triggers(p) FROM partitions_tree('abc_4') as p; -SELECT drop_triggers('abc_1'); /* Cannot perform on partition */ -SELECT drop_triggers('abc'); /* Only on parent */ -SELECT p, get_triggers(p) FROM partitions_tree('abc') as p; /* No partitions */ - -DROP TABLE abc CASCADE; - -/* Test that update trigger words correclty */ -CREATE TABLE abc(a INTEGER NOT NULL, b INTEGER NOT NULL); -SELECT create_range_partitions('abc', 'a', 0, 100, 2); -SELECT create_range_partitions('abc_1', 'b', 0, 50, 2); -SELECT create_range_partitions('abc_2', 'b', 0, 50, 2); -SELECT create_update_triggers('abc'); - -INSERT INTO abc VALUES (25, 25); /* Should get into abc_1_1 */ -SELECT tableoid::regclass, * FROM abc; -UPDATE abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_1 */ -UPDATE abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM abc; /* Should be in abc_2_2 */ -UPDATE abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM abc; /* Should create partition abc_2_3 */ - -DROP TABLE abc CASCADE; - -DROP EXTENSION pg_pathman; \ No newline at end of file +SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ +SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; + +SELECT 
drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ +SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ +SELECT p, subpartitions.get_triggers(p) +FROM subpartitions.partitions_tree('subpartitions.abc') as p; + +DROP TABLE subpartitions.abc CASCADE; + + + +/* Test that update trigger works correctly */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_update_triggers('subpartitions.abc'); + +INSERT INTO subpartitions.abc VALUES (25, 25); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + +DROP TABLE subpartitions.abc CASCADE; + + + +DROP SCHEMA subpartitions CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 00294050..c20449ab 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -39,9 +39,9 @@ typedef struct { Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ - TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ - bool has_subpartitions; - ExprState *expr_state; /* if has_subpartitions true */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + bool has_children; /* hint that 
it might have children */ + ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; diff --git a/src/partition_filter.c b/src/partition_filter.c index a046cd2b..344f557a 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -327,8 +327,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); /* Are there subpartitions? */ - rri_holder->has_subpartitions = - (get_pathman_relation_info(partid) != NULL); + rri_holder->has_children = child_rel->rd_rel->relhassubclass; rri_holder->expr_state = NULL; /* Call on_new_rri_holder_callback() if needed */ @@ -481,25 +480,26 @@ select_partition_for_insert(ExprState *expr_state, elog(ERROR, "table \"%s\" is not partitioned", get_rel_name_or_relid(parent_relid)); } - /* If partition has subpartitions */ - else if (rri_holder->has_subpartitions) + /* This partition might have sub-partitions */ + else if (rri_holder->has_children) { - const PartRelationInfo *subprel; + const PartRelationInfo *sub_prel; /* Fetch PartRelationInfo for this partitioned relation */ - subprel = get_pathman_relation_info(partition_relid); - Assert(subprel != NULL); + sub_prel = get_pathman_relation_info(partition_relid); + + /* Might be a false alarm */ + if (!sub_prel) + break; /* Build an expression state if not yet */ if (!rri_holder->expr_state) - rri_holder->expr_state = prepare_expr_state(subprel, estate); - - Assert(rri_holder->expr_state != NULL); + rri_holder->expr_state = prepare_expr_state(sub_prel, estate); /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(rri_holder->expr_state, econtext, estate, - subprel, parts_storage); + sub_prel, parts_storage); } } /* Loop until we get some result */ From 8467f5a0a682b7fec83bfa34581aa45f13401698 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 27 Jul 2017 18:02:28 +0300 Subject: [PATCH 0684/1124] make pathman_subpartitions more 
stable --- expected/pathman_subpartitions.out | 23 +++++++++++++---------- sql/pathman_subpartitions.sql | 13 ++++++++----- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index e103b3d5..bf31a580 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -34,17 +34,17 @@ SELECT * FROM pathman_partition_list; subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | (7 rows) -SELECT tableoid::regclass, * FROM subpartitions.abc; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; tableoid | a | b -----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 subpartitions.abc_1_0 | 21 | 21 - subpartitions.abc_1_0 | 61 | 61 subpartitions.abc_1_1 | 41 | 41 - subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 61 | 61 subpartitions.abc_1_2 | 81 | 81 subpartitions.abc_2_0 | 101 | 101 - subpartitions.abc_2_0 | 141 | 141 subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 subpartitions.abc_2_1 | 161 | 161 subpartitions.abc_2_1 | 181 | 181 (10 rows) @@ -77,7 +77,7 @@ SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regcl subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 (2 rows) -SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; tableoid | a | b -----------------------+-----+----- subpartitions.abc_3_2 | 215 | 215 @@ -182,17 +182,18 @@ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; p | get_triggers 
-----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH 
ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() @@ -214,7 +215,8 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p +ORDER BY p; p | get_triggers -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() @@ -232,7 +234,8 @@ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; p | get_triggers ---+-------------- (0 rows) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 6f8d035c..b1a79874 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -12,7 +12,7 @@ SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); SELECT * FROM pathman_partition_list; -SELECT tableoid::regclass, * FROM subpartitions.abc; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; /* Insert should result in creation of new subpartition */ SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); @@ -20,7 +20,7 @@ SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; INSERT 
INTO subpartitions.abc VALUES (215, 215); SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; -SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; /* Pruning tests */ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; @@ -72,17 +72,20 @@ $$ LANGUAGE plpgsql; SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p; +FROM subpartitions.partitions_tree('subpartitions.abc_4') as p +ORDER BY p; SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p; +FROM subpartitions.partitions_tree('subpartitions.abc') as p +ORDER BY p; DROP TABLE subpartitions.abc CASCADE; From b96b32e20fe30cd44466fa360f70154b4a66d0ea Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 28 Jul 2017 13:48:27 +0300 Subject: [PATCH 0685/1124] Fix cmocka tests --- src/debug_print.c | 19 ++++++++++--------- tests/cmocka/Makefile | 1 + 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 9734ca06..1a4ea417 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -105,10 +105,9 @@ irange_print(IndexRange irange) return str.data; } - -/* ---------------- - * printatt - * 
---------------- +#ifndef CMOCKA_TESTS +/* + * Print attribute information */ static char * printatt(unsigned attributeId, @@ -127,9 +126,8 @@ printatt(unsigned attributeId, attributeP->attbyval ? 't' : 'f'); } -/* ---------------- - * debugtup - print one tuple for an interactive backend - * ---------------- +/* + * Print one tuple for an interactive backend */ static char * debugtup(TupleTableSlot *slot) @@ -170,6 +168,9 @@ debugtup(TupleTableSlot *slot) return result; } +/* + * Print contents of tuple slot + */ #ifdef __GNUC__ __attribute__((unused)) #endif @@ -186,8 +187,7 @@ slot_print(TupleTableSlot *slot) } /* - * rt_print - * return contents of range table + * Print contents of range table */ #ifdef __GNUC__ __attribute__((unused)) @@ -251,3 +251,4 @@ rt_print(const List *rtable) return str.data; #undef APPEND_STR } +#endif diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index e31e6d95..f79e2637 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,6 +8,7 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +CFLAGS += -DCMOCKA_TESTS LDFLAGS += -lcmocka TEST_BIN = rangeset_tests From 41a5286cde5167cee8a370b749293e841975e98a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 14:26:34 +0300 Subject: [PATCH 0686/1124] simplify fini_result_parts_storage() --- src/partition_filter.c | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index f0edf76d..aa07f4ad 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -182,38 +182,21 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) HASH_SEQ_STATUS stat; ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - /* Close partitions and free free conversion-related stuff */ - if (close_rels) + hash_seq_init(&stat, parts_storage->result_rels_table); + while 
((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Close partitions and indices */ + if (close_rels) { ExecCloseIndices(rri_holder->result_rel_info); heap_close(rri_holder->result_rel_info->ri_RelationDesc, parts_storage->heap_close_lock_mode); - - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; - - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); - - free_conversion_map(rri_holder->tuple_map); } - } - /* Else just free conversion-related stuff */ - else - { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Free conversion-related stuff */ + if (rri_holder->tuple_map) { - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; - FreeTupleDesc(rri_holder->tuple_map->indesc); FreeTupleDesc(rri_holder->tuple_map->outdesc); From 16eff6b0b3dd5b05f52189cbd588001a80910c55 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 14:48:47 +0300 Subject: [PATCH 0687/1124] use PG_REGRESS_DIFF_OPTS --- run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_tests.sh b/run_tests.sh index 1b9d7a70..6622ae39 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -66,6 +66,7 @@ status=$? if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi # run regression tests +export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) PGPORT=55435 make USE_PGXS=1 installcheck || status=$? 
# show diff if it exists From 9b36a4c3c8912abbcf7f889046c44171b359a9ad Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 28 Jul 2017 15:03:18 +0300 Subject: [PATCH 0688/1124] make trigger listing in pathman_subpartitions more stable --- expected/pathman_subpartitions.out | 28 ++++++++++++++-------------- sql/pathman_subpartitions.sql | 12 ++++++------ 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index bf31a580..e6214f10 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -181,23 +181,23 @@ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers +ORDER BY p, trig; + p | trig -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE 
pathman_update_trigger_func() + subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (14 rows) @@ -214,13 +214,13 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM 
subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - p | get_triggers +ORDER BY p, trig; + p | trig -----------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() + subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() (4 rows) @@ -233,11 +233,11 @@ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ (1 row) -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ----+-------------- +ORDER BY p, trig; + p | trig +---+------ (0 rows) DROP TABLE subpartitions.abc CASCADE; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b1a79874..00ae6a4a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -71,21 +71,21 @@ $$ LANGUAGE plpgsql; SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM 
subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; +ORDER BY p, trig; SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; +ORDER BY p, trig; SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) +SELECT p, subpartitions.get_triggers(p) as trig FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; +ORDER BY p, trig; DROP TABLE subpartitions.abc CASCADE; From 503f444435f5d8eb5be103ffeedd2d6b4de47d56 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 15:48:23 +0300 Subject: [PATCH 0689/1124] Fix trigger related tests (WIP) --- expected/pathman_basic.out | 52 +----------------- expected/pathman_calamity.out | 84 ---------------------------- expected/pathman_expressions.out | 44 --------------- sql/pathman_basic.sql | 7 --- sql/pathman_calamity.sql | 30 ---------- sql/pathman_expressions.sql | 12 ---- src/hooks.c | 3 - src/include/partition_filter.h | 5 +- src/include/partition_update.h | 10 ++-- src/partition_filter.c | 23 ++------ src/partition_update.c | 94 ++++++++++++++++---------------- 11 files changed, 61 insertions(+), 303 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index c19e75ca..0a46ba3e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1087,7 +1087,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A (6 rows) SELECT pathman.detach_range_partition('test.range_rel_archive'); -NOTICE: trigger "range_rel_upd_trig" for relation "test.range_rel_archive" does not exist, skipping detach_range_partition ------------------------ 
test.range_rel_archive @@ -1227,7 +1226,6 @@ SELECT * FROM test.hash_rel WHERE id = 123; /* Test replacing hash partition */ CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); -NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_0" does not exist, skipping replace_hash_partition ------------------------ test.hash_rel_extern @@ -1281,7 +1279,6 @@ CREATE TABLE test.hash_rel_wrong( id INTEGER NOT NULL, value INTEGER); SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); -NOTICE: trigger "hash_rel_upd_trig" for relation "test.hash_rel_1" does not exist, skipping ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; QUERY PLAN @@ -1498,55 +1495,8 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) -SELECT pathman.create_update_triggers('test."TeSt"'); - create_update_triggers ------------------------- - -(1 row) - -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -SELECT * FROM test."TeSt" WHERE a = 1; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on "TeSt_2" - Filter: (a = 1) -(3 rows) - -SELECT pathman.drop_partitions('test."TeSt"'); -NOTICE: 0 rows copied from test."TeSt_0" -NOTICE: 0 rows copied from test."TeSt_1" -NOTICE: 3 rows copied from test."TeSt_2" - drop_partitions ------------------ - 3 -(1 row) - -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 251ec31c..a75e4518 100644 --- a/expected/pathman_calamity.out +++ 
b/expected/pathman_calamity.out @@ -398,36 +398,6 @@ SELECT build_check_constraint_name(NULL) IS NULL; t (1 row) -/* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ - build_update_trigger_name ---------------------------- - part_test_upd_trig -(1 row) - -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_name(NULL) IS NULL; - ?column? ----------- - t -(1 row) - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ - build_update_trigger_func_name ----------------------------------- - calamity.part_test_upd_trig_func -(1 row) - -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_func_name(NULL) IS NULL; - ?column? ----------- - t -(1 row) - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ build_sequence_name @@ -512,14 +482,6 @@ WARNING: table "pg_class" is not partitioned (1 row) -SELECT has_update_trigger(NULL); - has_update_trigger --------------------- - -(1 row) - -SELECT has_update_trigger(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -806,52 +768,6 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -/* check function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); - create_hash_partitions ------------------------- - 2 -(1 row) - -SELECT create_update_triggers('calamity.trig_test_tbl'); - create_update_triggers ------------------------- - -(1 row) - 
-SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ - drop_triggers ---------------- - -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 0 -(1 row) - -DROP TABLE calamity.trig_test_tbl CASCADE; -NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 134fcae9..463ad584 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -425,50 +425,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -SELECT create_update_triggers('test_exprs.range_rel'); - create_update_triggers ------------------------- - -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 12 -(1 row) - -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; -/* counts in partitions should be changed */ -SELECT COUNT(*) FROM test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 24 -(1 row) - DROP SCHEMA test_exprs CASCADE; NOTICE: drop cascades to 24 other objects DROP EXTENSION pg_pathman; 
diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index d34285e5..85dd076b 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -455,13 +455,6 @@ INSERT INTO test."TeSt" VALUES (1, 1); INSERT INTO test."TeSt" VALUES (2, 2); INSERT INTO test."TeSt" VALUES (3, 3); SELECT * FROM test."TeSt"; -SELECT pathman.create_update_triggers('test."TeSt"'); -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; -SELECT * FROM test."TeSt" WHERE a = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; -SELECT pathman.drop_partitions('test."TeSt"'); -SELECT * FROM test."TeSt"; DROP TABLE test."TeSt" CASCADE; CREATE TABLE test."RangeRel" ( diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..90c700a9 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -179,16 +179,6 @@ SELECT build_check_constraint_name('calamity.part_test'); /* OK */ SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ SELECT build_check_constraint_name(NULL) IS NULL; -/* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_name(NULL) IS NULL; - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_func_name(NULL) IS NULL; - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ SELECT build_sequence_name(1::REGCLASS); /* not ok */ @@ -222,9 +212,6 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ -SELECT has_update_trigger(NULL); -SELECT has_update_trigger(0::REGCLASS); /* not ok */ - /* check 
invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -347,23 +334,6 @@ SELECT merge_range_partitions('{calamity.merge_test_a_1, DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; - -/* check function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); -SELECT create_update_triggers('calamity.trig_test_tbl'); - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - -DROP TABLE calamity.trig_test_tbl CASCADE; - - DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 1c7f4dbe..46bceafb 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -168,17 +168,5 @@ INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('as SELECT COUNT(*) FROM test_exprs.range_rel_6; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; -SELECT create_update_triggers('test_exprs.range_rel'); -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; - -/* counts in partitions should be changed */ -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; - - DROP SCHEMA test_exprs CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c 
b/src/hooks.c index d732a317..f0714288 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -882,10 +882,7 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, * We unset junkfilter to disable junk cleaning in * ExecModifyTable. We don't need junk cleaning because * there is possible modification of tuple in `partition_filter_exec` - * Same time we need this junkfilter in PartitionFilter - * nodes, so we save it in node. */ - cstate->saved_junkFilter = cstate->resultRelInfo->ri_junkFilter; cstate->resultRelInfo->ri_junkFilter = NULL; /* hack, change UPDATE operation to INSERT */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 42344abc..ddb5b72f 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,8 +40,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ - JunkFilter *src_junkFilter; /* we keep junkfilter from scanned - ResultRelInfo here */ + JunkFilter *junkfilter; /* junkfilter for cached ResultRelInfo */ bool has_children; /* hint that it might have children */ ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; @@ -102,7 +101,7 @@ typedef struct CmdType command_type; TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ - JunkFilter *src_junkFilter; /* junkfilter for subplan_slot */ + JunkFilter *junkfilter; /* junkfilter for subplan_slot */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ ExprState *expr_state; /* for partitioning expression */ diff --git a/src/include/partition_update.h b/src/include/partition_update.h index b82ec61a..01405b26 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -26,12 +26,12 @@ typedef struct PartitionUpdateState { - CustomScanState css; + CustomScanState css; - Oid partitioned_table; - ResultRelInfo 
*resultRelInfo; - JunkFilter *saved_junkFilter; - Plan *subplan; /* proxy variable to store subplan */ + Oid partitioned_table; + ResultRelInfo *resultRelInfo; + JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; extern bool pg_pathman_enable_partition_update; diff --git a/src/partition_filter.c b/src/partition_filter.c index 80442203..7f34104e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -312,7 +312,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); - CopyToResultRelInfo(ri_junkFilter); + if (parts_storage->command_type != CMD_UPDATE) + CopyToResultRelInfo(ri_junkFilter); + else + child_result_rel_info->ri_junkFilter = NULL; + CopyToResultRelInfo(ri_projectReturning); CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); @@ -323,18 +327,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - rri_holder->src_junkFilter = NULL; - - if (parts_storage->command_type == CMD_UPDATE) - { - JunkFilter *junkfilter = parts_storage->saved_rel_info->ri_junkFilter; - - /* we don't need junk cleaning in ExecModifyTable */ - child_result_rel_info->ri_junkFilter = NULL; - - /* instead we do junk filtering ourselves */ - rri_holder->src_junkFilter = junkfilter; - } /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); @@ -691,6 +683,7 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) attr_map = build_attributes_map(prel, child_rel, &natts); expr = map_variable_attnos(expr, parent_varno, 0, attr_map, natts, &found_whole_row); + Assert(!found_whole_row); 
heap_close(child_rel, NoLock); } @@ -722,7 +715,6 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); state->subplan_slot = slot; - state->src_junkFilter = NULL; /* Save original ResultRelInfo */ saved_resultRelInfo = estate->es_result_relation_info; @@ -774,9 +766,6 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = resultRelInfo; - /* pass junkfilter to upper node */ - state->src_junkFilter = rri_holder->src_junkFilter; - /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { diff --git a/src/partition_update.c b/src/partition_update.c index 5d9e8dc4..93d44851 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -123,29 +123,19 @@ partition_update_exec(CustomScanState *node) TupleTableSlot *slot; PartitionUpdateState *state = (PartitionUpdateState *) node; - /* - * Restore junkfilter in base resultRelInfo, - * we do it because child's RelResultInfo expects its existence - * for proper initialization. 
- * Also we set jf_junkAttNo there, because - * it wasn't set in ModifyTable node initialization - */ - state->resultRelInfo->ri_junkFilter = state->saved_junkFilter; - /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) { - Datum datum; - char relkind; - ResultRelInfo *resultRelInfo, - *sourceRelInfo; - ItemPointer tupleid = NULL; - ItemPointerData tuple_ctid; - EPQState epqstate; - PartitionFilterState *child_state; - JunkFilter *junkfilter; + Datum datum; + ResultRelInfo *resultRelInfo, + *sourceRelInfo; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + EPQState epqstate; + PartitionFilterState *child_state; + char relkind; child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); @@ -154,41 +144,51 @@ partition_update_exec(CustomScanState *node) sourceRelInfo = child_state->result_parts.saved_rel_info; resultRelInfo = estate->es_result_relation_info; - junkfilter = child_state->src_junkFilter; - if (junkfilter != NULL) + /* we generate junkfilter, if it wasn't created before */ + if (state->junkfilter == NULL) + { + state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, + sourceRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecInitExtraTupleSlot(estate)); + + state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); + if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + + relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION) { - relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION) - { - bool isNull; - - datum = ExecGetJunkAttribute(child_state->subplan_slot, - junkfilter->jf_junkAttNo, &isNull); - /* shouldn't ever get a null result... 
*/ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ - tupleid = &tuple_ctid; - } - else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, "update node is not supported for foreign tables"); - else - elog(ERROR, "got unexpected type of relation for update"); - - /* - * Clean from junk attributes before INSERT, - * but only if slot wasn't converted in PartitionFilter - */ - if (TupIsNull(child_state->tup_convert_slot)) - slot = ExecFilterJunk(junkfilter, slot); + bool isNull; + + datum = ExecGetJunkAttribute(child_state->subplan_slot, + state->junkfilter->jf_junkAttNo, &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ + tupleid = &tuple_ctid; } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, "update node is not supported for foreign tables"); + else + elog(ERROR, "got unexpected type of relation for update"); + + /* + * Clean from junk attributes before INSERT, + * but only if slot wasn't converted in PartitionFilter + */ + if (TupIsNull(child_state->tup_convert_slot)) + slot = ExecFilterJunk(state->junkfilter, slot); /* Delete old tuple */ estate->es_result_relation_info = sourceRelInfo; + + Assert(tupleid != NULL); ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); /* we've got the slot that can be inserted to child partition */ From 37f618ee14876c9299408ef0f38c160d4717008b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 16:53:43 +0300 Subject: [PATCH 0690/1124] Fix update nodes --- src/partition_filter.c | 68 ++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 42 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 7f34104e..884fbb12 100644 --- a/src/partition_filter.c +++ 
b/src/partition_filter.c @@ -625,70 +625,54 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; - Node *expr; - Index parent_varno = 1; - ListCell *lc; PlanState *child_state; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); node->custom_ps = list_make1(child_state); - if (state->command_type == CMD_UPDATE) - parent_varno = ((Scan *) child_state->plan)->scanrelid; - else - { - Index varno = 1; - - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - - if (entry->relid == state->partitioned_table) - break; - - varno++; - } - - parent_varno = varno; - Assert(parent_varno <= list_length(estate->es_range_table)); - } - - if (state->expr_state == NULL) { /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); Assert(prel != NULL); - /* Change varno in expression Vars according to range table */ - Assert(parent_varno >= 1); - expr = PrelExpressionForRelid(prel, parent_varno); - - /* - * Also in updates we would operate with child relation, but - * expression expects varattnos like in base relation, so we map - * parent varattnos to child varattnos - */ + /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) { - int natts; - bool found_whole_row; - AttrNumber *attr_map; - Oid child_relid = getrelid(parent_varno, estate->es_range_table); + int natts; + bool found_whole_row; + AttrNumber *attr_map; + MemoryContext old_mcxt; + + /* + * In UPDATE queries we would operate with child relation, but + * expression expects varattnos like in base relation, so we map + * parent varattnos to child varattnos + */ + + Index relno = ((Scan *) child_state->plan)->scanrelid; + Node *expr = PrelExpressionForRelid(prel, relno); + Oid child_relid = getrelid(relno, 
estate->es_range_table); Relation child_rel = heap_open(child_relid, NoLock); attr_map = build_attributes_map(prel, child_rel, &natts); - expr = map_variable_attnos(expr, parent_varno, 0, attr_map, natts, + expr = map_variable_attnos(expr, relno, 0, attr_map, natts, &found_whole_row); Assert(!found_whole_row); heap_close(child_rel, NoLock); - } - /* Prepare state for expression execution */ - state->expr_state = prepare_expr_state(prel, estate); + /* Prepare state for expression execution */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + state->expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(old_mcxt); + } + else + { + /* simple INSERT, expression based on parent attribute numbers */ + state->expr_state = prepare_expr_state(prel, estate); + } } /* Init ResultRelInfo cache */ From e8c708ca8edad5f63c073f9cbdc537b890fc9744 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 17:01:25 +0300 Subject: [PATCH 0691/1124] Remove unused attributes from update node --- src/hooks.c | 10 +++------- src/include/partition_update.h | 1 - src/partition_filter.c | 18 +++++++++--------- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index f0714288..63808297 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -873,17 +873,13 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) { - PartitionUpdateState *cstate = (PartitionUpdateState *) subplanstate; - - /* Save parent resultRelInfo in PartitionUpdate node */ - cstate->resultRelInfo = mt_state->resultRelInfo + i; + ResultRelInfo *rri = mt_state->resultRelInfo + i; /* * We unset junkfilter to disable junk cleaning in - * ExecModifyTable. We don't need junk cleaning because - * there is possible modification of tuple in `partition_filter_exec` + * ExecModifyTable. 
*/ - cstate->resultRelInfo->ri_junkFilter = NULL; + rri->ri_junkFilter = NULL; /* hack, change UPDATE operation to INSERT */ mt_state->operation = CMD_INSERT; diff --git a/src/include/partition_update.h b/src/include/partition_update.h index 01405b26..30e5e329 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -29,7 +29,6 @@ typedef struct PartitionUpdateState CustomScanState css; Oid partitioned_table; - ResultRelInfo *resultRelInfo; JunkFilter *junkfilter; Plan *subplan; /* proxy variable to store subplan */ } PartitionUpdateState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 884fbb12..d1ba303d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -641,21 +641,21 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) { - int natts; - bool found_whole_row; - AttrNumber *attr_map; - MemoryContext old_mcxt; - /* * In UPDATE queries we would operate with child relation, but * expression expects varattnos like in base relation, so we map * parent varattnos to child varattnos */ - Index relno = ((Scan *) child_state->plan)->scanrelid; - Node *expr = PrelExpressionForRelid(prel, relno); - Oid child_relid = getrelid(relno, estate->es_range_table); - Relation child_rel = heap_open(child_relid, NoLock); + int natts; + bool found_whole_row; + + AttrNumber *attr_map; + MemoryContext old_mcxt; + Index relno = ((Scan *) child_state->plan)->scanrelid; + Node *expr = PrelExpressionForRelid(prel, relno); + Relation child_rel = heap_open( + getrelid(relno, estate->es_range_table), NoLock); attr_map = build_attributes_map(prel, child_rel, &natts); expr = map_variable_attnos(expr, relno, 0, attr_map, natts, From efa1b4867a80cf97940ed1995044c51644f699ac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 31 Jul 2017 19:10:49 +0300 Subject: [PATCH 0692/1124] Start fixing update node for subpartitions (WIP) --- 
expected/pathman_subpartitions.out | 84 ++---------------------------- sql/pathman_subpartitions.sql | 65 ++++++----------------- 2 files changed, 20 insertions(+), 129 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index bf31a580..659185bf 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -136,7 +136,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; Filter: (a >= 210) (4 rows) -/* Multilevel partitioning with update triggers */ +/* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) RETURNS SETOF REGCLASS AS $$ @@ -159,49 +159,6 @@ BEGIN END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) -RETURNS SETOF TEXT AS -$$ -DECLARE - def TEXT; -BEGIN - FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) - LOOP - RETURN NEXT def; - END LOOP; - - RETURN; -END; -$$ LANGUAGE plpgsql; -SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -ERROR: Parent table must have an update trigger -SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ - create_update_triggers ------------------------- - -(1 row) - -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ------------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1 | CREATE TRIGGER abc_1_upd_trig 
BEFORE UPDATE OF a ON subpartitions.abc_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_0 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_1 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_1_2 | CREATE TRIGGER abc_1_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_1_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2_0 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_2_1 | CREATE TRIGGER abc_2_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_2_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3_1 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_3_2 | CREATE TRIGGER abc_3_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_3_2 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() -(14 rows) - SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); append_range_partition 
------------------------ @@ -214,35 +171,10 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - p | get_triggers ------------------------+------------------------------------------------------------------------------------------------------------------------------------------- - subpartitions.abc_4 | CREATE TRIGGER abc_upd_trig BEFORE UPDATE OF a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4_0 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_0 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() - subpartitions.abc_4_1 | CREATE TRIGGER abc_4_upd_trig BEFORE UPDATE OF b, a ON subpartitions.abc_4_1 FOR EACH ROW EXECUTE PROCEDURE pathman_update_trigger_func() -(4 rows) - -SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -ERROR: Parent table must not have an update trigger -SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ - drop_triggers ---------------- - -(1 row) - -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - p | get_triggers ----+-------------- -(0 rows) - DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects -/* Test that update trigger works correctly */ +/* Test that update works correctly */ +SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions @@ -262,12 +194,6 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); 2 (1 row) -SELECT 
create_update_triggers('subpartitions.abc'); - create_update_triggers ------------------------- - -(1 row) - INSERT INTO subpartitions.abc VALUES (25, 25); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ tableoid | a | b @@ -297,7 +223,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (1 row) DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 10 other objects +NOTICE: drop cascades to 9 other objects DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b1a79874..06cd5603 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,8 +3,6 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; - - /* Create two level partitioning structure */ CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; @@ -28,75 +26,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; - - -/* Multilevel partitioning with update triggers */ -CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) -RETURNS SETOF REGCLASS AS +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS $$ DECLARE - partition REGCLASS; - subpartition REGCLASS; + partition REGCLASS; + subpartition TEXT; BEGIN IF rel IS NULL THEN RETURN; END IF; - RETURN NEXT rel; + RETURN NEXT rel::TEXT; FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR 
subpartition IN (SELECT subpartitions.partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) LOOP - RETURN NEXT subpartition; + RETURN NEXT level || subpartition::TEXT; END LOOP; END LOOP; END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION subpartitions.get_triggers(rel REGCLASS) -RETURNS SETOF TEXT AS -$$ -DECLARE - def TEXT; -BEGIN - FOR def IN (SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = rel) - LOOP - RETURN NEXT def; - END LOOP; - - RETURN; -END; -$$ LANGUAGE plpgsql; - -SELECT create_update_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -SELECT create_update_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc_4') as p -ORDER BY p; - -SELECT drop_triggers('subpartitions.abc_1'); /* Cannot perform on partition */ -SELECT drop_triggers('subpartitions.abc'); /* Can perform on parent */ -SELECT p, subpartitions.get_triggers(p) -FROM subpartitions.partitions_tree('subpartitions.abc') as p -ORDER BY p; - +SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; - - -/* Test that update trigger works correctly */ +\q +/* Test that update works correctly */ +SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); -SELECT create_update_triggers('subpartitions.abc'); INSERT INTO subpartitions.abc VALUES (25, 25); SELECT 
tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ @@ -111,8 +79,5 @@ UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ DROP TABLE subpartitions.abc CASCADE; - - - DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; From 6a346b7daac0a262e70c778828a0f68b3123807c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 1 Aug 2017 10:58:25 +0300 Subject: [PATCH 0693/1124] Fix update on subpartitions --- expected/pathman_subpartitions.out | 40 +++++++++++++++++++++++------- sql/pathman_subpartitions.sql | 1 - src/planner_tree_modification.c | 22 ++++++---------- 3 files changed, 38 insertions(+), 25 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 659185bf..af35011e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -137,24 +137,27 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; (4 rows) /* Multilevel partitioning with updates */ -CREATE OR REPLACE FUNCTION subpartitions.partitions_tree(rel REGCLASS) -RETURNS SETOF REGCLASS AS +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS $$ DECLARE - partition REGCLASS; - subpartition REGCLASS; + partition REGCLASS; + subpartition TEXT; BEGIN IF rel IS NULL THEN RETURN; END IF; - RETURN NEXT rel; + RETURN NEXT rel::TEXT; FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) LOOP - FOR subpartition IN (SELECT subpartitions.partitions_tree(partition)) + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) LOOP - RETURN NEXT subpartition; + RETURN NEXT level || subpartition::TEXT; END LOOP; END LOOP; END @@ -171,6 +174,25 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); 2 (1 row) +SELECT 
subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects /* Test that update works correctly */ @@ -223,7 +245,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (1 row) DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 9 other objects +NOTICE: drop cascades to 10 other objects DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass) +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 06cd5603..23217872 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -58,7 +58,6 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; -\q /* Test that update works correctly */ SET pg_pathman.enable_partitionupdate=on; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 8d44ded2..dc72bcb2 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -539,25 +539,17 @@ partition_update_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { - Oid parent_relid; Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const 
PartRelationInfo *prel = get_pathman_relation_info(relid); + Oid tmp_relid, + relid = getrelid(rindex, rtable); + const PartRelationInfo *prel; - /* query can be changed earlier to point on child partition, - * so we're possibly now looking at query that updates child partition - */ - if (prel == NULL) - { - parent_relid = get_parent_of_partition(relid, NULL); - if (parent_relid) - { - prel = get_pathman_relation_info(parent_relid); - relid = parent_relid; - } - } + while ((tmp_relid = get_parent_of_partition(relid, NULL)) != 0) + relid = tmp_relid; /* Check that table is partitioned */ + prel = get_pathman_relation_info(relid); + if (prel) { List *returning_list = NIL; From c58238b7e3c913f0c3e81bb94a4930a6c2a7ecd0 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 1 Aug 2017 12:12:11 +0300 Subject: [PATCH 0694/1124] Change name of update node --- expected/pathman_update_node.out | 4 ++-- src/include/partition_update.h | 2 +- tests/python/partitioning_test.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index a6214a52..254b301e 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -18,7 +18,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionRoute) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) @@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PrepareInsert) + -> Custom Scan (PartitionRoute) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) diff --git 
a/src/include/partition_update.h b/src/include/partition_update.h index 30e5e329..c2bd6926 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -22,7 +22,7 @@ #include "nodes/extensible.h" #endif -#define UPDATE_NODE_DESCRIPTION ("PrepareInsert") +#define UPDATE_NODE_DESCRIPTION ("PartitionRoute") typedef struct PartitionUpdateState { diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 6ec2f5cb..65892fbd 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1309,7 +1309,7 @@ def test_update_node_plan1(self): ], "Node Type": "Custom Scan", "Parent Relationship": "Member", - "Custom Plan Provider": "PrepareInsert" + "Custom Plan Provider": "PartitionRoute" } ''' for i, f in enumerate([''] + list(map(str, range(1, 10)))): From 200a391b38de54de9180b2d6432ffc98e9988919 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 2 Aug 2017 13:53:07 +0300 Subject: [PATCH 0695/1124] Fix compability issue with PostgresPro 9.6.3.2 --- src/include/compat/pg_compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index b79f9192..45f1a6c5 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -209,7 +209,7 @@ /* * create_nestloop_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ outer, inner, filtered_joinclauses, pathkeys, \ required_outer) \ From eb8b19ff14e1e9072d0f17a0c76cbcd911f7abb5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 7 Aug 2017 12:59:35 +0300 Subject: [PATCH 0696/1124] replace get_pathman_lib_version() with pathman_version(), new docs --- README.md | 7 +++ expected/pathman_calamity.out | 8 ++-- init.sql | 4 +- sql/pathman_calamity.sql | 2 +- src/include/init.h | 8 ++-- src/init.c | 84 
++++++++++++++++++++++++++--------- src/pl_funcs.c | 6 +-- 7 files changed, 85 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 3b37273f..c075c2a8 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,13 @@ SET pg_pathman.enable = t; ## Available functions +### Module's version + +```plpgsql +pathman_version() +``` +Although it's possible to get major and minor version numbers using `\dx pg_pathman`, it doesn't show the actual [patch number](http://semver.org/). This function returns a complete version number of the loaded pg_pathman module in `MAJOR.MINOR.PATCH` format. + ### Partition creation ```plpgsql create_hash_partitions(relation REGCLASS, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 251ec31c..aceab6e8 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -9,10 +9,10 @@ SELECT debug_capture(); (1 row) -SELECT get_pathman_lib_version(); - get_pathman_lib_version -------------------------- - 10500 +SELECT pathman_version(); + pathman_version +----------------- + 1.5.0 (1 row) set client_min_messages = NOTICE; diff --git a/init.sql b/init.sql index 181a81a7..12abdf78 100644 --- a/init.sql +++ b/init.sql @@ -960,6 +960,6 @@ CREATE OR REPLACE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +CREATE OR REPLACE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..79e8c79c 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -7,7 +7,7 @@ CREATE SCHEMA calamity; /* call for coverage test */ set client_min_messages = ERROR; SELECT debug_capture(); -SELECT get_pathman_lib_version(); +SELECT pathman_version(); set 
client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 3f1790ce..928052a4 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -153,11 +153,11 @@ simpify_mcxt_name(MemoryContext mcxt) #define DEFAULT_PATHMAN_OVERRIDE_COPY true -/* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010500 +/* Lowest version of Pl/PgSQL frontend compatible with internals */ +#define LOWEST_COMPATIBLE_FRONT "1.5.0" -/* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010500 +/* Current version of native C library */ +#define CURRENT_LIB_VERSION "1.5.0" void *pathman_cache_search_relid(HTAB *cache_table, diff --git a/src/init.c b/src/init.c index e1a1b5bf..7b0cdda0 100644 --- a/src/init.c +++ b/src/init.c @@ -38,6 +38,8 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#include + /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; @@ -92,9 +94,10 @@ static bool read_opexpr_const(const OpExpr *opexpr, /* Validate SQL facade */ -static uint32 build_sql_facade_version(char *version_cstr); -static uint32 get_sql_facade_version(void); -static void validate_sql_facade_version(uint32 ver); +static uint32 build_semver_uint32(char *version_cstr); +static uint32 get_plpgsql_frontend_version(void); +static void validate_plpgsql_frontend_version(uint32 current_ver, + uint32 compatible_ver); /* @@ -206,7 +209,8 @@ load_config(void) return false; /* remain 'uninitialized', exit before creating main caches */ /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ - validate_sql_facade_version(get_sql_facade_version()); + validate_plpgsql_frontend_version(get_plpgsql_frontend_version(), + build_semver_uint32(LOWEST_COMPATIBLE_FRONT)); /* Create various hash tables (caches) */ init_local_cache(); @@ -1196,27 +1200,66 @@ validate_hash_constraint(const Expr *expr, /* Parse cstring and build uint32 representing the version 
*/ static uint32 -build_sql_facade_version(char *version_cstr) +build_semver_uint32(char *version_cstr) { - uint32 version; + uint32 version = 0; + bool expect_num_token = false; + long max_dots = 2; + char *pos = version_cstr; + + while (*pos) + { + /* Invert expected token type */ + expect_num_token = !expect_num_token; + + if (expect_num_token) + { + char *end_pos; + long num; + long i; + + /* Parse number */ + num = strtol(pos, &end_pos, 10); - /* expect to see x+.y+.z+ */ - version = strtol(version_cstr, &version_cstr, 10) & 0xFF; + if (pos == end_pos || num > 99 || num < 0) + goto version_error; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + for (i = 0; i < max_dots; i++) + num *= 100; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + version += num; + + /* Move position */ + pos = end_pos; + } + else + { + /* Expect to see less dots */ + max_dots--; + + if (*pos != '.' 
|| max_dots < 0) + goto version_error; + + /* Move position */ + pos++; + } + } + + if (!expect_num_token) + goto version_error; return version; + +version_error: + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("wrong version: \"%s\"", version_cstr), + errhint(INIT_ERROR_HINT))); + return 0; /* keep compiler happy */ } /* Get version of pg_pathman's facade written in Pl/PgSQL */ static uint32 -get_sql_facade_version(void) +get_plpgsql_frontend_version(void) { Relation pg_extension_rel; ScanKeyData skey; @@ -1255,20 +1298,21 @@ get_sql_facade_version(void) systable_endscan(scan); heap_close(pg_extension_rel, AccessShareLock); - return build_sql_facade_version(version_cstr); + return build_semver_uint32(version_cstr); } /* Check that current Pl/PgSQL facade is compatible with internals */ static void -validate_sql_facade_version(uint32 ver) +validate_plpgsql_frontend_version(uint32 current_ver, uint32 compatible_ver) { - Assert(ver > 0); + Assert(current_ver > 0); + Assert(compatible_ver > 0); /* Compare ver to 'lowest compatible frontend' version */ - if (ver < LOWEST_COMPATIBLE_FRONT) + if (current_ver < compatible_ver) { elog(DEBUG1, "current version: %x, lowest compatible: %x", - ver, LOWEST_COMPATIBLE_FRONT); + current_ver, compatible_ver); DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index bb66506d..f1cf0000 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -77,7 +77,7 @@ PG_FUNCTION_INFO_V1( create_single_update_trigger ); PG_FUNCTION_INFO_V1( has_update_trigger ); PG_FUNCTION_INFO_V1( debug_capture ); -PG_FUNCTION_INFO_V1( get_pathman_lib_version ); +PG_FUNCTION_INFO_V1( pathman_version ); /* User context for function show_partition_list_internal() */ @@ -1594,7 +1594,7 @@ debug_capture(PG_FUNCTION_ARGS) /* NOTE: just in case */ Datum -get_pathman_lib_version(PG_FUNCTION_ARGS) +pathman_version(PG_FUNCTION_ARGS) { - 
PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); + PG_RETURN_CSTRING(CURRENT_LIB_VERSION); } From c1d59bd8a3a8b6918d94b7f09fb090c5dcb9a47a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 8 Aug 2017 13:57:20 +0300 Subject: [PATCH 0697/1124] fix problems with DELETE USING mentioned in issue #111 --- expected/pathman_basic.out | 14 ++++++++++++++ sql/pathman_basic.sql | 3 +++ src/planner_tree_modification.c | 9 --------- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 7d83372e..5ef05408 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1740,6 +1740,20 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = ' (7 rows) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Delete on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(8 rows) + +DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); NOTICE: 44 rows copied from test.range_rel_1 diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 8fed0b32..c6967445 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -527,6 +527,9 @@ UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-0 EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; 
+EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; + /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 464530c7..b582182f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -188,15 +188,6 @@ disable_standard_inheritance(Query *parse) ListCell *lc; Index current_rti; /* current range table entry index */ -/* - * We can't handle non-SELECT queries unless - * there's a pathman_expand_inherited_rtentry_hook() - */ -#ifndef NATIVE_EXPAND_RTE_HOOK - if (parse->commandType != CMD_SELECT) - return; -#endif - /* Walk through RangeTblEntries list */ current_rti = 0; foreach (lc, parse->rtable) From 1a572f36006534a282f899c8e140877a0b142797 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 8 Aug 2017 14:20:28 +0300 Subject: [PATCH 0698/1124] provide fix for 9.5 --- src/planner_tree_modification.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b582182f..54c869dc 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -188,6 +188,11 @@ disable_standard_inheritance(Query *parse) ListCell *lc; Index current_rti; /* current range table entry index */ +#ifdef LEGACY_ROWMARKS_95 + if (parse->commandType != CMD_SELECT) + return; +#endif + /* Walk through RangeTblEntries list */ current_rti = 0; foreach (lc, parse->rtable) From 9ebc83329385af0173700f02e1810e36c89c7dbe Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 8 Aug 2017 14:40:07 +0300 Subject: [PATCH 0699/1124] remove reference to the create_partitions_from_range() func (which was removed in 1.4) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 
d53ad374..2935ff3c 100644 --- a/README.md +++ b/README.md @@ -263,7 +263,7 @@ Update RANGE partitioned table interval. Note that interval must not be negative ```plpgsql set_enable_parent(relation REGCLASS, value BOOLEAN) ``` -Include/exclude parent table into/from query plan. In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` or `create_partitions_from_range()` functions. If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. +Include/exclude parent table into/from query plan. In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` function. If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. 
```plpgsql set_auto(relation REGCLASS, value BOOLEAN) From c9b203e04eadf4ff99126651ad9b69d43df3120c Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 8 Aug 2017 15:22:57 +0300 Subject: [PATCH 0700/1124] Separate tests related with UPDATE and DELETE from pathman_basic --- Makefile | 1 + expected/pathman_basic.out | 107 +--------------------------- expected/pathman_upd_del.out | 124 ++++++++++++++++++++++++++++++++ expected/pathman_upd_del_1.out | 126 +++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 32 --------- sql/pathman_upd_del.sql | 51 +++++++++++++ 6 files changed, 305 insertions(+), 136 deletions(-) create mode 100644 expected/pathman_upd_del.out create mode 100644 expected/pathman_upd_del_1.out create mode 100644 sql/pathman_upd_del.sql diff --git a/Makefile b/Makefile index 5a2d01f3..c2cacaae 100644 --- a/Makefile +++ b/Makefile @@ -47,6 +47,7 @@ REGRESS = pathman_array_qual \ pathman_rowmarks \ pathman_runtime_nodes \ pathman_update_trigger \ + pathman_upd_del \ pathman_utility_stmt EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 5ef05408..fa946d72 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1653,114 +1653,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; -> Seq Scan on range_rel_14 (4 rows) -/* Temporary table for JOINs */ -CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO test.tmp VALUES (1, 1), (2, 2); -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ - QUERY PLAN --------------------------------------------------------------------------------- - Update on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) - -UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM 
test.range_rel WHERE dt = '2010-06-15'; - id | dt | value ------+--------------------------+------- - 166 | Tue Jun 15 00:00:00 2010 | 111 -(1 row) - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ - QUERY PLAN --------------------------------------------------------------------------------- - Delete on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) - -DELETE FROM test.range_rel WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; - id | dt | value -----+----+------- -(0 rows) - -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ - QUERY PLAN --------------------------------------------------------------------------------- - Update on range_rel - -> Seq Scan on range_rel - Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) -(3 rows) - -UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; - id | dt | value -----+----+------- -(0 rows) - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ - QUERY PLAN --------------------------------------------------------------------------------- - Delete on range_rel - -> Seq Scan on range_rel - Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) -(3 rows) - -DELETE FROM test.range_rel WHERE dt < '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; - id | dt | value -----+----+------- -(0 rows) - -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- - Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index 
Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) - -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- - Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) - -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- - Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(8 rows) - -DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); -NOTICE: 44 rows copied from test.range_rel_1 +NOTICE: 45 rows copied from test.range_rel_1 NOTICE: 31 rows copied from test.range_rel_3 NOTICE: 30 rows copied from test.range_rel_4 NOTICE: 31 rows copied from test.range_rel_5 -NOTICE: 29 rows copied from test.range_rel_6 +NOTICE: 30 rows copied from test.range_rel_6 NOTICE: 31 rows copied from test.range_rel_7 NOTICE: 31 rows copied from test.range_rel_8 NOTICE: 30 rows copied from test.range_rel_9 @@ -1953,6 +1852,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 
other objects DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 29 other objects +NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out new file mode 100644 index 00000000..ee112926 --- /dev/null +++ b/expected/pathman_upd_del.out @@ -0,0 +1,124 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary table for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +/* Test UPDATE and DELETE */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + 
-> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) + +UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN 
+-------------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) + +DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Delete on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(8 rows) + +DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out new file mode 100644 index 00000000..c8dcaf8c --- /dev/null +++ b/expected/pathman_upd_del_1.out @@ -0,0 +1,126 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary table for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, 
'1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +/* Test UPDATE and DELETE */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 
00:00:00 1990'::timestamp without time zone) +(3 rows) + +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) + +UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) + +DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Delete on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(10 rows) + +DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = 
'2010-01-02' AND r.id = t.id; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index c6967445..f24716c0 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -499,38 +499,6 @@ SELECT prepend_range_partition('test.range_rel'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; -/* Temporary table for JOINs */ -CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO test.tmp VALUES (1, 1), (2, 2); - - -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ -UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ -DELETE FROM test.range_rel WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; - -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ -UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ -DELETE FROM test.range_rel WHERE dt < '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; - -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM test.range_rel r 
USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - -EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - - /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql new file mode 100644 index 00000000..18e32c6a --- /dev/null +++ b/sql/pathman_upd_del.sql @@ -0,0 +1,51 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + +/* Temporary table for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + +/* Test UPDATE and DELETE */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + +EXPLAIN (COSTS OFF) 
DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + +EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; + +EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; From 456de9cbb9a1c36ed16c0a24944e02f672533c40 Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Tue, 8 Aug 2017 15:25:25 +0300 Subject: [PATCH 0701/1124] postgrespro enterprise compatibility fix --- src/include/compat/pg_compat.h | 2 +- src/partition_creation.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 33a28339..3707765a 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -208,7 +208,7 @@ /* * create_nestloop_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ outer, inner, filtered_joinclauses, pathkeys, \ required_outer) \ diff --git a/src/partition_creation.c b/src/partition_creation.c index 412b3f36..d4ee5abb 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -758,6 +758,9 @@ create_single_partition_internal(Oid parent_relid, create_stmt.partbound = NULL; 
create_stmt.partspec = NULL; #endif +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 + create_stmt.partition_info = NULL; +#endif /* Obtain the sequence of Stmts to create partition and link it to parent */ create_stmts = transformCreateStmt(&create_stmt, NULL); From e8c3674b11cdf7058dc3b01e2b3d4d5c7d7681d8 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 10 Aug 2017 18:18:40 +0300 Subject: [PATCH 0702/1124] Add set_trace function in python tests --- tests/python/partitioning_test.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 65892fbd..8c3a5828 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -89,6 +89,11 @@ def setUp(self): def tearDown(self): stop_all() + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = subprocess.Popen([command], stdin=subprocess.PIPE) + p.communicate(str(pid).encode()) + def start_new_pathman_cluster(self, name='test', allows_streaming=False): node = get_new_node(name) node.init(allows_streaming=allows_streaming) From 962efa2f48ffa2b66f09802cb66950a0853a8b8a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 10 Aug 2017 18:25:34 +0300 Subject: [PATCH 0703/1124] remove duplicate debug printing functions --- src/debug_print.c | 148 ------------------------------------------ tests/cmocka/Makefile | 1 - 2 files changed, 149 deletions(-) diff --git a/src/debug_print.c b/src/debug_print.c index 1a4ea417..bac1d622 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -104,151 +104,3 @@ irange_print(IndexRange irange) return str.data; } - -#ifndef CMOCKA_TESTS -/* - * Print attribute information - */ -static char * -printatt(unsigned attributeId, - Form_pg_attribute attributeP, - char *value) -{ - return psprintf("\t%2d: %s%s%s%s\t(typeid = %u, len = %d, typmod = %d, byval = %c)\n", - attributeId, - NameStr(attributeP->attname), - value != NULL ? 
" = \"" : "", - value != NULL ? value : "", - value != NULL ? "\"" : "", - (unsigned int) (attributeP->atttypid), - attributeP->attlen, - attributeP->atttypmod, - attributeP->attbyval ? 't' : 'f'); -} - -/* - * Print one tuple for an interactive backend - */ -static char * -debugtup(TupleTableSlot *slot) -{ - TupleDesc typeinfo = slot->tts_tupleDescriptor; - int natts = typeinfo->natts; - int i; - Datum attr; - char *value; - bool isnull; - Oid typoutput; - bool typisvarlena; - - int result_len = 0; - char *result = (char *) palloc(result_len + 1); - - for (i = 0; i < natts; ++i) - { - char *s; - int len; - - attr = slot_getattr(slot, i + 1, &isnull); - if (isnull) - continue; - getTypeOutputInfo(typeinfo->attrs[i]->atttypid, - &typoutput, &typisvarlena); - - value = OidOutputFunctionCall(typoutput, attr); - - s = printatt((unsigned) i + 1, typeinfo->attrs[i], value); - len = strlen(s); - result = (char *) repalloc(result, result_len + len + 1); - strncpy(result + result_len, s, len); - result_len += len; - } - - result[result_len] = '\0'; - return result; -} - -/* - * Print contents of tuple slot - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -slot_print(TupleTableSlot *slot) -{ - if (TupIsNull(slot)) - return NULL; - - if (!slot->tts_tupleDescriptor) - return NULL; - - return debugtup(slot); -} - -/* - * Print contents of range table - */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -rt_print(const List *rtable) -{ -#define APPEND_STR(si, ...) 
\ -{ \ - char *line = psprintf(__VA_ARGS__); \ - appendStringInfo(&si, "%s", line); \ - pfree(line); \ -} - - const ListCell *l; - int i = 1; - - StringInfoData str; - - initStringInfo(&str); - APPEND_STR(str, "resno\trefname \trelid\tinFromCl\n"); - APPEND_STR(str, "-----\t---------\t-----\t--------\n"); - - foreach(l, rtable) - { - RangeTblEntry *rte = lfirst(l); - - switch (rte->rtekind) - { - case RTE_RELATION: - APPEND_STR(str, "%d\t%s\t%u\t%c", - i, rte->eref->aliasname, rte->relid, rte->relkind); - break; - case RTE_SUBQUERY: - APPEND_STR(str, "%d\t%s\t[subquery]", - i, rte->eref->aliasname); - break; - case RTE_JOIN: - APPEND_STR(str, "%d\t%s\t[join]", - i, rte->eref->aliasname); - break; - case RTE_FUNCTION: - APPEND_STR(str, "%d\t%s\t[rangefunction]", i, rte->eref->aliasname); - break; - case RTE_VALUES: - APPEND_STR(str, "%d\t%s\t[values list]", i, rte->eref->aliasname); - break; - case RTE_CTE: - APPEND_STR(str, "%d\t%s\t[cte]", i, rte->eref->aliasname); - break; - default: - elog(ERROR, "%d\t%s\t[unknown rtekind]", - i, rte->eref->aliasname); - } - - APPEND_STR(str, "\t%s\t%s\n", (rte->inh ? "inh" : ""), - (rte->inFromCl ? 
"inFromCl" : "")); - - i++; - } - return str.data; -#undef APPEND_STR -} -#endif diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index f79e2637..e31e6d95 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,7 +8,6 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) -CFLAGS += -DCMOCKA_TESTS LDFLAGS += -lcmocka TEST_BIN = rangeset_tests From 8bc0be32109377f2a75e590cab8378024d0b543d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 10 Aug 2017 18:52:06 +0300 Subject: [PATCH 0704/1124] remove useless variable 'v_upper_parent' --- hash.sql | 3 --- range.sql | 2 -- 2 files changed, 5 deletions(-) diff --git a/hash.sql b/hash.sql index 5159e189..8cf9b19a 100644 --- a/hash.sql +++ b/hash.sql @@ -19,9 +19,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( partition_names TEXT[] DEFAULT NULL, tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ -DECLARE - v_upper_parent REGCLASS; - BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, diff --git a/range.sql b/range.sql index 0fd287e7..67cf3d7a 100644 --- a/range.sql +++ b/range.sql @@ -65,7 +65,6 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; - v_upper_parent REGCLASS; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, @@ -165,7 +164,6 @@ DECLARE end_value start_value%TYPE; part_count INTEGER := 0; i INTEGER; - v_upper_parent REGCLASS; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, From bff27f1b2048fb96e48d7f1a40e02dd72430f61b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 11 Aug 2017 13:21:08 +0300 Subject: [PATCH 0705/1124] fix regression tests --- expected/pathman_basic.out | 4 ++-- expected/pathman_expressions.out | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0a46ba3e..06551aa1 100644 --- 
a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -18,7 +18,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM \set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); @@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 463ad584..c0f4b0e9 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -234,7 +234,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using system attributes */ SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); ERROR: failed to 
analyze partitioning expression "xmin" @@ -244,7 +244,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', @@ -256,7 +256,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: failed to analyze partitioning expression "random()" @@ -266,7 +266,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using broken parentheses */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: failed to parse partitioning expression "value * value2))" @@ -276,7 +276,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function 
create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); ERROR: failed to analyze partitioning expression "value * value3" @@ -287,7 +287,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 6 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition 
in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; From 5d955368a30470fb2fa8ce9de479e0e94fd92198 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 11 Aug 2017 17:26:34 +0300 Subject: [PATCH 0706/1124] code cleanup --- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 5 ++-- src/partition_filter.c | 44 +++++++++++++++++++--------------- src/relation_info.c | 11 +++------ 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index ddb5b72f..2a4ca382 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -74,7 +74,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ - CmdType command_type; /* currenly we only allow INSERT */ + CmdType command_type; /* INSERT | UPDATE */ LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; }; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 2a22bf91..9921a029 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -393,9 +393,8 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); -AttrNumber * build_attributes_map(const PartRelationInfo *prel, - Relation child_rel, - int *map_length); +AttrNumber *build_attributes_map(const PartRelationInfo *prel, + TupleDesc child_tupdesc); #endif /* RELATION_INFO_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 503bffe5..17490bb6 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -169,6 +169,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; + Assert(cmd_type == CMD_INSERT || cmd_type == CMD_UPDATE); parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; @@ -582,15 +583,16 @@ 
partition_filter_create_scan_state(CustomScan *node) state = (PartitionFilterState *) palloc0(sizeof(PartitionFilterState)); NodeSetTag(state, T_CustomScanState); - state->css.flags = node->flags; - state->css.methods = &partition_filter_exec_methods; + /* Initialize base CustomScanState */ + state->css.flags = node->flags; + state->css.methods = &partition_filter_exec_methods; /* Extract necessary variables */ - state->subplan = (Plan *) linitial(node->custom_plans); - state->partitioned_table = intVal(linitial(node->custom_private)); - state->on_conflict_action = intVal(lsecond(node->custom_private)); - state->returning_list = lthird(node->custom_private); - state->command_type = (CmdType) intVal(lfourth(node->custom_private)); + state->subplan = (Plan *) linitial(node->custom_plans); + state->partitioned_table = (Oid) intVal(linitial(node->custom_private)); + state->on_conflict_action = intVal(lsecond(node->custom_private)); + state->returning_list = (List *) lthird(node->custom_private); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -619,7 +621,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { /* Fetch PartRelationInfo for this partitioned relation */ prel = get_pathman_relation_info(state->partitioned_table); - Assert(prel != NULL); /* Prepare state for expression execution */ if (state->command_type == CMD_UPDATE) @@ -630,20 +631,25 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) * parent varattnos to child varattnos */ - int natts; bool found_whole_row; - AttrNumber *attr_map; + AttrNumber *map; MemoryContext old_mcxt; Index relno = ((Scan *) child_state->plan)->scanrelid; - Node *expr = PrelExpressionForRelid(prel, relno); - Relation child_rel = heap_open( - getrelid(relno, estate->es_range_table), NoLock); - - attr_map = build_attributes_map(prel, child_rel, &natts); - expr = 
map_variable_attnos(expr, relno, 0, attr_map, natts, - &found_whole_row); - Assert(!found_whole_row); + Node *expr; + Relation child_rel; + + child_rel = heap_open(getrelid(relno, estate->es_range_table), NoLock); + + map = build_attributes_map(prel, RelationGetDescr(child_rel)); + expr = map_variable_attnos(PrelExpressionForRelid(prel, relno), + relno, 0, map, + RelationGetDescr(child_rel)->natts, + &found_whole_row); + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in partition key"); + heap_close(child_rel, NoLock); /* Prepare state for expression execution */ @@ -653,7 +659,7 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) } else { - /* simple INSERT, expression based on parent attribute numbers */ + /* Simple INSERT, expression based on parent attribute numbers */ state->expr_state = prepare_expr_state(prel, estate); } } diff --git a/src/relation_info.c b/src/relation_info.c index ffcff1c2..eb8b0980 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1447,18 +1447,13 @@ shout_if_prel_is_invalid(const Oid parent_oid, * And it should be faster if expression uses not all fields from relation. 
*/ AttrNumber * -build_attributes_map(const PartRelationInfo *prel, Relation child_rel, - int *map_length) +build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) { AttrNumber i = -1; Oid parent_relid = PrelParentRelid(prel); - TupleDesc child_descr = RelationGetDescr(child_rel); - int natts = child_descr->natts; + int natts = child_tupdesc->natts; AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - if (map_length != NULL) - *map_length = natts; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { int j; @@ -1467,7 +1462,7 @@ build_attributes_map(const PartRelationInfo *prel, Relation child_rel, for (j = 0; j < natts; j++) { - Form_pg_attribute att = child_descr->attrs[j]; + Form_pg_attribute att = child_tupdesc->attrs[j]; if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ From 810919103ebe2e4a4160320b90c064b7892db892 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 14 Aug 2017 19:15:14 +0300 Subject: [PATCH 0707/1124] fix bogus varno in PartitionFilter's targetlist (issue #112) --- expected/pathman_inserts.out | 165 ++++++++++++++++++++++++++++++++ sql/pathman_inserts.sql | 27 ++++++ src/include/partition_filter.h | 1 + src/partition_filter.c | 9 +- src/planner_tree_modification.c | 4 +- 5 files changed, 202 insertions(+), 4 deletions(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 9e04ae26..c3a8566f 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -853,6 +853,171 @@ NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: 256 | 128 | test_inserts.storage_14 (27 rows) +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN 
+---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d, storage_1.e + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d, storage_14.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: 
NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b + -> Seq 
Scan on test_inserts.storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b +(34 rows) + /* test gap case (missing partition in between) */ CREATE TABLE test_inserts.test_gap(val INT NOT NULL); INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index ff46c848..7653a3e6 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -163,6 +163,33 @@ FROM generate_series(-2, 130, 5) i RETURNING e * 2, b, tableoid::regclass; +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + + /* test gap case (missing partition in between) */ CREATE TABLE test_inserts.test_gap(val INT NOT NULL); INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index cccacf2f..85ddcf91 
100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -141,6 +141,7 @@ ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, Plan * make_partition_filter(Plan *subplan, Oid parent_relid, + Index parent_rti, OnConflictAction conflict_action, List *returning_list); diff --git a/src/partition_filter.c b/src/partition_filter.c index f0edf76d..a8cbf5ea 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -481,7 +481,9 @@ select_partition_for_insert(Datum value, Oid value_type, */ Plan * -make_partition_filter(Plan *subplan, Oid parent_relid, +make_partition_filter(Plan *subplan, + Oid parent_relid, + Index parent_rti, OnConflictAction conflict_action, List *returning_list) { @@ -511,7 +513,10 @@ make_partition_filter(Plan *subplan, Oid parent_relid, /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; - cscan->custom_scan_tlist = subplan->targetlist; + + /* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ + cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); + ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); /* Pack partitioned table's Oid and conflict_action */ cscan->custom_private = list_make3(makeInteger(parent_relid), diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 464530c7..9f4d1785 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -423,8 +423,8 @@ partition_filter_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), - relid, + lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, modify_table->onConflictAction, returning_list); } From 20d1e80eb5a0582dd3e881ea51f5914f906c798a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 14 Aug 2017 19:32:20 +0300 Subject: [PATCH 0708/1124] add regression test variant for 9.5 --- expected/pathman_inserts_1.out | 
1037 ++++++++++++++++++++++++++++++++ 1 file changed, 1037 insertions(+) create mode 100644 expected/pathman_inserts_1.out diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out new file mode 100644 index 00000000..9f8633ab --- /dev/null +++ b/expected/pathman_inserts_1.out @@ -0,0 +1,1037 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. 
+(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. | 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) 
+ ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? 
+------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? 
+----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, i, i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i, i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +--------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, b, NULL::integer, d, e + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d, e + -> Seq 
Scan on test_inserts.storage_13 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d, e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, b, NULL::integer, d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d + -> Seq Scan on test_inserts.storage_13 storage + Output: b, d + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +--------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b + -> Seq Scan on test_inserts.storage_1 
storage + Output: b + -> Seq Scan on test_inserts.storage_2 storage + Output: b + -> Seq Scan on test_inserts.storage_3 storage + Output: b + -> Seq Scan on test_inserts.storage_4 storage + Output: b + -> Seq Scan on test_inserts.storage_5 storage + Output: b + -> Seq Scan on test_inserts.storage_6 storage + Output: b + -> Seq Scan on test_inserts.storage_7 storage + Output: b + -> Seq Scan on test_inserts.storage_8 storage + Output: b + -> Seq Scan on test_inserts.storage_9 storage + Output: b + -> Seq Scan on test_inserts.storage_10 storage + Output: b + -> Seq Scan on test_inserts.storage_12 storage + Output: b + -> Seq Scan on test_inserts.storage_13 storage + Output: b + -> Seq Scan on test_inserts.storage_14 storage + Output: b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA test_inserts CASCADE; +NOTICE: drop cascades to 19 other objects +DROP EXTENSION pg_pathman CASCADE; From 6a6ee15e515541e2d01fbb95072a4bbc420a49af Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 20 Aug 2017 21:06:56 +0300 Subject: [PATCH 0709/1124] disable dangerous query optimizations on PostgreSQL 9.5 (issue #110) --- expected/pathman_lateral.out | 72 ++++++++-------- expected/pathman_rowmarks_1.out | 147 ++++++++++++++++++++++++-------- src/hooks.c | 60 ++++++++++--- src/planner_tree_modification.c | 83 +++++++++++++++--- 4 files changed, 266 insertions(+), 96 deletions(-) diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 
808a4d64..e5148664 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -27,20 +27,22 @@ select * from where t1.id = t2.id and t.id = t3.id); QUERY PLAN -------------------------------------------------------------------------------------------- - Nested Loop Semi Join + Nested Loop -> Nested Loop - Join Filter: ((t2.id + t1.id) = t3.id) - -> Append - -> Seq Scan on data_0 t3 - -> Seq Scan on data_1 t3_1 - -> Seq Scan on data_2 t3_2 - -> Seq Scan on data_3 t3_3 - -> Seq Scan on data_4 t3_4 - -> Seq Scan on data_5 t3_5 - -> Seq Scan on data_6 t3_6 - -> Seq Scan on data_7 t3_7 - -> Seq Scan on data_8 t3_8 - -> Seq Scan on data_9 t3_9 + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 -> Materialize -> Nested Loop Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) @@ -88,28 +90,28 @@ select * from -> Seq Scan on data_9 t1_9 Filter: ((id >= 1) AND (id <= 100)) -> Custom Scan (RuntimeAppend) - Prune by: (t3.id = t.id) - -> Seq Scan on data_0 t - Filter: (t3.id = id) - -> Seq Scan on data_1 t - Filter: (t3.id = id) - -> Seq Scan on data_2 t - Filter: (t3.id = id) - -> Seq Scan on data_3 t - Filter: (t3.id = id) - -> Seq Scan on data_4 t - Filter: (t3.id = id) - -> Seq Scan on data_5 t - Filter: (t3.id = id) - -> Seq Scan on data_6 t - Filter: (t3.id = id) - -> Seq Scan on data_7 t - Filter: (t3.id = id) - -> Seq Scan on data_8 t - Filter: (t3.id = id) - -> Seq Scan on data_9 t - Filter: (t3.id = id) -(82 rows) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + 
Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) set enable_hashjoin = on; set enable_mergejoin = on; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index 1ae02cf2..cbc0a1c6 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -179,16 +179,27 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) -> Materialize - -> Seq Scan on first - Filter: (id = 1) -(7 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(18 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -200,40 +211,73 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Join Filter: (second.id = first.id) -> HashAggregate Group Key: first.id - -> Seq Scan on first - Filter: (id < 1) + -> Append + -> Seq Scan on first + Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) -> Materialize -> Seq Scan on second -(9 rows) +(20 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +---------------------------------------------------------- Update on second -> Nested Loop Semi Join Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize - -> Seq Scan on first - Filter: ((id = 1) OR (id = 2)) -(7 rows) + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 2)) +(18 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) -> Materialize - -> Seq Scan on first - Filter: (id = 1) -(7 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(18 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; @@ -241,9 +285,11 @@ SET enable_mergejoin = t; UPDATE rowmarks.second SET id = 1 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) RETURNING *, tableoid::regclass; - id | tableoid -----+---------- -(0 rows) + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) /* Check deletes (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ @@ -251,16 +297,27 @@ SET 
enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) -> Materialize - -> Seq Scan on first - Filter: (id = 1) -(7 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(18 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second @@ -272,25 +329,47 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Join Filter: (second.id = first.id) -> HashAggregate Group Key: first.id - -> Seq Scan on first - Filter: (id < 1) + -> Append + -> Seq Scan on first + Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) -> Materialize -> Seq Scan on second -(9 rows) +(20 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +---------------------------------------------------------- Delete on second -> Nested Loop Semi Join Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize - -> Seq Scan on first - Filter: ((id = 1) OR (id = 2)) -(7 rows) + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + 
Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 2)) +(18 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; diff --git a/src/hooks.c b/src/hooks.c index 7b6c587c..56ac4bf8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -95,15 +95,23 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; + /* We shouldn't process tables with active children */ + if (inner_rte && inner_rte->inh) + return; + + /* We can't handle full or right outer joins */ if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) - return; /* we can't handle full or right outer joins */ + return; - /* Check that innerrel is a BASEREL with inheritors & PartRelationInfo */ - if (innerrel->reloptkind != RELOPT_BASEREL || !inner_rte->inh || + /* Check that innerrel is a BASEREL with PartRelationInfo */ + if (innerrel->reloptkind != RELOPT_BASEREL || !(inner_prel = get_pathman_relation_info(inner_rte->relid))) - { - return; /* Obviously not our case */ - } + return; + + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, + inner_rte)) + return; /* * These codes are used internally in the planner, but are not supported @@ -267,6 +275,10 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady()) return; + /* We shouldn't process tables with active children */ + if (rte->inh) + return; + /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. 
@@ -277,10 +289,10 @@ pathman_rel_pathlist_hook(PlannerInfo *root, return; #ifdef LEGACY_ROWMARKS_95 - /* It's better to exit, since RowMarks might be broken */ - if (root->parse->commandType != CMD_SELECT && - root->parse->commandType != CMD_INSERT) - return; + /* It's better to exit, since RowMarks might be broken */ + if (root->parse->commandType != CMD_SELECT && + root->parse->commandType != CMD_INSERT) + return; #endif /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ @@ -304,6 +316,31 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ListCell *lc; int i; + /* + * Check that this child is not the parent table itself. + * This is exactly how standard inheritance works. + */ + if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) + { + foreach (lc, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + RangeTblEntry *cur_parent_rte, + *cur_child_rte; + + /* This 'appinfo' is not for this child */ + if (appinfo->child_relid != rti) + continue; + + cur_parent_rte = root->simple_rte_array[appinfo->parent_relid]; + cur_child_rte = rte; /* we already have it, saves time */ + + /* This child == its own parent table! */ + if (cur_parent_rte->relid == cur_child_rte->relid) + return; + } + } + /* Make copy of partitioning expression and fix Var's varno attributes */ part_expr = PrelExpressionForRelid(prel, rti); @@ -332,9 +369,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pathkeyDesc = (PathKey *) linitial(pathkeys); } - /* HACK: we must restore 'inh' flag! 
*/ - rte->inh = true; - children = PrelGetChildrenArray(prel); ranges = list_make1_irange_full(prel, IR_COMPLETE); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 7c8df802..6c364b63 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -28,10 +28,38 @@ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) +/* Build transform_query_cxt field name */ +#define TRANSFORM_CONTEXT_FIELD(command_type) \ + has_parent_##command_type##_query + +/* Check that transform_query_cxt field is TRUE */ +#define TRANSFORM_CONTEXT_HAS_PARENT(context, command_type) \ + ( (context)->TRANSFORM_CONTEXT_FIELD(command_type) ) + +/* Used in switch(CmdType) statements */ +#define TRANSFORM_CONTEXT_SWITCH_SET(context, command_type) \ + case CMD_##command_type: \ + (context)->TRANSFORM_CONTEXT_FIELD(command_type) = true; \ + break; \ + +typedef struct +{ + /* bool has_parent_CMD_TYPE_query; */ + bool TRANSFORM_CONTEXT_FIELD(SELECT), + TRANSFORM_CONTEXT_FIELD(INSERT), + TRANSFORM_CONTEXT_FIELD(UPDATE), + TRANSFORM_CONTEXT_FIELD(DELETE); + + /* params for handle_modification_query() */ + ParamListInfo query_params; +} transform_query_cxt; + + + static bool pathman_transform_query_walker(Node *node, void *context); -static void disable_standard_inheritance(Query *parse); -static void handle_modification_query(Query *parse, ParamListInfo params); +static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); +static void handle_modification_query(Query *parse, transform_query_cxt *context); static void partition_filter_visitor(Plan *plan, void *context); @@ -139,7 +167,13 @@ plan_tree_walker(Plan *plan, void pathman_transform_query(Query *parse, ParamListInfo params) { - pathman_transform_query_walker((Node *) parse, (void *) params); + transform_query_cxt context; + + /* Initialize context */ + memset((void *) &context, 0, sizeof(context)); + context.query_params = params; + + pathman_transform_query_walker((Node 
*) parse, (void *) &context); } /* Walker for pathman_transform_query() */ @@ -151,20 +185,35 @@ pathman_transform_query_walker(Node *node, void *context) else if (IsA(node, Query)) { - Query *query = (Query *) node; + Query *query = (Query *) node; + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + switch (query->commandType) + { + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, SELECT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, INSERT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, UPDATE); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, DELETE); + + default: + break; + } /* Assign Query a 'queryId' */ assign_query_id(query); /* Apply Query tree modifiers */ rowmark_add_tableoids(query); - disable_standard_inheritance(query); - handle_modification_query(query, (ParamListInfo) context); + disable_standard_inheritance(query, current_context); + handle_modification_query(query, current_context); /* Handle Query node */ return query_tree_walker(query, pathman_transform_query_walker, - context, + (void *) &next_context, 0); } @@ -183,14 +232,17 @@ pathman_transform_query_walker(Node *node, void *context) /* Disable standard inheritance if table is partitioned by pg_pathman */ static void -disable_standard_inheritance(Query *parse) +disable_standard_inheritance(Query *parse, transform_query_cxt *context) { ListCell *lc; Index current_rti; /* current range table entry index */ #ifdef LEGACY_ROWMARKS_95 - if (parse->commandType != CMD_SELECT) - return; + /* Don't process non-topmost non-select queries */ + if (parse->commandType != CMD_SELECT || + TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || + TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) + return; #endif /* Walk through RangeTblEntries list */ @@ -235,7 +287,7 @@ disable_standard_inheritance(Query *parse) /* Checks if query affects only one partition */ static void -handle_modification_query(Query *parse, 
ParamListInfo params) +handle_modification_query(Query *parse, transform_query_cxt *context) { const PartRelationInfo *prel; Node *prel_expr; @@ -243,9 +295,10 @@ handle_modification_query(Query *parse, ParamListInfo params) RangeTblEntry *rte; WrapperNode *wrap; Expr *expr; - WalkerContext context; + WalkerContext wcxt; Index result_rel; int num_selected; + ParamListInfo params; /* Fetch index of result relation */ result_rel = parse->resultRelation; @@ -276,6 +329,8 @@ handle_modification_query(Query *parse, ParamListInfo params) /* Exit if there's no expr (no use) */ if (!expr) return; + params = context->query_params; + /* Check if we can replace PARAMs with CONSTs */ if (params && clause_contains_params((Node *) expr)) expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); @@ -284,8 +339,8 @@ handle_modification_query(Query *parse, ParamListInfo params) prel_expr = PrelExpressionForRelid(prel, result_rel); /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL); - wrap = walk_expr_tree(expr, &context); + InitWalkerContext(&wcxt, prel_expr, prel, NULL); + wrap = walk_expr_tree(expr, &wcxt); ranges = irange_list_intersection(ranges, wrap->rangeset); num_selected = irange_list_length(ranges); From 8b3b05cfb191cc3f24f55f95151b9c8608d507c4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sun, 20 Aug 2017 22:22:17 +0300 Subject: [PATCH 0710/1124] fix errors found by clang-analyzer, add special rule for CTEs (PostgreSQL 9.5) --- expected/pathman_upd_del.out | 15 +++++++++++++++ expected/pathman_upd_del_1.out | 15 +++++++++++++++ sql/pathman_upd_del.sql | 7 +++++++ src/hooks.c | 6 +++++- src/planner_tree_modification.c | 31 ++++++++++++++++++++++++++----- 5 files changed, 68 insertions(+), 6 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index ee112926..0e800996 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -118,6 +118,21 @@ EXPLAIN 
(COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = ' (8 rows) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(9 rows) + +WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index c8dcaf8c..9ca2f9ad 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -120,6 +120,21 @@ EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = ' (10 rows) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(9 rows) + +WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index 18e32c6a..aea11ac0 100644 --- a/sql/pathman_upd_del.sql +++ 
b/sql/pathman_upd_del.sql @@ -5,6 +5,8 @@ CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; + + SET enable_indexscan = ON; SET enable_seqscan = OFF; @@ -46,6 +48,11 @@ DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; +WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; + + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 56ac4bf8..f4996e65 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -95,8 +95,12 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; + /* We should only consider base relations */ + if (innerrel->reloptkind != RELOPT_BASEREL) + return; + /* We shouldn't process tables with active children */ - if (inner_rte && inner_rte->inh) + if (inner_rte->inh) return; /* We can't handle full or right outer joins */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 6c364b63..74b89400 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -52,6 +52,7 @@ typedef struct /* params for handle_modification_query() */ ParamListInfo query_params; + SubLink *parent_sublink; } transform_query_cxt; @@ -183,6 +184,20 @@ pathman_transform_query_walker(Node *node, void *context) if (node == NULL) return false; + else if (IsA(node, SubLink)) + { + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + next_context.parent_sublink = (SubLink 
*) node; + + return expression_tree_walker(node, + pathman_transform_query_walker, + (void *) &next_context); + } + else if (IsA(node, Query)) { Query *query = (Query *) node; @@ -238,11 +253,17 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) Index current_rti; /* current range table entry index */ #ifdef LEGACY_ROWMARKS_95 - /* Don't process non-topmost non-select queries */ - if (parse->commandType != CMD_SELECT || - TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || - TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) - return; + /* Don't process non-SELECT queries */ + if (parse->commandType != CMD_SELECT) + return; + + /* Don't process queries under UPDATE or DELETE (except for CTEs) */ + if ((TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || + TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) && + (context->parent_sublink && + context->parent_sublink->subselect == (Node *) parse && + context->parent_sublink->subLinkType != CTE_SUBLINK)) + return; #endif /* Walk through RangeTblEntries list */ From 2284abcd742b33cba5f9fedbaf082fa341c63764 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 12:40:18 +0300 Subject: [PATCH 0711/1124] add more comments for RowMarks fixes etc --- src/include/compat/relation_tags.h | 8 +++++++- src/include/compat/rowmarks_fix.h | 11 ++++++++++- src/planner_tree_modification.c | 6 ++++-- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h index cbd80b82..d5183d32 100644 --- a/src/include/compat/relation_tags.h +++ b/src/include/compat/relation_tags.h @@ -2,7 +2,13 @@ * * relation_tags.h * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * NOTE: implementations for vanilla and PostgresPro differ + * + * NOTE: implementations for vanilla and PostgresPro differ, + * which means that subquery pull-up might break the bond + * between a RangeTblEntry and the corresponding KVPs. 
+ * + * This subsystem was meant to replace the broken 'inh' flag + * (see get_rel_parenthood_status() for more details). * * Copyright (c) 2017, Postgres Professional * diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index d2587cee..8bbd2b1d 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -29,7 +29,16 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); #else -#define LEGACY_ROWMARKS_95 /* NOTE: can't fix 9.5, see PlannerInfo->processed_tlist */ +/* + * Starting from 9.6, it's possible to append junk + * tableoid columns using the PlannerInfo->processed_tlist. + * This is absolutely crucial for UPDATE and DELETE queries, + * so we had to add some special fixes for 9.5: + * + * 1) provide legacy code for RowMarks (tableoids); + * 2) disable dangerous UPDATE & DELETE optimizations. + */ +#define LEGACY_ROWMARKS_95 #define append_tle_for_rowmark(root, rc) ( (void) true ) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 74b89400..a4f901b1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -44,14 +44,16 @@ typedef struct { - /* bool has_parent_CMD_TYPE_query; */ + /* Do we have a parent CmdType query? 
*/ bool TRANSFORM_CONTEXT_FIELD(SELECT), TRANSFORM_CONTEXT_FIELD(INSERT), TRANSFORM_CONTEXT_FIELD(UPDATE), TRANSFORM_CONTEXT_FIELD(DELETE); - /* params for handle_modification_query() */ + /* Parameters for handle_modification_query() */ ParamListInfo query_params; + + /* SubLink that might contain an examined query */ SubLink *parent_sublink; } transform_query_cxt; From 89d2d8f64a3d15b132e8749f52a34f234cb273d0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 13:39:52 +0300 Subject: [PATCH 0712/1124] more regression tests for PostgreSQL 9.5 --- expected/pathman_upd_del.out | 157 ++++++++++++++++++++++++++++---- expected/pathman_upd_del_1.out | 161 +++++++++++++++++++++++++++++---- sql/pathman_upd_del.sql | 153 +++++++++++++++++++++++++++---- 3 files changed, 419 insertions(+), 52 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 0e800996..9f590a9f 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -1,3 +1,8 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -8,20 +13,26 @@ SET enable_seqscan = OFF; /* Temporary table for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); -/* Range */ +/* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT 
pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); create_range_partitions ------------------------- 12 (1 row) -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Update on range_rel_6 @@ -29,6 +40,7 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15 Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) +BEGIN; UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value @@ -36,7 +48,9 @@ SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; 166 | Tue Jun 15 00:00:00 2010 | 111 (1 row) -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Delete on range_rel_6 @@ -44,13 +58,16 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) +BEGIN; DELETE FROM test.range_rel WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; QUERY PLAN 
-------------------------------------------------------------------------------- Update on range_rel @@ -58,13 +75,16 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01 Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) (3 rows) +BEGIN; UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; QUERY PLAN -------------------------------------------------------------------------------- Delete on range_rel @@ -72,13 +92,18 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no pa Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) (3 rows) +BEGIN; DELETE FROM test.range_rel WHERE dt < '1990-01-01'; SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Update on range_rel_1 r @@ -90,8 +115,34 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) (7 rows) -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +BEGIN; +UPDATE test.range_rel r SET value = 
t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Update on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(8 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Delete on range_rel_1 r @@ -103,8 +154,14 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = ' Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) (7 rows) -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------------- Delete on tmp t @@ -117,8 +174,15 @@ EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = ' Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) (8 rows) -DELETE FROM test.tmp t USING test.range_rel r 
WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; QUERY PLAN ---------------------------------------------------------------------------------------- Delete on tmp @@ -132,7 +196,68 @@ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010 -> Seq Scan on tmp (9 rows) -WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(9 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + 
-> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(14 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index 9ca2f9ad..a019285b 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -1,3 +1,8 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -8,20 +13,26 @@ SET enable_seqscan = OFF; /* Temporary table for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); -/* Range */ +/* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); create_range_partitions ------------------------- 12 (1 row) -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = 
'2010-06-15'; /* have partitions for this 'dt' */ +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Update on range_rel_6 @@ -29,6 +40,7 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15 Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) +BEGIN; UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value @@ -36,7 +48,9 @@ SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; 166 | Tue Jun 15 00:00:00 2010 | 111 (1 row) -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; QUERY PLAN -------------------------------------------------------------------------------- Delete on range_rel_6 @@ -44,13 +58,16 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) (3 rows) +BEGIN; DELETE FROM test.range_rel WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; QUERY PLAN -------------------------------------------------------------------------------- Update on range_rel @@ -58,13 +75,16 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01 Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) (3 rows) +BEGIN; UPDATE 
test.range_rel SET value = 111 WHERE dt = '1990-01-01'; SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; QUERY PLAN -------------------------------------------------------------------------------- Delete on range_rel @@ -72,13 +92,18 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no pa Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) (3 rows) +BEGIN; DELETE FROM test.range_rel WHERE dt < '1990-01-01'; SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; id | dt | value ----+----+------- (0 rows) -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Update on range_rel_1 r @@ -90,8 +115,36 @@ EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) (7 rows) -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------- + Update on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(10 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------- Delete on range_rel_1 r @@ -103,8 +156,14 @@ EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = ' Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) (7 rows) -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; QUERY PLAN -------------------------------------------------------------------------------------------------- Delete on tmp t @@ -119,8 +178,15 @@ EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = ' Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) (10 rows) -DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = 
'2010-01-02') DELETE FROM test.tmp USING q; +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; QUERY PLAN ---------------------------------------------------------------------------------------- Delete on tmp @@ -134,7 +200,70 @@ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010 -> Seq Scan on tmp (9 rows) -WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(9 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Hash Join + Hash Cond: (t.id = r.id) + -> Seq Scan on tmp t + -> Hash + -> Append + -> Index Scan using 
range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> CTE Scan on q + -> Materialize + -> Seq Scan on tmp +(16 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index aea11ac0..16d7ebfd 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -10,46 +16,153 @@ CREATE SCHEMA test; SET enable_indexscan = ON; SET enable_seqscan = OFF; + /* Temporary table for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); -/* Range */ + +/* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; + +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 
month'::interval, + 12); + + +/* + * Test UPDATE and DELETE + */ + +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + +BEGIN; UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; + -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; /* have partitions for this 'dt' */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + +BEGIN; DELETE FROM test.range_rel WHERE dt = '2010-06-15'; SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; /* no partitions for this 'dt' */ -UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; /* no partitions for this 'dt' */ -DELETE FROM test.range_rel WHERE dt < '1990-01-01'; -SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; +ROLLBACK; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND 
r.id = t.id; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; -EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; -WITH q AS (SELECT * FROM test.range_rel r WHERE r.dt = '2010-01-02') DELETE FROM test.tmp USING q; +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; +ROLLBACK; + + +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE 
FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; From 13375687786269d0bbc11c1a8ebaf67c2b530163 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 13:59:49 +0300 Subject: [PATCH 0713/1124] add comment and safety assert --- src/planner_tree_modification.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index a4f901b1..2b8c811c 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -134,6 +134,8 @@ plan_tree_walker(Plan *plan, /* Since they look alike */ case T_MergeAppend: case T_Append: + Assert(offsetof(Append, appendplans) == + offsetof(MergeAppend, mergeplans)); foreach(l, ((Append *) plan)->appendplans) plan_tree_walker((Plan *) lfirst(l), visitor, context); break; @@ -195,6 +197,7 @@ pathman_transform_query_walker(Node *node, void *context) next_context = *current_context; next_context.parent_sublink = (SubLink *) node; + /* Handle expression subtree */ return expression_tree_walker(node, pathman_transform_query_walker, (void *) &next_context); From 7df9159d851c494fe0609cf0d431d0d07522cf1f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 15:00:23 +0300 Subject: [PATCH 0714/1124] resolve conflicts, fix make_partition_update() --- expected/pathman_inserts.out | 12 ++++++------ expected/pathman_inserts_1.out | 24 ++++++++++++------------ 
src/include/partition_update.h | 1 + src/partition_update.c | 20 +++++++++++++++----- src/planner_tree_modification.c | 4 ++-- 5 files changed, 36 insertions(+), 25 deletions(-) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index c3a8566f..15136608 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -872,11 +872,11 @@ RETURNING e * 2, b, tableoid::regclass; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (d, e) SELECT i, i FROM generate_series(1, 10) i; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i Function Call: generate_series(1, 10) @@ -889,7 +889,7 @@ FROM generate_series(1, 10) i; ----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint Function Call: generate_series(1, 10) @@ -943,7 +943,7 @@ FROM test_inserts.storage; ---------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result Output: NULL::integer, storage_11.b, NULL::integer, 
storage_11.d, NULL::bigint -> Append @@ -984,7 +984,7 @@ FROM test_inserts.storage; -------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint -> Append diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index 9f8633ab..d20e2c3a 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -872,11 +872,11 @@ RETURNING e * 2, b, tableoid::regclass; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (d, e) SELECT i, i FROM generate_series(1, 10) i; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e -> Function Scan on pg_catalog.generate_series i Output: NULL::integer, NULL::integer, NULL::integer, i, i Function Call: generate_series(1, 10) @@ -885,11 +885,11 @@ FROM generate_series(1, 10) i; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT i FROM generate_series(1, 10) i; - QUERY PLAN ---------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Function Scan on 
pg_catalog.generate_series i Output: NULL::integer, i, NULL::integer, NULL::text, NULL::bigint Function Call: generate_series(1, 10) @@ -939,11 +939,11 @@ FROM test_inserts.storage; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d) SELECT b, d FROM test_inserts.storage; - QUERY PLAN -------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result Output: NULL::integer, b, NULL::integer, d, NULL::bigint -> Append @@ -980,11 +980,11 @@ FROM test_inserts.storage; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT b FROM test_inserts.storage; - QUERY PLAN ---------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) - Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result Output: NULL::integer, b, NULL::integer, NULL::text, NULL::bigint -> Append diff --git a/src/include/partition_update.h b/src/include/partition_update.h index c2bd6926..7efdfe51 100644 --- a/src/include/partition_update.h +++ b/src/include/partition_update.h @@ -51,6 +51,7 @@ TupleTableSlot *partition_update_exec(CustomScanState *node); Plan *make_partition_update(Plan *subplan, Oid parent_relid, + Index parent_rti, List *returning_list); #endif /* PARTITION_UPDATE_H */ diff --git a/src/partition_update.c b/src/partition_update.c index 93d44851..f89edcdd 100644 --- a/src/partition_update.c +++ b/src/partition_update.c @@ -63,11 +63,20 @@ 
init_partition_update_static_data(void) Plan * make_partition_update(Plan *subplan, Oid parent_relid, + Index parent_rti, List *returning_list) { - Plan *pfilter; - CustomScan *cscan = makeNode(CustomScan); + CustomScan *cscan = makeNode(CustomScan); + Plan *pfilter; + + /* Create child PartitionFilter node */ + pfilter = make_partition_filter(subplan, + parent_relid, + parent_rti, + ONCONFLICT_NONE, + returning_list, + CMD_UPDATE); /* Copy costs etc */ cscan->scan.plan.startup_cost = subplan->startup_cost; @@ -77,15 +86,16 @@ make_partition_update(Plan *subplan, /* Setup methods and child plan */ cscan->methods = &partition_update_plan_methods; - pfilter = make_partition_filter(subplan, parent_relid, ONCONFLICT_NONE, - returning_list, CMD_UPDATE); cscan->custom_plans = list_make1(pfilter); + + /* Build an appropriate target list */ cscan->scan.plan.targetlist = pfilter->targetlist; /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; + + /* FIXME: should we use the same tlist? 
*/ cscan->custom_scan_tlist = subplan->targetlist; - cscan->custom_private = NULL; return &cscan->scan.plan; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 293ce941..9af8c302 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -557,8 +557,8 @@ partition_update_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), - relid, + lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, returning_list); } } From 1e0af6f3b9bb5598f6559f56d2085c170deb6b09 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 21 Aug 2017 16:32:24 +0300 Subject: [PATCH 0715/1124] test variants in 'extected' using a bash script --- expected/test_variants.sh | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 expected/test_variants.sh diff --git a/expected/test_variants.sh b/expected/test_variants.sh new file mode 100755 index 00000000..46bf2817 --- /dev/null +++ b/expected/test_variants.sh @@ -0,0 +1,27 @@ +#!/usr/bin/bash + +ret=0 + +red="\033[0;31m" +reset='\033[0m' + +shopt -s extglob + +for result in ./*_+([0-9]).out; do + f1="$result" + f2="${f1//_+([0-9])/}" + + printf "examine $(basename $f1) \n" + + file_diff=$(diff $f1 $f2 | wc -l) + + if [ $file_diff -eq 0 ]; then + printf $red + printf "WARNING: $(basename $f1) is redundant \n" >&2 + printf $reset + + ret=1 # change exit code + fi +done + +exit $ret From 75c0c77409214cde583564c28ba3b8cc45aae92f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 23 Aug 2017 16:10:33 +0300 Subject: [PATCH 0716/1124] rename update node to PartitionRouter --- Makefile | 2 +- expected/pathman_subpartitions.out | 2 +- expected/pathman_update_node.out | 6 +- sql/pathman_subpartitions.sql | 8 +- sql/pathman_update_node.sql | 4 +- src/hooks.c | 41 ++-- src/include/hooks.h | 6 +- src/include/partition_filter.h | 33 ++- 
src/include/partition_router.h | 85 +++++++ src/include/partition_update.h | 57 ----- src/include/planner_tree_modification.h | 2 +- src/partition_filter.c | 47 ++-- ...{partition_update.c => partition_router.c} | 128 +++++----- src/pg_pathman.c | 4 +- src/planner_tree_modification.c | 229 +++++++++--------- src/utility_stmt_hooking.c | 22 +- 16 files changed, 375 insertions(+), 301 deletions(-) create mode 100644 src/include/partition_router.h delete mode 100644 src/include/partition_update.h rename src/{partition_update.c => partition_router.c} (66%) diff --git a/Makefile b/Makefile index 1b159d23..3bd96b31 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ - src/partition_update.o $(WIN32RES) + src/partition_router.o $(WIN32RES) override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index af35011e..ab93090d 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -196,7 +196,7 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 15 other objects /* Test that update works correctly */ -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); create_range_partitions diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 254b301e..125eedd4 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -2,7 +2,7 @@ SET search_path = 'public'; CREATE 
EXTENSION pg_pathman; CREATE SCHEMA test_update_node; -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; /* Partition table by RANGE (NUMERIC) */ CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); CREATE INDEX val_idx ON test_update_node.test_range (val); @@ -18,7 +18,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRoute) + -> Custom Scan (PartitionRouter) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) @@ -31,7 +31,7 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRoute) + -> Custom Scan (PartitionRouter) -> Custom Scan (PartitionFilter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 23217872..1e5b2e47 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,6 +3,8 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; + + /* Create two level partitioning structure */ CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; @@ -58,8 +60,10 @@ SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); SELECT subpartitions.partitions_tree('subpartitions.abc'); DROP TABLE subpartitions.abc CASCADE; + /* Test that update works correctly */ -SET pg_pathman.enable_partitionupdate=on; +SET pg_pathman.enable_partitionrouter = ON; + CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT 
create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); @@ -77,6 +81,8 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ + + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index f451010e..aff7f8ec 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -3,7 +3,9 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_update_node; -SET pg_pathman.enable_partitionupdate=on; + + +SET pg_pathman.enable_partitionrouter = ON; /* Partition table by RANGE (NUMERIC) */ diff --git a/src/hooks.c b/src/hooks.c index 63808297..7f77514a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -17,7 +17,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" #include "runtimeappend.h" @@ -559,8 +559,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Add PartitionUpdate node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_update_nodes); + /* Add PartitionRouter node for UPDATE queries */ + ExecuteForPlanTree(result, add_partition_routers); /* Decrement relation tags refcount */ decr_refcount_relation_tags(); @@ -847,41 +847,45 @@ pathman_process_utility_hook(Node *first_arg, #define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) #endif +/* + * Executor hook (for PartitionRouter). 
+ */ #if PG_VERSION_NUM >= 100000 void -pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - ExecutorRun_CountArgType count, bool execute_once) +pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count, + bool execute_once) #else void -pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, - ExecutorRun_CountArgType count) +pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count) #endif { PlanState *state = (PlanState *) queryDesc->planstate; if (IsA(state, ModifyTableState)) { - int i; ModifyTableState *mt_state = (ModifyTableState *) state; + int i; for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *subplanstate = (CustomScanState *) mt_state->mt_plans[i]; + CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - if (!IsA(subplanstate, CustomScanState)) - continue; - - if (strcmp(subplanstate->methods->CustomName, UPDATE_NODE_DESCRIPTION) == 0) + /* Check if this is a PartitionRouter node */ + if (IsPartitionRouterState(pr_state)) { - ResultRelInfo *rri = mt_state->resultRelInfo + i; + ResultRelInfo *rri = &mt_state->resultRelInfo[i]; /* - * We unset junkfilter to disable junk cleaning in - * ExecModifyTable. + * We unset junkfilter to disable junk + * cleaning in ExecModifyTable. 
*/ rri->ri_junkFilter = NULL; - /* hack, change UPDATE operation to INSERT */ + /* HACK: change UPDATE operation to INSERT */ mt_state->operation = CMD_INSERT; } } @@ -891,6 +895,5 @@ pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, if (executor_run_hook_next) EXECUTOR_HOOK_NEXT(queryDesc, direction, count); /* Else call internal implementation */ - else - EXECUTOR_RUN(queryDesc, direction, count); + else EXECUTOR_RUN(queryDesc, direction, count); } diff --git a/src/include/hooks.h b/src/include/hooks.h index d512436d..0c9922f7 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -79,11 +79,13 @@ typedef long ExecutorRun_CountArgType; #endif #if PG_VERSION_NUM >= 100000 -void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, ExecutorRun_CountArgType count, bool execute_once); #else -void pathman_executor_hook(QueryDesc *queryDesc, ScanDirection direction, +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, ExecutorRun_CountArgType count); #endif diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index c2626e46..841cd0cb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -25,10 +25,13 @@ #endif +#define INSERT_NODE_NAME "PartitionFilter" + + #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" #define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" -#define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" +#define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" @@ -46,6 +49,9 @@ typedef struct } ResultRelInfoHolder; +/* Standard size of ResultPartsStorage entry */ +#define ResultPartsStorageStandard 0 + /* 
Forward declaration (for on_new_rri_holder()) */ struct ResultPartsStorage; typedef struct ResultPartsStorage ResultPartsStorage; @@ -63,7 +69,8 @@ typedef void (*on_new_rri_holder)(EState *estate, */ struct ResultPartsStorage { - ResultRelInfo *saved_rel_info; /* original ResultRelInfo (parent) */ + ResultRelInfo *base_rri; /* original ResultRelInfo (parent) */ + HTAB *result_rels_table; HASHCTL result_rels_table_config; @@ -79,11 +86,6 @@ struct ResultPartsStorage LOCKMODE heap_close_lock_mode; }; -/* - * Standard size of ResultPartsStorage entry. - */ -#define ResultPartsStorageStandard 0 - typedef struct { CustomScanState css; @@ -115,6 +117,23 @@ extern CustomScanMethods partition_filter_plan_methods; extern CustomExecMethods partition_filter_exec_methods; +#define IsPartitionFilterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_filter_plan_methods) \ + ) + +#define IsPartitionFilterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_filter_exec_methods) \ + ) + +#define IsPartitionFilter(node) \ + ( IsPartitionFilterPlan(node) || IsPartitionFilterState(node) ) + + + void init_partition_filter_static_data(void); diff --git a/src/include/partition_router.h b/src/include/partition_router.h new file mode 100644 index 00000000..e90893ba --- /dev/null +++ b/src/include/partition_router.h @@ -0,0 +1,85 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + 
+ +#define UPDATE_NODE_NAME "PartitionRouter" + + +typedef struct PartitionRouterState +{ + CustomScanState css; + + Oid partitioned_table; + JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ +} PartitionRouterState; + + +extern bool pg_pathman_enable_partition_router; + +extern CustomScanMethods partition_router_plan_methods; +extern CustomExecMethods partition_router_exec_methods; + + +#define IsPartitionRouterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_router_plan_methods) \ + ) + +#define IsPartitionRouterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ + ) + +#define IsPartitionRouter(node) \ + ( IsPartitionRouterPlan(node) || IsPartitionRouterState(node) ) + + +void init_partition_router_static_data(void); + + +Plan *make_partition_router(Plan *subplan, + Oid parent_relid, + Index parent_rti, + List *returning_list); + + +Node *partition_router_create_scan_state(CustomScan *node); + +void partition_router_begin(CustomScanState *node, EState *estate, int eflags); + +TupleTableSlot *partition_router_exec(CustomScanState *node); + +void partition_router_end(CustomScanState *node); + +void partition_router_rescan(CustomScanState *node); + +void partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/partition_update.h b/src/include/partition_update.h deleted file mode 100644 index 7efdfe51..00000000 --- a/src/include/partition_update.h +++ /dev/null @@ -1,57 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * partition_update.h - * Insert row to right partition in UPDATE operation - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef PARTITION_UPDATE_H -#define 
PARTITION_UPDATE_H - -#include "relation_info.h" -#include "utils.h" - -#include "postgres.h" -#include "commands/explain.h" -#include "optimizer/planner.h" - -#if PG_VERSION_NUM >= 90600 -#include "nodes/extensible.h" -#endif - -#define UPDATE_NODE_DESCRIPTION ("PartitionRoute") - -typedef struct PartitionUpdateState -{ - CustomScanState css; - - Oid partitioned_table; - JunkFilter *junkfilter; - Plan *subplan; /* proxy variable to store subplan */ -} PartitionUpdateState; - -extern bool pg_pathman_enable_partition_update; - -extern CustomScanMethods partition_update_plan_methods; -extern CustomExecMethods partition_update_exec_methods; - -void init_partition_update_static_data(void); -Node *partition_update_create_scan_state(CustomScan *node); - -void partition_update_begin(CustomScanState *node, EState *estate, int eflags); -void partition_update_end(CustomScanState *node); -void partition_update_rescan(CustomScanState *node); -void partition_update_explain(CustomScanState *node, List *ancestors, - ExplainState *es); - -TupleTableSlot *partition_update_exec(CustomScanState *node); - -Plan *make_partition_update(Plan *subplan, - Oid parent_relid, - Index parent_rti, - List *returning_list); - -#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 64053b3d..f7de3e3e 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,7 +34,7 @@ void pathman_transform_query(Query *parse, ParamListInfo params); /* These functions scribble on Plan tree */ void add_partition_filters(List *rtable, Plan *plan); -void add_partition_update_nodes(List *rtable, Plan *plan); +void add_partition_routers(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 77a62961..46ee75e9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -96,10 +96,10 @@ static 
estate_mod_data * fetch_estate_mod_data(EState *estate); void init_partition_filter_static_data(void) { - partition_filter_plan_methods.CustomName = "PartitionFilter"; + partition_filter_plan_methods.CustomName = INSERT_NODE_NAME; partition_filter_plan_methods.CreateCustomScanState = partition_filter_create_scan_state; - partition_filter_exec_methods.CustomName = "PartitionFilter"; + partition_filter_exec_methods.CustomName = INSERT_NODE_NAME; partition_filter_exec_methods.BeginCustomScan = partition_filter_begin; partition_filter_exec_methods.ExecCustomScan = partition_filter_exec; partition_filter_exec_methods.EndCustomScan = partition_filter_end; @@ -109,7 +109,7 @@ init_partition_filter_static_data(void) partition_filter_exec_methods.ExplainCustomScan = partition_filter_explain; DefineCustomBoolVariable("pg_pathman.enable_partitionfilter", - "Enables the planner's use of PartitionFilter custom node.", + "Enables the planner's use of " INSERT_NODE_NAME " custom node.", NULL, &pg_pathman_enable_partition_filter, true, @@ -164,7 +164,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, result_rels_table_config, HASH_ELEM | HASH_BLOBS); parts_storage->estate = estate; - parts_storage->saved_rel_info = NULL; + parts_storage->base_rri = NULL; parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; @@ -216,7 +216,7 @@ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) { #define CopyToResultRelInfo(field_name) \ - ( child_result_rel_info->field_name = parts_storage->saved_rel_info->field_name ) + ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) ResultRelInfoHolder *rri_holder; bool found; @@ -229,13 +229,17 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) if (!found) { Relation child_rel, - base_rel = parts_storage->saved_rel_info->ri_RelationDesc; + base_rel; RangeTblEntry *child_rte, 
*parent_rte; Index child_rte_idx; ResultRelInfo *child_result_rel_info; List *translated_vars; + /* Check that 'base_rri' is set */ + if (!parts_storage->base_rri) + elog(ERROR, "ResultPartsStorage contains no base_rri"); + /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) @@ -249,10 +253,13 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) return NULL; } - parent_rte = rt_fetch(parts_storage->saved_rel_info->ri_RangeTableIndex, + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, parts_storage->estate->es_range_table); - /* Open relation and check if it is a valid target */ + /* Get base relation */ + base_rel = parts_storage->base_rri->ri_RelationDesc; + + /* Open child relation and check if it is a valid target */ child_rel = heap_open(partid, NoLock); CheckValidResultRel(child_rel, parts_storage->command_type); @@ -281,10 +288,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Create ResultRelInfo for partition */ child_result_rel_info = makeNode(ResultRelInfo); - /* Check that 'saved_rel_info' is set */ - if (!parts_storage->saved_rel_info) - elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); - InitResultRelInfoCompat(child_result_rel_info, child_rel, child_rte_idx, @@ -550,10 +553,10 @@ make_partition_filter(Plan *subplan, errmsg("ON CONFLICT clause is not supported with partitioned tables"))); /* Copy costs etc */ - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - cscan->scan.plan.plan_width = subplan->plan_width; + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; /* Setup methods and child 
plan */ cscan->methods = &partition_filter_plan_methods; @@ -689,15 +692,13 @@ partition_filter_exec(CustomScanState *node) EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - ResultRelInfo *saved_resultRelInfo; slot = ExecProcNode(child_ps); state->subplan_slot = slot; - /* Save original ResultRelInfo */ - saved_resultRelInfo = estate->es_result_relation_info; - if (!state->result_parts.saved_rel_info) - state->result_parts.saved_rel_info = saved_resultRelInfo; + /* Don't forget to initialize 'base_rri'! */ + if (!state->result_parts.base_rri) + state->result_parts.base_rri = estate->es_result_relation_info; if (state->tup_convert_slot) ExecClearTuple(state->tup_convert_slot); @@ -715,7 +716,7 @@ partition_filter_exec(CustomScanState *node) { if (!state->warning_triggered) elog(WARNING, "table \"%s\" is not partitioned, " - "PartitionFilter will behave as a normal INSERT", + INSERT_NODE_NAME " will behave as a normal INSERT", get_rel_name_or_relid(state->partitioned_table)); return slot; @@ -895,7 +896,7 @@ prepare_rri_returning_for_insert(EState *estate, return; child_rri = rri_holder->result_rel_info; - parent_rri = rps_storage->saved_rel_info; + parent_rri = rps_storage->base_rri; parent_rt_idx = parent_rri->ri_RangeTableIndex; /* Create ExprContext for tuple projections */ diff --git a/src/partition_update.c b/src/partition_router.c similarity index 66% rename from src/partition_update.c rename to src/partition_router.c index f89edcdd..84a98668 100644 --- a/src/partition_update.c +++ b/src/partition_router.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * - * partition_update.c - * Insert row to right partition in UPDATE operation + * partition_router.c + * Route row to a right partition in UPDATE operation * * Copyright (c) 2017, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group @@ -11,7 +11,7 @@ */ 
#include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "compat/pg_compat.h" #include "access/xact.h" @@ -23,10 +23,10 @@ #include "utils/guc.h" #include "utils/rel.h" -bool pg_pathman_enable_partition_update = true; +bool pg_pathman_enable_partition_router = true; -CustomScanMethods partition_update_plan_methods; -CustomExecMethods partition_update_exec_methods; +CustomScanMethods partition_router_plan_methods; +CustomExecMethods partition_router_exec_methods; static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, TupleTableSlot *planSlot, @@ -34,24 +34,24 @@ static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, EState *estate); void -init_partition_update_static_data(void) +init_partition_router_static_data(void) { - partition_update_plan_methods.CustomName = UPDATE_NODE_DESCRIPTION; - partition_update_plan_methods.CreateCustomScanState = partition_update_create_scan_state; - - partition_update_exec_methods.CustomName = UPDATE_NODE_DESCRIPTION; - partition_update_exec_methods.BeginCustomScan = partition_update_begin; - partition_update_exec_methods.ExecCustomScan = partition_update_exec; - partition_update_exec_methods.EndCustomScan = partition_update_end; - partition_update_exec_methods.ReScanCustomScan = partition_update_rescan; - partition_update_exec_methods.MarkPosCustomScan = NULL; - partition_update_exec_methods.RestrPosCustomScan = NULL; - partition_update_exec_methods.ExplainCustomScan = partition_update_explain; - - DefineCustomBoolVariable("pg_pathman.enable_partitionupdate", - "Enables the planner's use of PartitionUpdate custom node.", + partition_router_plan_methods.CustomName = UPDATE_NODE_NAME; + partition_router_plan_methods.CreateCustomScanState = partition_router_create_scan_state; + + partition_router_exec_methods.CustomName = UPDATE_NODE_NAME; + partition_router_exec_methods.BeginCustomScan = partition_router_begin; + partition_router_exec_methods.ExecCustomScan = 
partition_router_exec; + partition_router_exec_methods.EndCustomScan = partition_router_end; + partition_router_exec_methods.ReScanCustomScan = partition_router_rescan; + partition_router_exec_methods.MarkPosCustomScan = NULL; + partition_router_exec_methods.RestrPosCustomScan = NULL; + partition_router_exec_methods.ExplainCustomScan = partition_router_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionrouter", + "Enables the planner's use of " UPDATE_NODE_NAME " custom node.", NULL, - &pg_pathman_enable_partition_update, + &pg_pathman_enable_partition_router, false, PGC_USERSET, 0, @@ -61,7 +61,7 @@ init_partition_update_static_data(void) } Plan * -make_partition_update(Plan *subplan, +make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, List *returning_list) @@ -79,13 +79,13 @@ make_partition_update(Plan *subplan, CMD_UPDATE); /* Copy costs etc */ - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - cscan->scan.plan.plan_width = subplan->plan_width; + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; /* Setup methods and child plan */ - cscan->methods = &partition_update_plan_methods; + cscan->methods = &partition_router_plan_methods; cscan->custom_plans = list_make1(pfilter); /* Build an appropriate target list */ @@ -101,15 +101,15 @@ make_partition_update(Plan *subplan, } Node * -partition_update_create_scan_state(CustomScan *node) +partition_router_create_scan_state(CustomScan *node) { - PartitionUpdateState *state; + PartitionRouterState *state; - state = (PartitionUpdateState *) palloc0(sizeof(PartitionUpdateState)); + state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); state->css.flags = 
node->flags; - state->css.methods = &partition_update_exec_methods; + state->css.methods = &partition_router_exec_methods; /* Extract necessary variables */ state->subplan = (Plan *) linitial(node->custom_plans); @@ -117,30 +117,29 @@ partition_update_create_scan_state(CustomScan *node) } void -partition_update_begin(CustomScanState *node, EState *estate, int eflags) +partition_router_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionUpdateState *state = (PartitionUpdateState *) node; + PartitionRouterState *state = (PartitionRouterState *) node; /* Initialize PartitionFilter child node */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } TupleTableSlot * -partition_update_exec(CustomScanState *node) +partition_router_exec(CustomScanState *node) { EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - PartitionUpdateState *state = (PartitionUpdateState *) node; + PartitionRouterState *state = (PartitionRouterState *) node; /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) { - Datum datum; - ResultRelInfo *resultRelInfo, - *sourceRelInfo; + ResultRelInfo *result_rri, + *parent_rri; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; EPQState epqstate; @@ -152,41 +151,46 @@ partition_update_exec(CustomScanState *node) EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - sourceRelInfo = child_state->result_parts.saved_rel_info; - resultRelInfo = estate->es_result_relation_info; + parent_rri = child_state->result_parts.base_rri; + result_rri = estate->es_result_relation_info; - /* we generate junkfilter, if it wasn't created before */ + /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { - state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - sourceRelInfo->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate)); + state->junkfilter = + 
ExecInitJunkFilter(state->subplan->targetlist, + parent_rri->ri_RelationDesc->rd_att->tdhasoid, + ExecInitExtraTupleSlot(estate)); + + state->junkfilter->jf_junkAttNo = + ExecFindJunkAttribute(state->junkfilter, "ctid"); - state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) elog(ERROR, "could not find junk ctid column"); } - relkind = sourceRelInfo->ri_RelationDesc->rd_rel->relkind; + relkind = parent_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { - bool isNull; + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(child_state->subplan_slot, + state->junkfilter->jf_junkAttNo, + &ctid_isnull); - datum = ExecGetJunkAttribute(child_state->subplan_slot, - state->junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ - if (isNull) + if (ctid_isnull) elog(ERROR, "ctid is NULL"); - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free - * ctid!! */ + tupleid = (ItemPointer) DatumGetPointer(ctid_datum); + tuple_ctid = *tupleid; /* be sure we don't free ctid! 
*/ tupleid = &tuple_ctid; } else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, "update node is not supported for foreign tables"); + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); else - elog(ERROR, "got unexpected type of relation for update"); + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); /* * Clean from junk attributes before INSERT, @@ -196,13 +200,13 @@ partition_update_exec(CustomScanState *node) slot = ExecFilterJunk(state->junkfilter, slot); /* Delete old tuple */ - estate->es_result_relation_info = sourceRelInfo; + estate->es_result_relation_info = parent_rri; Assert(tupleid != NULL); ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); - /* we've got the slot that can be inserted to child partition */ - estate->es_result_relation_info = resultRelInfo; + /* We've got the slot that can be inserted to child partition */ + estate->es_result_relation_info = result_rri; return slot; } @@ -210,21 +214,21 @@ partition_update_exec(CustomScanState *node) } void -partition_update_end(CustomScanState *node) +partition_router_end(CustomScanState *node) { Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); } void -partition_update_rescan(CustomScanState *node) +partition_router_rescan(CustomScanState *node) { Assert(list_length(node->custom_ps) == 1); ExecReScan((PlanState *) linitial(node->custom_ps)); } void -partition_update_explain(CustomScanState *node, List *ancestors, ExplainState *es) +partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es) { /* Nothing to do here now */ } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 57dbcef3..d6bfde96 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -16,7 +16,7 @@ #include "hooks.h" #include "pathman.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "runtimeappend.h" #include "runtime_merge_append.h" @@ -321,7 +321,7 
@@ _PG_init(void) init_runtimeappend_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); - init_partition_update_static_data(); + init_partition_router_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9af8c302..b0d61672 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,7 +14,7 @@ #include "compat/rowmarks_fix.h" #include "partition_filter.h" -#include "partition_update.h" +#include "partition_router.h" #include "planner_tree_modification.h" #include "relation_info.h" #include "rewrite/rewriteManip.h" @@ -38,20 +38,23 @@ typedef enum FP_NON_SINGULAR_RESULT /* Multiple or no partitions */ } FindPartitionResult; + static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse); static void handle_modification_query(Query *parse, ParamListInfo params); static void partition_filter_visitor(Plan *plan, void *context); -static void partition_update_visitor(Plan *plan, void *context); +static void partition_router_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -static FindPartitionResult find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition); +static FindPartitionResult find_deepest_partition(Oid relid, Index idx, + Expr *quals, Oid *partition); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); + /* * HACK: We have to mark each Query with a unique * id in order to recognize them properly. 
@@ -274,10 +277,7 @@ handle_modification_query(Query *parse, ParamListInfo params) if (params && clause_contains_params((Node *) quals)) quals = (Expr *) eval_extern_params_mutator((Node *) quals, params); - /* - * Parse syntax tree and extract deepest partition (if there is only one - * satisfying quals) - */ + /* Parse syntax tree and extract deepest partition */ fp_result = find_deepest_partition(rte->relid, result_rel, quals, &child); /* @@ -342,82 +342,11 @@ handle_modification_query(Query *parse, ParamListInfo params) } } -/* - * Find a single deepest subpartition. If there are more than one partitions - * satisfies quals or no such partition at all then return InvalidOid. - */ -static FindPartitionResult -find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) -{ - const PartRelationInfo *prel; - Node *prel_expr; - WalkerContext context; - List *ranges; - WrapperNode *wrap; - - prel = get_pathman_relation_info(relid); - - /* Exit if it's not partitioned */ - if (!prel) - return FP_PLAIN_TABLE; - - /* Exit if we must include parent */ - if (prel->enable_parent) - return FP_NON_SINGULAR_RESULT; - - /* Exit if there's no quals (no use) */ - if (!quals) - return FP_NON_SINGULAR_RESULT; - - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, idx); - - ranges = list_make1_irange_full(prel, IR_COMPLETE); - - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL); - wrap = walk_expr_tree(quals, &context); - ranges = irange_list_intersection(ranges, wrap->rangeset); - - if (irange_list_length(ranges) == 1) - { - IndexRange irange = linitial_irange(ranges); - - if (irange_lower(irange) == irange_upper(irange)) - { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - subpartition; - FindPartitionResult result; - - /* - * Try to go deeper and see if there is subpartition - */ - result = find_deepest_partition(child, - idx, - quals, - 
&subpartition); - switch(result) - { - case FP_FOUND: - *partition = subpartition; - return FP_FOUND; - case FP_PLAIN_TABLE: - *partition = child; - return FP_FOUND; - case FP_NON_SINGULAR_RESULT: - return FP_NON_SINGULAR_RESULT; - } - } - } - - return FP_NON_SINGULAR_RESULT; -} /* - * ------------------------------- - * PartitionFilter and PartitionUpdate-related stuff - * ------------------------------- + * ---------------------------------------------------- + * PartitionFilter and PartitionRouter -related stuff + * ---------------------------------------------------- */ /* Add PartitionFilter nodes to the plan tree */ @@ -428,16 +357,16 @@ add_partition_filters(List *rtable, Plan *plan) plan_tree_walker(plan, partition_filter_visitor, rtable); } -/* Add PartitionUpdate nodes to the plan tree */ +/* Add PartitionRouter nodes to the plan tree */ void -add_partition_update_nodes(List *context, Plan *plan) +add_partition_routers(List *rtable, Plan *plan) { - if (pg_pathman_enable_partition_update) - plan_tree_walker(plan, partition_update_visitor, context); + if (pg_pathman_enable_partition_router) + plan_tree_walker(plan, partition_router_visitor, rtable); } /* - * Add partition filters to ModifyTable node's children. + * Add PartitionFilters to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. */ @@ -484,32 +413,13 @@ partition_filter_visitor(Plan *plan, void *context) } } - -static bool -modifytable_contains_fdw(List *rtable, ModifyTable *node) -{ - ListCell *lc; - - foreach(lc, node->resultRelations) - { - Index rti = lfirst_int(lc); - RangeTblEntry *rte = rt_fetch(rti, rtable); - - if (rte->relkind == RELKIND_FOREIGN_TABLE) - return true; - } - - return false; -} - - /* - * Add partition update to ModifyTable node's children. + * Add PartitionRouter to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. 
*/ static void -partition_update_visitor(Plan *plan, void *context) +partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; @@ -527,8 +437,8 @@ partition_update_visitor(Plan *plan, void *context) { ereport(NOTICE, (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - errmsg("discovered mix of local and foreign tables," - " pg_pathman's update node will not be used"))); + errmsg("discovered mix of local and foreign tables, " + UPDATE_NODE_NAME " will be disabled"))); return; } @@ -540,7 +450,8 @@ partition_update_visitor(Plan *plan, void *context) relid = getrelid(rindex, rtable); const PartRelationInfo *prel; - while ((tmp_relid = get_parent_of_partition(relid, NULL)) != 0) + /* Find topmost parent */ + while ((tmp_relid = get_parent_of_partition(relid, NULL)) != InvalidOid) relid = tmp_relid; /* Check that table is partitioned */ @@ -557,7 +468,7 @@ partition_update_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_update((Plan *) lfirst(lc1), relid, + lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, returning_list); } @@ -625,6 +536,102 @@ tag_extract_parenthood_status(List *relation_tag) } +/* + * -------------------------- + * Various helper functions + * -------------------------- + */ + +/* Does ModifyTable node contain any FDWs? */ +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) +{ + ListCell *lc; + + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); + + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; + } + + return false; +} + +/* + * Find a single deepest subpartition. + * Return InvalidOid if that's impossible. 
+ */ +static FindPartitionResult +find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) +{ + const PartRelationInfo *prel; + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + prel = get_pathman_relation_info(relid); + + /* Exit if it's not partitioned */ + if (!prel) + return FP_PLAIN_TABLE; + + /* Exit if we must include parent */ + if (prel->enable_parent) + return FP_NON_SINGULAR_RESULT; + + /* Exit if there's no quals (no use) */ + if (!quals) + return FP_NON_SINGULAR_RESULT; + + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, idx); + + ranges = list_make1_irange_full(prel, IR_COMPLETE); + + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); + + if (irange_list_length(ranges) == 1) + { + IndexRange irange = linitial_irange(ranges); + + if (irange_lower(irange) == irange_upper(irange)) + { + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)], + subpartition; + FindPartitionResult result; + + /* Try to go deeper and see if there is subpartition */ + result = find_deepest_partition(child, + idx, + quals, + &subpartition); + switch(result) + { + case FP_FOUND: + *partition = subpartition; + return FP_FOUND; + + case FP_PLAIN_TABLE: + *partition = child; + return FP_FOUND; + + case FP_NON_SINGULAR_RESULT: + return FP_NON_SINGULAR_RESULT; + } + } + } + + return FP_NON_SINGULAR_RESULT; +} + /* Replace extern param nodes with consts */ static Node * eval_extern_params_mutator(Node *node, ParamListInfo params) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index ee7468a9..05183a0b 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -551,7 +551,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, bool *nulls; ResultPartsStorage parts_storage; - 
ResultRelInfo *parent_result_rel; + ResultRelInfo *parent_rri; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ ExprContext *econtext; @@ -566,16 +566,16 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, tupDesc = RelationGetDescr(parent_rel); - parent_result_rel = makeNode(ResultRelInfo); - InitResultRelInfoCompat(parent_result_rel, + parent_rri = makeNode(ResultRelInfo); + InitResultRelInfoCompat(parent_rri, parent_rel, 1, /* dummy rangetable index */ 0); - ExecOpenIndices(parent_result_rel, false); + ExecOpenIndices(parent_rri, false); - estate->es_result_relations = parent_result_rel; + estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; - estate->es_result_relation_info = parent_result_rel; + estate->es_result_relation_info = parent_rri; estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ @@ -583,7 +583,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorageStandard, prepare_rri_for_copy, NULL, CMD_INSERT); - parts_storage.saved_rel_info = parent_result_rel; + + /* Don't forget to initialize 'base_rri'! */ + parts_storage.base_rri = parent_rri; /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate); @@ -600,7 +602,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * such. However, executing these triggers maintains consistency with the * EACH ROW triggers that we already fire on COPY. 
*/ - ExecBSInsertTriggers(estate, parent_result_rel); + ExecBSInsertTriggers(estate, parent_rri); values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); @@ -742,7 +744,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, pq_endmsgread(); /* Execute AFTER STATEMENT insertion triggers (FIXME: NULL transition) */ - ExecASInsertTriggersCompat(estate, parent_result_rel, NULL); + ExecASInsertTriggersCompat(estate, parent_rri, NULL); /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); @@ -756,7 +758,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, fini_result_parts_storage(&parts_storage, true); /* Close parent's indices */ - ExecCloseIndices(parent_result_rel); + ExecCloseIndices(parent_rri); FreeExecutorState(estate); From 284ae41a1c2e010eb80a2e03db27ad87e30d059a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 23 Aug 2017 16:18:49 +0300 Subject: [PATCH 0717/1124] fix some comments (PartitionRouter) --- src/partition_router.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index 84a98668..b719bf40 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -184,7 +184,7 @@ partition_router_exec(CustomScanState *node) elog(ERROR, "ctid is NULL"); tupleid = (ItemPointer) DatumGetPointer(ctid_datum); - tuple_ctid = *tupleid; /* be sure we don't free ctid! */ + tuple_ctid = *tupleid; /* be sure we don't free ctid! */ tupleid = &tuple_ctid; } else if (relkind == RELKIND_FOREIGN_TABLE) @@ -194,19 +194,20 @@ partition_router_exec(CustomScanState *node) /* * Clean from junk attributes before INSERT, - * but only if slot wasn't converted in PartitionFilter + * but only if slot wasn't transformed in PartitionFilter. 
*/ if (TupIsNull(child_state->tup_convert_slot)) slot = ExecFilterJunk(state->junkfilter, slot); - /* Delete old tuple */ + /* Magic: replace current ResultRelInfo with parent's one (DELETE) */ estate->es_result_relation_info = parent_rri; Assert(tupleid != NULL); ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); - /* We've got the slot that can be inserted to child partition */ + /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ estate->es_result_relation_info = result_rri; + return slot; } @@ -234,11 +235,13 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e } -/* ---------------------------------------------------------------- - * ExecDeleteInternal +/* + * ---------------------------------------------------------------- + * ExecDeleteInternal * Basicly copy of ExecDelete from executor/nodeModifyTable.c * ---------------------------------------------------------------- */ + static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, TupleTableSlot *planSlot, @@ -272,7 +275,7 @@ ExecDeleteInternal(ItemPointer tupleid, if (tupleid != NULL) { /* delete the tuple */ -ldelete:; +ldelete: result = heap_delete(resultRelationDesc, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, From 2c4155eb5818222ab40ac29901b8c334502b4ab8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 23 Aug 2017 18:10:04 +0300 Subject: [PATCH 0718/1124] small refactoring for PrelExpressionForRelid() & partition_filter_begin() --- src/partition_filter.c | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 46ee75e9..b2004c58 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -505,21 +505,10 @@ prepare_expr_state(const PartRelationInfo *prel, EState *estate) { ExprState *expr_state; MemoryContext old_mcxt; - Index parent_varno = 1; Node *expr; - ListCell *lc; /* Change varno in Vars 
according to range table */ - foreach(lc, estate->es_range_table) - { - RangeTblEntry *entry = lfirst(lc); - - if (entry->relid == PrelParentRelid(prel)) - break; - - parent_varno += 1; - } - expr = PrelExpressionForRelid(prel, parent_varno); + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); @@ -634,32 +623,28 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) if (state->command_type == CMD_UPDATE) { /* - * In UPDATE queries we would operate with child relation, but - * expression expects varattnos like in base relation, so we map - * parent varattnos to child varattnos + * In UPDATE queries we would work with child relation, but + * expression contains varattnos of base relation, so we map + * parent varattnos to child varattnos. */ - bool found_whole_row; AttrNumber *map; - MemoryContext old_mcxt; - Index relno = ((Scan *) child_state->plan)->scanrelid; Node *expr; - Relation child_rel; + ResultRelInfo *child_rri = estate->es_result_relation_info; + Relation child_rel = child_rri->ri_RelationDesc; - child_rel = heap_open(getrelid(relno, estate->es_range_table), NoLock); + MemoryContext old_mcxt; map = build_attributes_map(prel, RelationGetDescr(child_rel)); - expr = map_variable_attnos(PrelExpressionForRelid(prel, relno), - relno, 0, map, + expr = map_variable_attnos(PrelExpressionForRelid(prel, PART_EXPR_VARNO), + PART_EXPR_VARNO, 0, map, RelationGetDescr(child_rel)->natts, &found_whole_row); if (found_whole_row) elog(ERROR, "unexpected whole-row reference found in partition key"); - heap_close(child_rel, NoLock); - /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); state->expr_state = ExecInitExpr((Expr *) expr, NULL); From c858cc16d365224fe509a2615c567379f919afd6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 24 Aug 2017 13:30:53 +0300 Subject: [PATCH 0719/1124] bump lib 
version to 1.4.3 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 63110d68..72e75c25 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.2", + "version": "1.4.3", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.2", + "version": "1.4.3", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4fd11dcf..29573fba 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10402 + 10403 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 27989803..c34eda56 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010402 +#define CURRENT_LIB_VERSION 0x010403 void *pathman_cache_search_relid(HTAB *cache_table, From 4ef0128413caaf1e038ace20c234059b42042675 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 24 Aug 2017 18:18:17 +0300 Subject: [PATCH 0720/1124] fix incorrect Oid for sub_prel in select_partition_for_insert() --- src/partition_filter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index b2004c58..d4873cbe 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -478,11 +478,11 @@ 
select_partition_for_insert(ExprState *expr_state, const PartRelationInfo *sub_prel; /* Fetch PartRelationInfo for this partitioned relation */ - sub_prel = get_pathman_relation_info(partition_relid); + sub_prel = get_pathman_relation_info(rri_holder->partid); /* Might be a false alarm */ if (!sub_prel) - break; + return rri_holder; /* Build an expression state if not yet */ if (!rri_holder->expr_state) From 81668ed0ebc092dd15d4c29b0fb44dcd07beb4a1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 28 Aug 2017 18:54:39 +0300 Subject: [PATCH 0721/1124] bsearch: fix non-eq WHERE conditions pointing to gaps (issue #117) --- src/pg_pathman.c | 85 ++++++++++++++++++++++++++---------------------- 1 file changed, 46 insertions(+), 39 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 4a8c4ff5..ce74b361 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -591,22 +591,20 @@ select_range_partitions(const Datum value, WrapperNode *result) /* returned partitions */ { bool lossy = false, - is_less, - is_greater; - -#ifdef USE_ASSERT_CHECKING - bool found = false; - int counter = 0; -#endif + miss_left, /* 'value' is less than left bound */ + miss_right; /* 'value' is greater that right bound */ int startidx = 0, endidx = nranges - 1, cmp_min, cmp_max, - i; + i = 0; Bound value_bound = MakeBound(value); /* convert value to Bound */ +#ifdef USE_ASSERT_CHECKING + int counter = 0; +#endif /* Initial value (no missing partitions found) */ result->found_gap = false; @@ -628,9 +626,9 @@ select_range_partitions(const Datum value, cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[startidx].min); cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[endidx].max); - if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || - (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || - strategy == BTEqualStrategyNumber))) + if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || + (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || + 
strategy == BTEqualStrategyNumber))) { result->rangeset = NIL; return; @@ -644,7 +642,7 @@ select_range_partitions(const Datum value, return; } - if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || + if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || (cmp_min <= 0 && strategy == BTGreaterEqualStrategyNumber)) { result->rangeset = list_make1_irange(make_irange(startidx, @@ -677,44 +675,55 @@ select_range_partitions(const Datum value, cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].min); cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].max); - is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); - is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); + /* How is 'value' located with respect to left & right bounds? */ + miss_left = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); + miss_right = (cmp_max > 0 || (cmp_max == 0 && strategy != BTLessStrategyNumber)); - if (!is_less && !is_greater) + /* Searched value is inside of partition */ + if (!miss_left && !miss_right) { - if (strategy == BTGreaterEqualStrategyNumber && cmp_min == 0) + /* 'value' == 'min' and we want everything on the right */ + if (cmp_min == 0 && strategy == BTGreaterEqualStrategyNumber) lossy = false; - else if (strategy == BTLessStrategyNumber && cmp_max == 0) + /* 'value' == 'max' and we want everything on the left */ + else if (cmp_max == 0 && strategy == BTLessStrategyNumber) lossy = false; - else - lossy = true; + /* We're somewhere in the middle */ + else lossy = true; -#ifdef USE_ASSERT_CHECKING - found = true; -#endif - break; + break; /* just exit loop */ } /* Indices have met, looks like there's no partition */ if (startidx >= endidx) { - result->rangeset = NIL; + result->rangeset = NIL; result->found_gap = true; - return; + + /* Return if it's "key = value" */ + if (strategy == BTEqualStrategyNumber) + return; + + if ((miss_left && (strategy == BTLessStrategyNumber 
|| + strategy == BTLessEqualStrategyNumber)) || + (miss_right && (strategy == BTGreaterStrategyNumber || + strategy == BTGreaterEqualStrategyNumber))) + lossy = true; + else + lossy = false; + + break; /* just exit loop */ } - if (is_less) + if (miss_left) endidx = i - 1; - else if (is_greater) + else if (miss_right) startidx = i + 1; /* For debug's sake */ Assert(++counter < 100); } - /* Should've been found by now */ - Assert(found); - /* Filter partitions */ switch(strategy) { @@ -743,18 +752,16 @@ select_range_partitions(const Datum value, { result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i < nranges - 1) - result->rangeset = - lappend_irange(result->rangeset, - make_irange(i + 1, - nranges - 1, - IR_COMPLETE)); + result->rangeset = lappend_irange(result->rangeset, + make_irange(i + 1, + nranges - 1, + IR_COMPLETE)); } else { - result->rangeset = - list_make1_irange(make_irange(i, - nranges - 1, - IR_COMPLETE)); + result->rangeset = list_make1_irange(make_irange(i, + nranges - 1, + IR_COMPLETE)); } break; From 7bb6af4a0f4b745116ba5eb50859f4e36e4a21d9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Aug 2017 15:39:59 +0300 Subject: [PATCH 0722/1124] more comments and tests for range + gap case --- Makefile | 1 + expected/pathman_gaps.out | 823 ++++++++++++++++++++++++++++++++++++++ sql/pathman_gaps.sql | 137 +++++++ src/pg_pathman.c | 5 + 4 files changed, 966 insertions(+) create mode 100644 expected/pathman_gaps.out create mode 100644 sql/pathman_gaps.sql diff --git a/Makefile b/Makefile index c2cacaae..40738ddf 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,7 @@ REGRESS = pathman_array_qual \ pathman_domains \ pathman_expressions \ pathman_foreign_keys \ + pathman_gaps \ pathman_inserts \ pathman_interval \ pathman_join_clause \ diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out new file mode 100644 index 00000000..a21734f0 --- /dev/null +++ b/expected/pathman_gaps.out @@ -0,0 +1,823 @@ +\set VERBOSITY terse 
+SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 
| val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val = 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val > 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val = 31) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val 
> 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val = 41) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq 
Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq 
Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val = 51) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq 
Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP SCHEMA gaps CASCADE; +NOTICE: drop cascades to 30 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql new file mode 100644 index 00000000..eb185ff2 --- /dev/null +++ b/sql/pathman_gaps.sql @@ -0,0 +1,137 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; + + + +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); +DROP 
TABLE gaps.test_1_2; + +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); +DROP TABLE gaps.test_2_3; + +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); +DROP TABLE gaps.test_3_4; + +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; + + + +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_2 WHERE val <= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_3 WHERE val >= 41; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + + + +DROP SCHEMA gaps CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/pg_pathman.c 
b/src/pg_pathman.c index ce74b361..41090f3f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -704,6 +704,11 @@ select_range_partitions(const Datum value, if (strategy == BTEqualStrategyNumber) return; + /* + * Use current partition 'i' as a pivot that will be + * excluded by relation_excluded_by_constraints() if + * (lossy == true) & its WHERE clauses are trivial. + */ if ((miss_left && (strategy == BTLessStrategyNumber || strategy == BTLessEqualStrategyNumber)) || (miss_right && (strategy == BTGreaterStrategyNumber || From 217efab4c42a4e306399393f3b8704e98523f220 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 29 Aug 2017 16:35:52 +0300 Subject: [PATCH 0723/1124] undef useless defines (MSFT) --- src/planner_tree_modification.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2b8c811c..68701b3d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -24,10 +24,26 @@ #include "utils/syscache.h" +#ifdef SELECT +#undef SELECT +#endif + +#ifdef INSERT +#undef INSERT +#endif + +#ifdef UPDATE +#undef UPDATE +#endif + +#ifdef DELETE +#undef DELETE +#endif + + /* for assign_rel_parenthood_status() */ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) - /* Build transform_query_cxt field name */ #define TRANSFORM_CONTEXT_FIELD(command_type) \ has_parent_##command_type##_query From ef72caa3fe9823602f2b3dfdf8de7b3bdcf26d2f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Aug 2017 14:15:01 +0300 Subject: [PATCH 0724/1124] reorder some steps in pathman_post_parse_analysis_hook() (issue #118) --- src/hooks.c | 64 ++++++++++++++++++++++--------------- src/include/xact_handling.h | 2 +- src/xact_handling.c | 20 +++++++----- 3 files changed, 51 insertions(+), 35 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index f4996e65..abe6face 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -632,39 +632,51 @@ 
pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (post_parse_analyze_hook_next) post_parse_analyze_hook_next(pstate, query); - /* Hooks can be disabled */ + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; - /* Finish delayed invalidation jobs */ - if (IsPathmanReady()) - finish_delayed_invalidation(); + /* We shouldn't proceed on: ... */ + if (query->commandType == CMD_UTILITY) + { + /* ... BEGIN */ + if (xact_is_transaction_stmt(query->utilityStmt)) + return; - /* - * We shouldn't proceed on: - * BEGIN - * SET [TRANSACTION] - */ - if (query->commandType == CMD_UTILITY && - (xact_is_transaction_stmt(query->utilityStmt) || - xact_is_set_stmt(query->utilityStmt))) - return; + /* ... SET pg_pathman.enable */ + if (xact_is_set_stmt(query->utilityStmt, PATHMAN_ENABLE)) + { + /* Accept all events in case it's "enable = OFF" */ + if (IsPathmanReady()) + finish_delayed_invalidation(); - /* - * We should also disable pg_pathman on: - * ALTER EXTENSION pg_pathman - */ - if (query->commandType == CMD_UTILITY && - xact_is_alter_pathman_stmt(query->utilityStmt)) - { - /* Disable pg_pathman to perform a painless update */ - (void) set_config_option(PATHMAN_ENABLE, "off", - PGC_SUSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0, false); + return; + } - return; + /* ... SET [TRANSACTION] */ + if (xact_is_set_stmt(query->utilityStmt, NULL)) + return; + + /* ... 
ALTER EXTENSION pg_pathman */ + if (xact_is_alter_pathman_stmt(query->utilityStmt)) + { + /* Leave no delayed events before ALTER EXTENSION */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + + /* Disable pg_pathman to perform a painless update */ + (void) set_config_option(PATHMAN_ENABLE, "off", + PGC_SUSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + return; + } } + /* Finish all delayed invalidation jobs */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + /* Load config if pg_pathman exists & it's still necessary */ if (IsPathmanEnabled() && !IsPathmanInitialized() && @@ -746,7 +758,7 @@ pathman_relcache_hook(Datum arg, Oid relid) { Oid parent_relid; - /* Hooks can be disabled */ + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index 27939304..a762f197 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -28,7 +28,7 @@ LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); -bool xact_is_set_stmt(Node *stmt); +bool xact_is_set_stmt(Node *stmt, const char *name); bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); diff --git a/src/xact_handling.c b/src/xact_handling.c index 0d4ea5b0..a65bf3af 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -96,7 +96,7 @@ xact_is_level_read_committed(void) } /* - * Check if 'stmt' is BEGIN\ROLLBACK etc transaction statement. + * Check if 'stmt' is BEGIN/ROLLBACK/etc [TRANSACTION] statement. */ bool xact_is_transaction_stmt(Node *stmt) @@ -111,10 +111,10 @@ xact_is_transaction_stmt(Node *stmt) } /* - * Check if 'stmt' is SET [TRANSACTION] statement. + * Check if 'stmt' is SET ('name' | [TRANSACTION]) statement. 
*/ bool -xact_is_set_stmt(Node *stmt) +xact_is_set_stmt(Node *stmt, const char *name) { /* Check that SET TRANSACTION is implemented via VariableSetStmt */ Assert(VAR_SET_MULTI > 0); @@ -122,7 +122,10 @@ xact_is_set_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, VariableSetStmt)) + if (!IsA(stmt, VariableSetStmt)) + return false; + + if (!name || pg_strcasecmp(name, ((VariableSetStmt *) stmt)->name) == 0) return true; return false; @@ -137,16 +140,17 @@ xact_is_alter_pathman_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, AlterExtensionStmt) && - 0 == strcmp(((AlterExtensionStmt *) stmt)->extname, - "pg_pathman")) + if (!IsA(stmt, AlterExtensionStmt)) + return false; + + if (pg_strcasecmp(((AlterExtensionStmt *) stmt)->extname, "pg_pathman") == 0) return true; return false; } /* - * Check if object is visible in newer transactions. + * Check if object is visible to newer transactions. */ bool xact_object_is_visible(TransactionId obj_xmin) From 5c913a703c37b02493ebb6efdf312bf1271d59f9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 30 Aug 2017 15:30:08 +0300 Subject: [PATCH 0725/1124] add a comment regarding '#undef DELETE' --- src/planner_tree_modification.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 68701b3d..1163197b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -24,6 +24,10 @@ #include "utils/syscache.h" +/* + * Drop conflicting macros for the sake of TRANSFORM_CONTEXT_FIELD(...). + * For instance, Windows.h contains a nasty "#define DELETE". 
+ */ #ifdef SELECT #undef SELECT #endif From fc365c5f0b16c1883f848b3167ea041cea3f338b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 31 Aug 2017 13:55:31 +0300 Subject: [PATCH 0726/1124] bump lib version to 1.4.4 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 72e75c25..b3f0bf35 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.3", + "version": "1.4.4", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.3", + "version": "1.4.4", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 29573fba..4adcbeb4 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10403 + 10404 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index c34eda56..546206aa 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010403 +#define CURRENT_LIB_VERSION 0x010404 void *pathman_cache_search_relid(HTAB *cache_table, From 594f617cb4af9725e882ae1276346ae9b42754e7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 1 Sep 2017 12:25:55 +0300 Subject: [PATCH 0727/1124] fix crash on RESET ALL (issue #121) --- expected/pathman_calamity.out | 19 +++++++++++++++++++ sql/pathman_calamity.sql | 21 +++++++++++++++++++++ 
src/xact_handling.c | 9 ++++++++- 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 4adcbeb4..6c1a1729 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -855,6 +855,25 @@ NOTICE: drop cascades to 2 other objects DROP SCHEMA calamity CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; /* * ------------------------------------- * Special tests (pathman_cache_stats) diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..ed1b7b82 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -369,6 +369,27 @@ DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ + +CREATE EXTENSION pg_pathman; + +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; +SET pg_pathman.enable = false; +RESET pg_pathman.enable; +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; 
+ +DROP EXTENSION pg_pathman; + + + /* * ------------------------------------- * Special tests (pathman_cache_stats) diff --git a/src/xact_handling.c b/src/xact_handling.c index a65bf3af..c6696cce 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -125,8 +125,15 @@ xact_is_set_stmt(Node *stmt, const char *name) if (!IsA(stmt, VariableSetStmt)) return false; - if (!name || pg_strcasecmp(name, ((VariableSetStmt *) stmt)->name) == 0) + if (!name) return true; + else + { + char *set_name = ((VariableSetStmt *) stmt)->name; + + if (set_name && pg_strcasecmp(name, set_name) == 0) + return true; + } return false; } From a5e742233591da60078728b9ac9ddd3e920cb0eb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 1 Sep 2017 12:50:47 +0300 Subject: [PATCH 0728/1124] bump lib version to 1.4.5 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index b3f0bf35..b05c65a4 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.4", + "version": "1.4.5", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.4", + "version": "1.4.5", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6c1a1729..66925628 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10404 + 10405 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 546206aa..e43747e1 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ 
-157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010404 +#define CURRENT_LIB_VERSION 0x010405 void *pathman_cache_search_relid(HTAB *cache_table, From 7ca0dda771238fdd1ded9c2a7eea539c8c90e0d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 2 Sep 2017 15:48:42 +0300 Subject: [PATCH 0729/1124] improve PGXN config --- META.json | 12 +++++++++--- README.md | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/META.json b/META.json index b05c65a4..ee2d0f5f 100644 --- a/META.json +++ b/META.json @@ -1,7 +1,7 @@ { "name": "pg_pathman", "abstract": "Partitioning tool", - "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", "version": "1.4.5", "maintainer": [ "Ildar Musin ", @@ -25,7 +25,7 @@ "file": "pg_pathman--1.4.sql", "docfile": "README.md", "version": "1.4.5", - "abstract": "Partitioning tool" + "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, "meta-spec": { @@ -35,6 +35,12 @@ "tags": [ "partitioning", "partition", - "optimization" + "optimization", + "table", + "tables", + "custom node", + "runtime append", + "background worker", + "fdw" ] } diff --git a/README.md b/README.md index 2935ff3c..19dc98aa 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. 
The extension is compatible with: + * PostgreSQL 9.5, 9.6, 10; * Postgres Pro Standard 9.5, 9.6; * Postgres Pro Enterprise; From 88fcacb7b4b9732588f350e177c87260b580a17b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 12 Sep 2017 15:03:05 +0300 Subject: [PATCH 0730/1124] restore compatibility with PostgreSQL 10, refactoring of prepare_expr_state() --- pg_compat_available.sh | 6 ++ src/include/compat/pg_compat.h | 44 ++++++++-- src/include/relation_info.h | 7 +- src/partition_filter.c | 142 +++++++++++++++++++-------------- src/pg_pathman.c | 19 ++--- src/relation_info.c | 49 +++++++++--- 6 files changed, 176 insertions(+), 91 deletions(-) create mode 100755 pg_compat_available.sh diff --git a/pg_compat_available.sh b/pg_compat_available.sh new file mode 100755 index 00000000..d2d7cabc --- /dev/null +++ b/pg_compat_available.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash + +dir=$(dirname $0) +func="$1" + +grep -n -r --include=pg_compat.c --include=pg_compat.h $func $dir | head -n1 diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 72b23dc8..09844beb 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -158,6 +158,18 @@ #endif +/* + * CheckValidResultRel() + */ +#if PG_VERSION_NUM >= 100000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) +#elif PG_VERSION_NUM >= 90500 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#endif + + /* * create_append_path() */ @@ -266,7 +278,7 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* - * ExecBuildProjectionInfo + * ExecBuildProjectionInfo() */ #if PG_VERSION_NUM >= 100000 #define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ @@ -366,7 +378,7 @@ char get_rel_persistence(Oid relid); /* - * initial_cost_nestloop + * initial_cost_nestloop() */ #if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define 
initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ @@ -382,7 +394,7 @@ char get_rel_persistence(Oid relid); /* - * InitResultRelInfo + * InitResultRelInfo() * * for v10 set NULL into 'partition_root' argument to specify that result * relation is not vanilla partition @@ -461,7 +473,7 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * pg_analyze_and_rewrite + * pg_analyze_and_rewrite() * * for v10 cast first arg to RawStmt type */ @@ -479,7 +491,7 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * ProcessUtility + * ProcessUtility() * * for v10 set NULL into 'queryEnv' argument */ @@ -577,6 +589,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, ExecARInsertTriggers((estate), (relinfo), (trigtuple), (recheck_indexes)) #endif + /* * ExecARDeleteTriggers() */ @@ -591,6 +604,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, ExecARDeleteTriggers((estate), (relinfo), (tupleid), (fdw_trigtuple)) #endif + /* * ExecASInsertTriggers() */ @@ -603,6 +617,26 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * map_variable_attnos() + */ +#if PG_VERSION_NUM >= 100000 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (to_rowtype), (found_wholerow)) +#elif PG_VERSION_NUM >= 90500 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (found_wholerow)) +#endif + + /* * ------------- diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 9921a029..ea6c9abe 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -271,6 +271,10 @@ PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) return expr; } +AttrNumber 
*PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length); + const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, @@ -393,8 +397,5 @@ extern bool pg_pathman_enable_bounds_cache; void init_relation_info_static_data(void); -AttrNumber *build_attributes_map(const PartRelationInfo *prel, - TupleDesc child_tupdesc); - #endif /* RELATION_INFO_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index d4873cbe..35475365 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -69,7 +69,9 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; static ExprState *prepare_expr_state(const PartRelationInfo *prel, - EState *estate); + Relation source_rel, + EState *estate, + bool try_map); static void prepare_rri_for_insert(EState *estate, ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage, @@ -261,7 +263,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Open child relation and check if it is a valid target */ child_rel = heap_open(partid, NoLock); - CheckValidResultRel(child_rel, parts_storage->command_type); /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); @@ -311,6 +312,10 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; + /* Check that this partition is a valid result relation */ + CheckValidResultRelCompat(child_result_rel_info, + parts_storage->command_type); + /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; @@ -446,11 +451,11 @@ select_partition_for_insert(ExprState *expr_state, elog(ERROR, ERR_PART_ATTR_MULTIPLE); else if (nparts == 0) { - partition_relid = create_partitions_for_value(parent_relid, - 
value, prel->ev_type); + partition_relid = create_partitions_for_value(parent_relid, + value, prel->ev_type); - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + /* get_pathman_relation_info() will refresh this entry */ + invalidate_pathman_relation_info(parent_relid, NULL); } else partition_relid = parts[0]; @@ -475,23 +480,32 @@ select_partition_for_insert(ExprState *expr_state, /* This partition might have sub-partitions */ else if (rri_holder->has_children) { - const PartRelationInfo *sub_prel; + const PartRelationInfo *child_prel; /* Fetch PartRelationInfo for this partitioned relation */ - sub_prel = get_pathman_relation_info(rri_holder->partid); + child_prel = get_pathman_relation_info(rri_holder->partid); /* Might be a false alarm */ - if (!sub_prel) + if (!child_prel) return rri_holder; - /* Build an expression state if not yet */ + /* Build an expression state if it's not ready yet */ if (!rri_holder->expr_state) - rri_holder->expr_state = prepare_expr_state(sub_prel, estate); + { + /* Fetch original topmost parent */ + Relation source_rel = parts_storage->base_rri->ri_RelationDesc; + + /* Build a partitioning expression state */ + rri_holder->expr_state = prepare_expr_state(child_prel, + source_rel, + estate, + true); + } /* Recursively search for subpartitions */ rri_holder = select_partition_for_insert(rri_holder->expr_state, econtext, estate, - sub_prel, parts_storage); + child_prel, parts_storage); } } /* Loop until we get some result */ @@ -501,15 +515,45 @@ select_partition_for_insert(ExprState *expr_state, } static ExprState * -prepare_expr_state(const PartRelationInfo *prel, EState *estate) +prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate, + bool try_map) { ExprState *expr_state; MemoryContext old_mcxt; Node *expr; - /* Change varno in Vars according to range table */ + /* Fetch partitioning expression (we don't care about varno) */ expr = 
PrelExpressionForRelid(prel, PART_EXPR_VARNO); + /* Should we try using map? */ + if (try_map) + { + + AttrNumber *map; + int map_length; + TupleDesc source_tupdesc = RelationGetDescr(source_rel); + + /* Remap expression attributes for source relation */ + map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); + + if (map) + { + bool found_whole_row; + + expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, + map_length, InvalidOid, + &found_whole_row); + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference" + " found in partition key"); + + pfree(map); + } + } + /* Prepare state for expression execution */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); expr_state = ExecInitExpr((Expr *) expr, NULL); @@ -595,8 +639,6 @@ partition_filter_create_scan_state(CustomScan *node) Assert(state->on_conflict_action >= ONCONFLICT_NONE || state->on_conflict_action <= ONCONFLICT_UPDATE); - state->expr_state = NULL; - /* There should be exactly one subplan */ Assert(list_length(node->custom_plans) == 1); @@ -607,55 +649,35 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; - const PartRelationInfo *prel; PlanState *child_state; + ResultRelInfo *current_rri; + Relation current_rel; + const PartRelationInfo *prel; + bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); node->custom_ps = list_make1(child_state); - if (state->expr_state == NULL) - { - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); + /* Fetch current result relation (rri + rel) */ + current_rri = estate->es_result_relation_info; + current_rel = current_rri->ri_RelationDesc; - /* Prepare state for expression execution */ - if (state->command_type == CMD_UPDATE) - { - /* - * In UPDATE queries we would work with child relation, but 
- * expression contains varattnos of base relation, so we map - * parent varattnos to child varattnos. - */ - bool found_whole_row; - - AttrNumber *map; - Node *expr; - ResultRelInfo *child_rri = estate->es_result_relation_info; - Relation child_rel = child_rri->ri_RelationDesc; - - MemoryContext old_mcxt; - - map = build_attributes_map(prel, RelationGetDescr(child_rel)); - expr = map_variable_attnos(PrelExpressionForRelid(prel, PART_EXPR_VARNO), - PART_EXPR_VARNO, 0, map, - RelationGetDescr(child_rel)->natts, - &found_whole_row); + /* Fetch PartRelationInfo for this partitioned relation */ + prel = get_pathman_relation_info(state->partitioned_table); - if (found_whole_row) - elog(ERROR, "unexpected whole-row reference found in partition key"); + /* + * In UPDATE queries we have to work with child relation tlist, + * but expression contains varattnos of base relation, so we + * map parent varattnos to child varattnos. + * + * We don't need map if current relation == base relation. + */ + try_map = state->command_type == CMD_UPDATE && + RelationGetRelid(current_rel) != state->partitioned_table; - /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_mcxt); - } - else - { - /* Simple INSERT, expression based on parent attribute numbers */ - state->expr_state = prepare_expr_state(prel, estate); - } - } + /* Build a partitioning expression state */ + state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); /* Init ResultRelInfo cache */ init_result_parts_storage(&state->result_parts, estate, @@ -665,6 +687,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) (void *) state, state->command_type); + /* Don't forget to initialize 'base_rri'! 
*/ + state->result_parts.base_rri = current_rri; + + /* No warnings yet */ state->warning_triggered = false; } @@ -681,10 +707,6 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); state->subplan_slot = slot; - /* Don't forget to initialize 'base_rri'! */ - if (!state->result_parts.base_rri) - state->result_parts.base_rri = estate->es_result_relation_info; - if (state->tup_convert_slot) ExecClearTuple(state->tup_convert_slot); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d6bfde96..0bd0919c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -420,11 +420,7 @@ append_child_relation(PlannerInfo *root, child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; child_rte->requiredPerms = 0; /* perform all checks on parent */ - - /* Does this child have subpartitions? */ - child_rte->inh = (child_oid == parent_rte->relid) ? - false : /* it's a parent, skip */ - child_relation->rd_rel->relhassubclass; + child_rte->inh = false; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); @@ -574,20 +570,19 @@ append_child_relation(PlannerInfo *root, add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); child_rel->has_eclass_joins = parent_rel->has_eclass_joins; - /* Close child relations, but keep locks */ - heap_close(child_relation, NoLock); - - /* Recursively expand child partition if it has subpartitions */ - if (child_rte->inh) + /* Expand child partition if it might have subpartitions */ + if (parent_rte->relid != child_oid && + child_relation->rd_rel->relhassubclass) { - child_rte->inh = false; - pathman_rel_pathlist_hook(root, child_rel, child_rti, child_rte); } + /* Close child relations, but keep locks */ + heap_close(child_relation, NoLock); + return child_rti; } diff --git a/src/relation_info.c b/src/relation_info.c index eb8b0980..e6a40a36 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1442,27 +1442,43 @@ 
shout_if_prel_is_invalid(const Oid parent_oid, } /* - * Get attributes map between parent and child relation. - * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. */ AttrNumber * -build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) { - AttrNumber i = -1; Oid parent_relid = PrelParentRelid(prel); - int natts = child_tupdesc->natts; - AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); + int source_natts = source_tupdesc->natts, + expr_natts = 0; + AttrNumber *result, + i; + bool is_trivial = true; + + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; + + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); + /* Find a match for each attribute */ + i = -1; while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { - int j; AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; char *attname = get_attname(parent_relid, attnum); + int j; + + Assert(attnum <= expr_natts); - for (j = 0; j < natts; j++) + for (j = 0; j < source_natts; j++) { - Form_pg_attribute att = child_tupdesc->attrs[j]; + Form_pg_attribute att = source_tupdesc->attrs[j]; if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ @@ -1475,8 +1491,19 @@ build_attributes_map(const PartRelationInfo *prel, TupleDesc child_tupdesc) } if (result[attnum - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); + elog(ERROR, "cannot find column \"%s\" in child relation", attname); 
+ + if (result[attnum - 1] != attnum) + is_trivial = false; + } + + /* Check if map is trivial */ + if (is_trivial) + { + pfree(result); + return NULL; } + *map_length = expr_natts; return result; } From eecab831eeb5cdc7664870e33db1964e550b3008 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 11:57:13 +0300 Subject: [PATCH 0731/1124] fixup! restore compatibility with PostgreSQL 10, refactoring of prepare_expr_state() --- src/partition_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_router.c b/src/partition_router.c index b719bf40..f4a8cb6c 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -238,7 +238,7 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * ---------------------------------------------------------------- * ExecDeleteInternal - * Basicly copy of ExecDelete from executor/nodeModifyTable.c + * Basicly is a copy of ExecDelete from executor/nodeModifyTable.c * ---------------------------------------------------------------- */ From e263f6a661899875657ee81dfe9a90d6d95e5fac Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 14:21:15 +0300 Subject: [PATCH 0732/1124] Introduce TupleDescAttr macro for future compability with pg11 --- src/compat/pg_compat.c | 9 +++++---- src/init.c | 10 ++++------ src/partition_creation.c | 5 ++--- src/partition_filter.c | 2 +- src/pg_pathman.c | 6 +++--- src/relation_info.c | 2 +- src/utility_stmt_hooking.c | 11 ++++++----- 7 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 71f93a1e..ff2aa15f 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -520,6 +520,9 @@ get_rel_persistence(Oid relid) } #endif +#if PG_VERSION_NUM < 110000 +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif #if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) @@ -542,7 +545,7 @@ 
convert_tuples_by_name_map(TupleDesc indesc, attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); for (i = 0; i < n; i++) { - Form_pg_attribute att = outdesc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(outdesc, i); char *attname; Oid atttypid; int32 atttypmod; @@ -555,7 +558,7 @@ convert_tuples_by_name_map(TupleDesc indesc, atttypmod = att->atttypmod; for (j = 0; j < indesc->natts; j++) { - att = indesc->attrs[j]; + att = TupleDescAttr(indesc, j); if (att->attisdropped) continue; if (strcmp(attname, NameStr(att->attname)) == 0) @@ -587,8 +590,6 @@ convert_tuples_by_name_map(TupleDesc indesc, } #endif - - /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index 7b0cdda0..13487f7e 100644 --- a/src/init.c +++ b/src/init.c @@ -631,9 +631,8 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); @@ -880,9 +879,8 @@ read_pathman_config(void (*per_row_cb)(Datum *values, rel = heap_open(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is if regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); diff --git a/src/partition_creation.c b/src/partition_creation.c index 3b98761a..51532089 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ 
-920,8 +920,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_class_desc->attrs[Anum_pg_class_relacl - 1]; - + acl_column = TupleDescAttr(pg_class_desc, Anum_pg_class_relacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, acl_column->attlen); } @@ -997,7 +996,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_attribute_desc->attrs[Anum_pg_attribute_attacl - 1]; + acl_column = TupleDescAttr(pg_attribute_desc, Anum_pg_attribute_attacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, diff --git a/src/partition_filter.c b/src/partition_filter.c index 35475365..b70a296f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1022,7 +1022,7 @@ prepare_rri_fdw_for_insert(EState *estate, TargetEntry *te; Param *param; - attr = tupdesc->attrs[i]; + attr = TupleDescAttr(tupdesc, i); if (attr->attisdropped) continue; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0bd0919c..25308479 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1787,7 +1787,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, Oid attcollation; int new_attno; - att = old_tupdesc->attrs[old_attno]; + att = TupleDescAttr(old_tupdesc, old_attno); if (att->attisdropped) { /* Just put NULL into this list entry */ @@ -1825,7 +1825,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * notational device to include the assignment into the if-clause. 
*/ if (old_attno < newnatts && - (att = new_tupdesc->attrs[old_attno]) != NULL && + (att = TupleDescAttr(new_tupdesc, old_attno)) != NULL && !att->attisdropped && att->attinhcount != 0 && strcmp(attname, NameStr(att->attname)) == 0) new_attno = old_attno; @@ -1833,7 +1833,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, { for (new_attno = 0; new_attno < newnatts; new_attno++) { - att = new_tupdesc->attrs[new_attno]; + att = TupleDescAttr(new_tupdesc, new_attno); /* * Make clang analyzer happy: diff --git a/src/relation_info.c b/src/relation_info.c index e6a40a36..2e0ce598 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1478,7 +1478,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, for (j = 0; j < source_natts; j++) { - Form_pg_attribute att = source_tupdesc->attrs[j]; + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); if (att->attisdropped) continue; /* attrMap[attnum - 1] is already 0 */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 05183a0b..d8c956af 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -262,13 +262,12 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) if (attnamelist == NIL) { /* Generate default column list */ - Form_pg_attribute *attr = tupDesc->attrs; int attr_count = tupDesc->natts; int i; for (i = 0; i < attr_count; i++) { - if (attr[i]->attisdropped) + if (TupleDescAttr(tupDesc, i)->attisdropped) continue; attnums = lappend_int(attnums, i + 1); } @@ -288,11 +287,13 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) attnum = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { - if (tupDesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (att->attisdropped) continue; - if (namestrcmp(&(tupDesc->attrs[i]->attname), name) == 0) + if (namestrcmp(&(att->attname), name) == 0) { - attnum = tupDesc->attrs[i]->attnum; + attnum = att->attnum; break; } } From 
a874ae7492dc2a6e7301cefad68b152e91396f62 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 16:25:45 +0300 Subject: [PATCH 0733/1124] Fix INSERTs for subpartitions --- expected/pathman_subpartitions.out | 46 ++++++++++++++++++++++++++---- sql/pathman_subpartitions.sql | 2 +- src/compat/pg_compat.c | 4 --- src/include/compat/pg_compat.h | 4 +++ src/partition_filter.c | 4 ++- 5 files changed, 49 insertions(+), 11 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index ab93090d..1965b7a1 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -216,33 +216,69 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); 2 (1 row) -INSERT INTO subpartitions.abc VALUES (25, 25); +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ tableoid | a | b -----------------------+----+---- subpartitions.abc_1_1 | 25 | 25 -(1 row) + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_1 | 125 | 25 -(1 row) + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 
25; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_2 | 125 | 75 -(1 row) + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ tableoid | a | b -----------------------+-----+----- subpartitions.abc_2_3 | 125 | 125 -(1 row) + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 10 other objects diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 1e5b2e47..3d48f26a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -69,7 +69,7 @@ SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); -INSERT INTO subpartitions.abc VALUES (25, 25); +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index ff2aa15f..809dc79f 100644 --- a/src/compat/pg_compat.c +++ 
b/src/compat/pg_compat.c @@ -520,10 +520,6 @@ get_rel_persistence(Oid relid) } #endif -#if PG_VERSION_NUM < 110000 -#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) -#endif - #if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) /* diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 09844beb..f63f1bf9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -22,6 +22,7 @@ #include "compat/debug_compat_features.h" #include "postgres.h" +#include "access/tupdesc.h" #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" @@ -636,6 +637,9 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, (found_wholerow)) #endif +#ifndef TupleDescAttr +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif /* diff --git a/src/partition_filter.c b/src/partition_filter.c index b70a296f..78123c71 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -524,6 +524,9 @@ prepare_expr_state(const PartRelationInfo *prel, MemoryContext old_mcxt; Node *expr; + /* Make sure we use query memory context */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + /* Fetch partitioning expression (we don't care about varno) */ expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); @@ -555,7 +558,6 @@ prepare_expr_state(const PartRelationInfo *prel, } /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); expr_state = ExecInitExpr((Expr *) expr, NULL); MemoryContextSwitchTo(old_mcxt); From c6ebde7909ae7c37462ff9a43f1ff0505ec5ce70 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 18:25:27 +0300 Subject: [PATCH 0734/1124] Fix compability with pg11 --- src/hooks.c | 8 ++--- src/include/compat/pg_compat.h | 53 +++++++++++++++++++++++++++++++++- src/pg_pathman.c | 4 +-- 3 files changed, 58 insertions(+), 7 deletions(-) diff --git a/src/hooks.c 
b/src/hooks.c index 7f77514a..94a46399 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -199,7 +199,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, return; /* could not build it, retreat! */ - required_nestloop = calc_nestloop_required_outer(outer, inner); + required_nestloop = calc_nestloop_required_outer_compat(outer, inner); /* * Check to see if proposed path is still parameterized, and reject if the @@ -230,9 +230,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, nest_path = create_nestloop_path_compat(root, joinrel, jointype, - &workspace, extra, outer, inner, - filtered_joinclauses, pathkeys, - calc_nestloop_required_outer(outer, inner)); + &workspace, extra, outer, inner, + filtered_joinclauses, pathkeys, + calc_nestloop_required_outer_compat(outer, inner)); /* * NOTE: Override 'rows' value produced by standard estimator. diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f63f1bf9..59a12f74 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -30,6 +30,7 @@ #include "nodes/pg_list.h" #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "optimizer/pathnode.h" #include "utils/memutils.h" /* @@ -38,11 +39,61 @@ * ---------- */ +/* + * calc_nestloop_required_outer() + */ + +#if PG_VERSION_NUM >= 110000 +static inline Relids +calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) +{ + RelOptInfo *innerrel = inner_path->parent; + RelOptInfo *outerrel = outer_path->parent; + Relids innerrelids = innerrel->relids; + Relids outerrelids = outerrel->relids; + Relids inner_paramrels = PATH_REQ_OUTER(inner_path); + Relids outer_paramrels = PATH_REQ_OUTER(outer_path); + + return calc_nestloop_required_outer(outerrelids, outer_paramrels, + innerrelids, inner_paramrels); +} +#else +#define calc_nestloop_required_outer_compat(outer_path, inner_path) \ + (calc_nestloop_required_outer((outer_path), (inner_path))) +#endif /* * adjust_appendrel_attrs() */ -#if PG_VERSION_NUM >= 90600 + 
+#if PG_VERSION_NUM >= 110000 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + 1, &(appinfo)) +#elif PG_VERSION_NUM >= 90600 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + (appinfo)) +#elif PG_VERSION_NUM >= 90500 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + node, \ + (appinfo)) +#endif + + +#if PG_VERSION_NUM >= 110000 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + 1, \ + &(appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90600 #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltarget->exprs = (List *) \ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 25308479..3079408f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -495,7 +495,7 @@ append_child_relation(PlannerInfo *root, AssertState(parent_rel); /* Adjust join quals for this child */ - child_rel->joininfo = (List *) adjust_appendrel_attrs(root, + child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, (Node *) parent_rel->joininfo, appinfo); @@ -532,7 +532,7 @@ append_child_relation(PlannerInfo *root, else childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); /* Now it's time to change varnos and rebuld quals */ - childquals = (List *) adjust_appendrel_attrs(root, + childquals = (List *) adjust_appendrel_attrs_compat(root, (Node *) childquals, appinfo); childqual = eval_const_expressions(root, (Node *) From 749ae29124e91dc4c188205439172f9587deee16 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Wed, 13 Sep 2017 18:42:47 +0300 Subject: [PATCH 0735/1124] fixup! 
Fix compability with pg11 --- src/include/compat/pg_compat.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 59a12f74..7a320213 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -69,17 +69,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #if PG_VERSION_NUM >= 110000 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ 1, &(appinfo)) #elif PG_VERSION_NUM >= 90600 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ (appinfo)) #elif PG_VERSION_NUM >= 90500 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ - node, \ + (node), \ (appinfo)) #endif From 70f24e8393c2120293af6dfe52d0c890126b9d72 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 14 Sep 2017 11:23:51 +0300 Subject: [PATCH 0736/1124] Remove unnecessary block of code --- src/include/compat/pg_compat.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 7a320213..ba2e8a72 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -71,11 +71,6 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) adjust_appendrel_attrs((root), \ (node), \ 1, &(appinfo)) -#elif PG_VERSION_NUM >= 90600 -#define adjust_appendrel_attrs_compat(root, node, appinfo) \ - adjust_appendrel_attrs((root), \ - (node), \ - (appinfo)) #elif PG_VERSION_NUM >= 90500 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ From 1154cfafe03148b601ecf41c68dbcb2d2fc2f453 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Thu, 14 Sep 2017 12:23:13 +0300 Subject: [PATCH 0737/1124] Make clang analyzer quiet --- src/pg_pathman.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/src/pg_pathman.c b/src/pg_pathman.c index 3079408f..cfe24cf7 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -410,6 +410,11 @@ append_child_relation(PlannerInfo *root, } parent_rel = root->simple_rel_array[parent_rti]; + + /* make clang analyzer quiet */ + if (!parent_rel) + elog(ERROR, "parent relation is NULL"); + parent_rte = root->simple_rte_array[parent_rti]; /* Open child relation (we've just locked it) */ From 8f60be7850578ccc48c7e6102b2ff669f4c0abb9 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 15 Sep 2017 12:42:33 +0300 Subject: [PATCH 0738/1124] Add support of several levels for RTI bitmapset lists in planner --- expected/pathman_subpartitions.out | 20 +++++++++++++ sql/pathman_subpartitions.sql | 16 +++++++++++ src/compat/pg_compat.c | 6 ++-- src/compat/relation_tags.c | 46 ++++++++++++++++++++++++++++++ src/hooks.c | 5 +++- src/include/compat/relation_tags.h | 3 +- src/include/relation_info.h | 2 -- src/pg_pathman.c | 2 +- 8 files changed, 93 insertions(+), 7 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 1965b7a1..54e93e9e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -136,6 +136,26 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; Filter: (a >= 210) (4 rows) +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); /* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( rel REGCLASS, diff --git a/sql/pathman_subpartitions.sql 
b/sql/pathman_subpartitions.sql index 3d48f26a..aefe728d 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -28,6 +28,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); +DROP FUNCTION check_multilevel_queries(); + /* Multilevel partitioning with updates */ CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( rel REGCLASS, diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 809dc79f..602102c4 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -618,8 +618,7 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* * Accumulate size information from each live child. 
*/ - Assert(childrel->rows > 0); - + Assert(childrel->rows >= 0); parent_rows += childrel->rows; #if PG_VERSION_NUM >= 90600 @@ -632,6 +631,9 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* Set 'rows' for append relation */ rel->rows = parent_rows; + if (parent_rows == 0) + parent_rows = 1; + #if PG_VERSION_NUM >= 90600 rel->reltarget->width = rint(parent_size / parent_rows); #else diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c index 383dd1f5..288f60ff 100644 --- a/src/compat/relation_tags.c +++ b/src/compat/relation_tags.c @@ -13,6 +13,7 @@ #include "planner_tree_modification.h" #include "nodes/nodes.h" +#include "nodes/pg_list.h" #ifndef NATIVE_RELATION_TAGS @@ -23,6 +24,15 @@ */ static HTAB *per_table_relation_tags = NULL; +/* + * List of bitmapsets, where we keep partitioned RangeTblEntry indexes + * for each level of planner + */ +List *partitioned_rti_sets = NIL; + +/* Points to last bitmapset in list */ +Bitmapset *current_partitioned_rti = NULL; + /* * Single row of 'per_table_relation_tags'. * NOTE: do not reorder these fields. 
@@ -219,6 +229,11 @@ incr_refcount_relation_tags(void) if (++per_table_relation_tags_refcount <= 0) elog(WARNING, "imbalanced %s", CppAsString(incr_refcount_relation_tags)); + + if (per_table_relation_tags_refcount == 1) + partitioned_rti_sets = NIL; + + current_partitioned_rti = NULL; } /* Return current value of usage counter */ @@ -233,11 +248,26 @@ get_refcount_relation_tags(void) void decr_refcount_relation_tags(void) { + int len; + /* Decrement reference counter */ if (--per_table_relation_tags_refcount < 0) elog(WARNING, "imbalanced %s", CppAsString(decr_refcount_relation_tags)); + /* Partitioned RTEs list management */ + len = list_length(partitioned_rti_sets); + if (len && current_partitioned_rti) + { + bms_free(current_partitioned_rti); + + partitioned_rti_sets = list_truncate(partitioned_rti_sets, len - 1); + if (partitioned_rti_sets) + current_partitioned_rti = llast(partitioned_rti_sets); + else + current_partitioned_rti = NULL; + } + /* Free resources if no one is using them */ if (per_table_relation_tags_refcount == 0) { @@ -249,3 +279,19 @@ decr_refcount_relation_tags(void) #endif } } + +void +MarkPartitionedRTE(Index rti) +{ + bool add = (current_partitioned_rti == NULL); + + current_partitioned_rti = bms_add_member(current_partitioned_rti, rti); + if (add) + partitioned_rti_sets = lappend(partitioned_rti_sets, current_partitioned_rti); +} + +bool +IsPartitionedRTE(Index rti) +{ + return bms_is_member(rti, current_partitioned_rti); +} diff --git a/src/hooks.c b/src/hooks.c index 8d8fc717..fde6ea84 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,11 +36,11 @@ #include "utils/typcache.h" #include "utils/lsyscache.h" + /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) - static inline bool allow_star_schema_join(PlannerInfo *root, Path *outer_path, @@ -374,6 +374,9 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pathkeyDesc = (PathKey *) linitial(pathkeys); } 
+ /* mark as partitioned table */ + MarkPartitionedRTE(rti); + children = PrelGetChildrenArray(prel); ranges = list_make1_irange_full(prel, IR_COMPLETE); diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h index d5183d32..f391bbd6 100644 --- a/src/include/compat/relation_tags.h +++ b/src/include/compat/relation_tags.h @@ -36,6 +36,8 @@ /* Memory context we're going to use for tags */ #define RELATION_TAG_MCXT TopTransactionContext +extern void MarkPartitionedRTE(Index rti); +extern bool IsPartitionedRTE(Index rti); /* Safe TAG constructor (Integer) */ static inline List * @@ -74,5 +76,4 @@ void incr_refcount_relation_tags(void); uint32 get_refcount_relation_tags(void); void decr_refcount_relation_tags(void); - #endif /* RELATION_TAGS_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ea6c9abe..b5ac6877 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -214,8 +214,6 @@ typedef enum PPS_NOT_SURE /* can't determine (not transactional state) */ } PartParentSearch; - - /* * PartRelationInfo field access macros & functions. */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2ed81291..704328ba 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1952,7 +1952,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * table and we've already filled it, skip it. 
Otherwise build a * pathlist for it */ - if (!childRTE->inh || !childrel->pathlist) + if (!IsPartitionedRTE(childRTindex) || childrel->pathlist == NIL) { /* Compute child's access paths & sizes */ if (childRTE->relkind == RELKIND_FOREIGN_TABLE) From 5e65cef18bf523d89f3f987f70fc00c181cd1845 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 15 Sep 2017 16:34:56 +0300 Subject: [PATCH 0739/1124] Refactor tests according to new testgres version --- tests/python/partitioning_test.py | 1866 ++++++++++++++--------------- 1 file changed, 903 insertions(+), 963 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 32c30492..2f772041 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -7,17 +7,20 @@ Copyright (c) 2015-2017, Postgres Professional """ -import unittest +import json import math -import time import os import re import subprocess import threading +import time +import time +import unittest -from testgres import get_new_node, stop_all, get_config +from distutils.version import LooseVersion +from testgres import get_new_node, get_bin_path, get_pg_config -version = get_config().get("VERSION_NUM") +version = LooseVersion(get_pg_config().get("VERSION_NUM")) # Helper function for json equality @@ -42,124 +45,109 @@ def wrapper(*args, **kwargs): return wrapper -class PartitioningTests(unittest.TestCase): - def setUp(self): - self.setup_cmd = [ - "create table abc(id serial, t text)", - "insert into abc select generate_series(1, 300000)", - "select create_hash_partitions('abc', 'id', 3, partition_data := false)", - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): +class Tests(unittest.TestCase): + def start_new_pathman_cluster(self, name='test', + allow_streaming=False, test_data=False): node = get_new_node(name) - node.init(allows_streaming=allows_streaming) + node.init(allow_streaming=allow_streaming) 
node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") node.start() node.psql('postgres', 'create extension pg_pathman') - return node + if test_data: + cmds = ( + "create table abc(id serial, t text)", + "insert into abc select generate_series(1, 300000)", + "select create_hash_partitions('abc', 'id', 3, partition_data := false)", + ) + for cmd in cmds: + node.safe_psql('postgres', cmd) - def init_test_data(self, node): - """ Initialize pg_pathman extension and test data """ - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) + return node def catchup_replica(self, master, replica): """ Wait until replica synchronizes with master """ - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name + if version >= LooseVersion('10'): + wait_lsn_query = """ + SELECT pg_current_wal_lsn() <= replay_lsn + FROM pg_stat_replication + WHERE application_name = '{0}' + """ else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) + wait_lsn_query = """ + SELECT pg_current_xlog_location() <= replay_location + FROM pg_stat_replication + WHERE application_name = '{0}' + """ + + master.poll_query_until('postgres', + wait_lsn_query.format(replica.name)) def test_concurrent(self): """ Test concurrent partitioning """ - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql('postgres', "select partition_table_concurrently('abc')") + with self.start_new_pathman_cluster(test_data=True) as node: + node.psql('postgres', "select partition_table_concurrently('abc')") - while True: - # update some rows to check for deadlocks - node.safe_psql('postgres', """ - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 
3000)) - """) - - count = node.execute('postgres', """ - select count(*) from pathman_concurrent_part_tasks - """) - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() + while True: + # update some rows to check for deadlocks + node.safe_psql('postgres', """ + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute('postgres', """ + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('postgres', 'select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('postgres', 'select count(*) from abc') + self.assertEqual(data[0][0], 300000) + node.stop() def test_replication(self): """ Test how pg_pathman works with replication """ - node = get_new_node('master') - replica = get_new_node('repl') - - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - 
node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 300000) - - # check that UPDATE in pathman_config_params invalidates cache - node.psql('postgres', 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: + with node.replicate('node2') as replica: + replica.start() + # wait until replica catches up + self.catchup_replica(node, replica) + + # check that results are equal + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql('postgres', 'select enable_parent(\'abc\'') + + self.catchup_replica(node, replica) + self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('postgres', 'update pathman_config_params set enable_parent = false') + self.catchup_replica(node, replica) + 
self.assertEqual( + node.psql('postgres', 'explain (costs off) select * from abc'), + replica.psql('postgres', 'explain (costs off) select * from abc')) + self.assertEqual( + node.psql('postgres', 'select * from abc'), + replica.psql('postgres', 'select * from abc')) + self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) def test_locks(self): """ @@ -167,9 +155,6 @@ def test_locks(self): waits for other sessions if they are doing the same """ - import threading - import time - class Flag: def __init__(self, value): self.flag = value @@ -197,62 +182,63 @@ def add_partition(node, flag, query): flag.set(True) # Initialize master server - node = get_new_node('master') + with get_new_node('master') as node: + node.init() + node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") + node.start() + sql = """ + create extension pg_pathman; + create table abc(id serial, t text); + insert into abc select generate_series(1, 100000); + select create_range_partitions('abc', 'id', 1, 50000); + """ + node.safe_psql('postgres', sql) + + # Start transaction that will create partition + with node.connect() as con: + con.begin() + con.execute('select append_range_partition(\'abc\')') + + # Start threads that suppose to add new partitions and wait some + # time + query = ( + "select prepend_range_partition('abc')", + "select append_range_partition('abc')", + "select add_range_partition('abc', 500000, 550000)", + ) + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # This threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. 
Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) - node.init() - node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);') - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. 
Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'), - b'6\n') + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + 'postgres', + "select count(*) from pg_inherits where inhparent='abc'::regclass"), + b'6\n') def test_tablespace(self): """ Check tablespace support """ @@ -265,251 +251,301 @@ def check_tablespace(node, tablename, tablespace): return res[0][0] == tablespace - node = get_new_node('master') - node.init() - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') + with get_new_node('master') as node: + node.init() + node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.start() + node.psql('postgres', 'create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + + # create table in this tablespace + node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql('postgres', + 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql('postgres', + 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + 'postgres', + 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + node.psql( + 'postgres', + 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + ) + node.psql( + 'postgres', + 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + ) - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql('postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' - ) - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' - ) - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' - ) - - # yapf: disable - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 
'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) @if_fdw_enabled def test_foreign_table(self): """ Test foreign tables """ # Start master server - master = get_new_node('test') - master.init() - master.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman, postgres_fdw'\n - """) - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql('postgres', """ - create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2) - """) - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', "create table ftable(id serial, name text)") - fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") - - # Create foreign table and attach it to partitioned table - master.safe_psql('postgres', """ - create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}') - """.format(fserv.port)) - - master.safe_psql('postgres', """ - create user mapping for {0} server fserv - options (user '{0}') - """.format(username)) - - master.safe_psql('postgres', """ - import foreign schema public limit to (ftable) - from server fserv into public - """) - - master.safe_psql( - 'postgres', - "select attach_range_partition('abc', 'ftable', 20, 30)") - - # Check that table attached to partitioned table - 
self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n') - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n') - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql('postgres', """ - create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2) - """) - fserv.safe_psql('postgres', - 'create table f_hash_test(id serial, name text)') - - master.safe_psql('postgres', """ - import foreign schema public limit to (f_hash_test) - from server fserv into public - """) - master.safe_psql('postgres', """ - select replace_hash_partition('hash_test_1', 'f_hash_test') - """) - master.safe_psql('postgres', - 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') - master.safe_psql('postgres', "select drop_partitions('hash_test')") + with get_new_node('test') as master, get_new_node('fserv') as fserv: + master.init() + master.append_conf('postgresql.conf', """ + shared_preload_libraries='pg_pathman, postgres_fdw'\n + """) + master.start() + master.psql('postgres', 'create extension pg_pathman') + master.psql('postgres', 'create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into 
foreign partition via parent + # - drop partitions + master.psql('postgres', """ + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('postgres', 'select current_user')[0][0] + + fserv.init().start() + fserv.safe_psql('postgres', "create table ftable(id serial, name text)") + fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql('postgres', """ + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql('postgres', """ + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql('postgres', """ + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + 'postgres', + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + self.assertEqual( + master.safe_psql('postgres', 'select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - insert data + # - drop partitions + master.psql('postgres', """ + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('postgres', + 'create table 
f_hash_test(id serial, name text)') + + master.safe_psql('postgres', """ + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql('postgres', """ + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('postgres', + 'insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('postgres', 'select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql('postgres', "select drop_partitions('hash_test')") @if_fdw_enabled def test_parallel_nodes(self): """ Test parallel queries under partitions """ - import json - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() + with get_new_node('test') as node: + node.init() + node.append_conf( + 'postgresql.conf', + 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < LooseVersion('9.6.0'): + return + + # Prepare test database + node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', """ + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return + # create statistics for both partitioned tables + node.psql('postgres', 'vacuum analyze') - # Prepare test database - 
node.psql('postgres', 'create extension pg_pathman') + node.psql('postgres', """ + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) - node.psql('postgres', """ - create table range_partitioned as - select generate_series(1, 1e4::integer) i; - - alter table range_partitioned alter column i set not null; - select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); - - create table hash_partitioned as - select generate_series(1, 1e4::integer) i; - - alter table hash_partitioned alter column i set not null; - select create_hash_partitions('hash_partitioned', 'i', 10); - """) - - # create statistics for both partitioned tables - node.psql('postgres', 'vacuum analyze') - - node.psql('postgres', """ - create or replace function query_plan(query text) - returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= 
LooseVersion('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, "Plans": [ { - "Node Type": "Aggregate", - 
"Strategy": "Plain", - "Partial Mode": "Partial", + "Node Type": "Gather", "Parent Relationship": "Outer", "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, "Plans": [ { "Node Type": "Append", @@ -537,354 +573,279 @@ def test_parallel_nodes(self): } ] } - ] + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" } } ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute( - 'select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": 
true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute( - 'select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') + """) + self.assertEqual(ordered(plan), ordered(expected)) - # Stop instance and finish work - node.stop() - node.cleanup() + # Remove all objects for testing + node.psql('postgres', 'drop table range_partitioned cascade') + node.psql('postgres', 'drop table hash_partitioned cascade') + node.psql('postgres', 'drop extension pg_pathman cascade') def test_conc_part_drop_runtime_append(self): """ Test concurrent partition drop + SELECT (RuntimeAppend) """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'drop_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table drop_test(val int not null)") - con0.execute("insert into drop_test select generate_series(1, 1000)") - con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - 
with node.connect() as con1, node.connect() as con2: - - try: - from queue import Queue - except ImportError: - from Queue import Queue - - # return values from thread - queue = Queue() - - # Thread for connection #2 (it has to wait) - def con2_thread(): + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + + # Step 1: cache partitioned table in con1 con1.begin() - con2.execute('set enable_hashjoin = f') - con2.execute('set enable_mergejoin = f') - - res = con2.execute(""" - explain (analyze, costs off, timing off) - select * from drop_test - where val = any (select 
generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache con2.commit() - has_runtime_append = False - has_drop_test_1 = False - has_drop_test_4 = False - - for row in res: - if row[0].find('RuntimeAppend') >= 0: - has_runtime_append = True - continue - - if row[0].find('drop_test_1') >= 0: - has_drop_test_1 = True - continue - - if row[0].find('drop_test_4') >= 0: - has_drop_test_4 = True - continue - - # return all values in tuple - queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) - - - # Step 1: cache partitioned table in con1 - con1.begin() - con1.execute('select count(*) from drop_test') # load pathman's cache - con1.commit() - - # Step 2: cache partitioned table in con2 - con2.begin() - con2.execute('select count(*) from drop_test') # load pathman's cache - con2.commit() - - # Step 3: drop first partition of 'drop_test' - con1.begin() - con1.execute('drop table drop_test_1') - - # Step 4: try executing select (RuntimeAppend) - t = threading.Thread(target=con2_thread) - t.start() + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() - if int(locks[0][0]) > 0: - break + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) - # Step 6: commit 'DROP TABLE' - con1.commit() + if int(locks[0][0]) > 0: + break - # Step 7: wait for con2 - t.join() + # Step 6: commit 'DROP TABLE' + 
con1.commit() - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'drop_test'::regclass - order by range_min, range_max - """) + # Step 7: wait for con2 + t.join() - # check number of partitions - self.assertEqual(len(rows), 99) + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) - # check RuntimeAppend + selected partitions - (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() - self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) - self.assertTrue(has_drop_test_4) + # check number of partitions + self.assertEqual(len(rows), 99) - # Stop instance and finish work - node.stop() - node.cleanup() + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) def test_conc_part_creation_insert(self): """ Test concurrent partition creation on INSERT """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table ins_test(val int not null)") - con0.execute("insert into ins_test select generate_series(1, 50)") - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 
(waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') - # Stop instance and finish work - node.stop() - node.cleanup() + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + 
con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) def test_conc_part_merge_insert(self): """ Test concurrent merge_range_partitions() + INSERT """ # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - # yapf: disable - con0.begin() - con0.execute("create table ins_test(val int not null)") - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread 
for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache - # Thread for connection #2 (it has to wait) - def con2_thread(): + # Step 2: initilize con2 con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute( - "select merge_range_partitions('ins_test_1', 'ins_test_2')") + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() - if int(locks[0][0]) > 0: - break + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) - # Step 6: finish merge in con1 (success, unlock) - con1.commit() + if int(locks[0][0]) > 0: + break - # Step 7: wait for con2 - t.join() + # Step 6: finish merge in con1 (success, unlock) + con1.commit() - rows = con1.execute("select *, tableoid::regclass::text from ins_test") + # Step 7: wait for con2 + t.join() - # check number of 
rows in table - self.assertEqual(len(rows), 1) + rows = con1.execute("select *, tableoid::regclass::text from ins_test") - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) + # check number of rows in table + self.assertEqual(len(rows), 1) - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) - # Stop instance and finish work - node.stop() - node.cleanup() + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') def test_pg_dump(self): """ @@ -904,220 +865,206 @@ def test_pg_dump(self): the rest of data - in child tables. """ - import subprocess - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf('postgresql.conf', """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute( - 'insert into range_partitioned select i from generate_series(1, 500) i' - ) - con.execute('create table hash_partitioned (i integer not null)') - con.execute( - 'insert into hash_partitioned select i from generate_series(1, 500) i' - ) - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute( - 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)' - ) - con.execute( - 'select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)' - ) - - # fillin child tables with 
remain data - con.execute( - 'insert into range_partitioned select i from generate_series(501, 1000) i' - ) - con.execute( - 'insert into hash_partitioned select i from generate_series(501, 1000) i' - ) - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; + with get_new_node('test') as node: + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false """) - con.execute( - 'select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')' - ) - con.execute( - 'select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')' - ) - - # turn off enable_parent option - con.execute( - 'select set_enable_parent(\'range_partitioned\', false)') - con.execute('select set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - - def cmp_full(con1, con2): - """ - Compare selection partitions in plan - and contents in partitioned tables - """ - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', 'only range_partitioned', - 'hash_partitioned', 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute( - plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute( - plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [ - x[0] for x in con1.execute(content_query % table_ref) + node.start() + + # Init two databases: initial and copy + node.psql('postgres', 'create database initial') + node.psql('postgres', 'create database copy') + node.psql('initial', 'create extension pg_pathman') + + # Create and fillin partitioned table in initial 
database + with node.connect('initial') as con: + + # create and initailly fillin tables + con.execute('create table range_partitioned (i integer not null)') + con.execute( + 'insert into range_partitioned select i from generate_series(1, 500) i' + ) + con.execute('create table hash_partitioned (i integer not null)') + con.execute( + 'insert into hash_partitioned select i from generate_series(1, 500) i' + ) + + # partition table keeping data in base table + # enable_parent parameter automatically becames true + con.execute( + "select create_range_partitions('range_partitioned', 'i', 1, 200, partition_data := false)" + ) + con.execute( + "select create_hash_partitions('hash_partitioned', 'i', 5, false)" + ) + + # fillin child tables with remain data + con.execute( + 'insert into range_partitioned select i from generate_series(501, 1000) i' + ) + con.execute( + 'insert into hash_partitioned select i from generate_series(501, 1000) i' + ) + + # set init callback + con.execute(""" + create or replace function init_partition_stub_callback(args jsonb) + returns void as $$ + begin + end + $$ language plpgsql; + """) + con.execute( + "select set_init_callback('range_partitioned', 'init_partition_stub_callback(jsonb)')" + ) + con.execute( + "select set_init_callback('hash_partitioned', 'init_partition_stub_callback(jsonb)')" + ) + + # turn off enable_parent option + con.execute( + "select set_enable_parent('range_partitioned', false)") + con.execute("select set_enable_parent('hash_partitioned', false)") + con.commit() + + # compare strategies + CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) + + def cmp_full(con1, con2): + """ + Compare selection partitions in plan + and contents in partitioned tables + """ + + plan_query = 'explain (costs off, format json) select * from %s' + content_query = 'select * from %s order by i' + table_refs = [ + 'range_partitioned', 'only range_partitioned', + 'hash_partitioned', 'only hash_partitioned' ] - content_copy = [ - x[0] for x in 
con2.execute(content_query % table_ref) - ] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', - 'alter system set pg_pathman.override_copy to off') - node.psql('copy', - 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, None, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "initial" - ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, turnon_pathman, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "--inserts", "initial" - ], [node.get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, None, [ - node.get_bin_path("pg_dump"), "-p {}".format(node.port), - "--format=custom", "initial" - ], [ - node.get_bin_path("pg_restore"), "-p {}".format(node.port), - "--dbname=copy" - ], cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), - ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen( - pg_restore_params, - stdin=subprocess.PIPE, - stdout=FNULL, - stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # validate data - with node.connect('initial') as con1, \ - 
node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual( - cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" - % dump_restore_cmd) - self.assertNotEqual( - cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" - % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() + for table_ref in table_refs: + plan_initial = con1.execute( + plan_query % table_ref)[0][0][0]['Plan'] + plan_copy = con2.execute( + plan_query % 
table_ref)[0][0][0]['Plan'] + if ordered(plan_initial) != ordered(plan_copy): + return PLANS_MISMATCH + + content_initial = [ + x[0] for x in con1.execute(content_query % table_ref) + ] + content_copy = [ + x[0] for x in con2.execute(content_query % table_ref) + ] + if content_initial != content_copy: + return CONTENTS_MISMATCH + + return CMP_OK + + def turnoff_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to off') + node.reload() + + def turnon_pathman(node): + node.psql('initial', 'alter system set pg_pathman.enable to on') + node.psql('copy', 'alter system set pg_pathman.enable to on') + node.psql('initial', + 'alter system set pg_pathman.override_copy to off') + node.psql('copy', + 'alter system set pg_pathman.override_copy to off') + node.reload() + + # Test dump/restore from init database to copy functionality + test_params = [ + (None, None, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "initial" + ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via COPY + (turnoff_pathman, turnon_pathman, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "--inserts", "initial" + ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], + cmp_full), # dump as plain text and restore via INSERTs + (None, None, [ + get_bin_path("pg_dump"), "-p {}".format(node.port), + "--format=custom", "initial" + ], [ + get_bin_path("pg_restore"), "-p {}".format(node.port), + "--dbname=copy" + ], cmp_full), # dump in archive format + ] + + with open(os.devnull, 'w') as fnull: + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: + + dump_restore_cmd = " | ".join((' '.join(pg_dump_params), + ' '.join(pg_restore_params))) + + if (preproc != None): + preproc(node) + + # transfer and restore data + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) + stdoutdata, _ = p1.communicate() + p2 = subprocess.Popen( + pg_restore_params, + 
stdin=subprocess.PIPE, + stdout=fnull, + stderr=fnull) + p2.communicate(input=stdoutdata) + + if (postproc != None): + postproc(node) + + # validate data + with node.connect('initial') as con1, \ + node.connect('copy') as con2: + + # compare plans and contents of initial and copy + cmp_result = cmp_dbs(con1, con2) + self.assertNotEqual( + cmp_result, PLANS_MISMATCH, + "mismatch in plans of select query on partitioned tables under the command: %s" + % dump_restore_cmd) + self.assertNotEqual( + cmp_result, CONTENTS_MISMATCH, + "mismatch in contents of partitioned tables under the command: %s" + % dump_restore_cmd) + + # compare enable_parent flag and callback function + config_params_query = """ + select partrel, enable_parent, init_callback from pathman_config_params + """ + config_params_initial, config_params_copy = {}, {} + for row in con1.execute(config_params_query): + config_params_initial[row[0]] = row[1:] + for row in con2.execute(config_params_query): + config_params_copy[row[0]] = row[1:] + self.assertEqual(config_params_initial, config_params_copy, \ + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + + # compare constraints on each partition + constraints_query = """ + select r.relname, c.conname, c.consrc from + pg_constraint c join pg_class r on c.conrelid=r.oid + where relname similar to '(range|hash)_partitioned_\d+' + """ + constraints_initial, constraints_copy = {}, {} + for row in con1.execute(constraints_query): + constraints_initial[row[0]] = row[1:] + for row in con2.execute(constraints_query): + constraints_copy[row[0]] = row[1:] + self.assertEqual(constraints_initial, constraints_copy, \ + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + + # clear copy database + node.psql('copy', 'drop schema public cascade') + node.psql('copy', 'create schema public') + node.psql('copy', 'drop extension pg_pathman cascade') def test_concurrent_detach(self): """ @@ -1141,73 +1088,66 @@ def 
test_concurrent_detach(self): self.assertTrue( os.path.isfile(insert_pgbench_script), msg="pgbench script with insert timestamp doesn't exist") + self.assertTrue( os.path.isfile(detach_pgbench_script), msg="pgbench script with detach letfmost partition doesn't exist") # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute( - 'create table ts_range_partitioned(ts timestamp not null)') - - # yapf: disable - con0.execute(""" - select create_range_partitions('ts_range_partitioned', - 'ts', - current_timestamp, - interval '%f', - 1) - """ % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench( - stdout=FNULL, - stderr=subprocess.PIPE, - options=[ - "-j", - "%i" % num_insert_workers, "-c", - "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", - "%i" % (test_interval + inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench( - stdout=FNULL, - stderr=FNULL, - options=[ - "-D", - "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, - "-T", - "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone( - re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg=""" - Race condition between detach and concurrent - inserts with append partition is expired - """) - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() - + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create partitioned table for testing that spawns new partition on each next 
*detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + with open(os.devnull, 'w') as fnull: + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=fnull, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=fnull, + stderr=fnull, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) if __name__ == "__main__": unittest.main() From ce19675e7e935a2f8d2bed3d03cbd089d6d85e72 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 15 Sep 2017 16:50:55 +0300 Subject: [PATCH 0740/1124] Fix formatting of partitioning_test.py --- tests/python/partitioning_test.py | 68 +++++++++++++++++-------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 2f772041..1425149c 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -46,8 +46,10 @@ def wrapper(*args, **kwargs): class Tests(unittest.TestCase): - def start_new_pathman_cluster(self, 
name='test', - allow_streaming=False, test_data=False): + def start_new_pathman_cluster(self, + name='test', + allow_streaming=False, + test_data=False): node = get_new_node(name) node.init(allow_streaming=allow_streaming) node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") @@ -79,8 +81,7 @@ def catchup_replica(self, master, replica): WHERE application_name = '{0}' """ - master.poll_query_until('postgres', - wait_lsn_query.format(replica.name)) + master.poll_query_until('postgres', wait_lsn_query.format(replica.name)) def test_concurrent(self): """ Test concurrent partitioning """ @@ -126,7 +127,7 @@ def test_replication(self): replica.psql('postgres', 'explain (costs off) select * from abc')) # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') + node.psql('postgres', "select enable_parent('abc')") self.catchup_replica(node, replica) self.assertEqual( @@ -139,7 +140,8 @@ def test_replication(self): node.execute('postgres', 'select count(*) from abc')[0][0], 300000) # check that UPDATE in pathman_config_params invalidates cache - node.psql('postgres', 'update pathman_config_params set enable_parent = false') + node.psql('postgres', + 'update pathman_config_params set enable_parent = false') self.catchup_replica(node, replica) self.assertEqual( node.psql('postgres', 'explain (costs off) select * from abc'), @@ -147,7 +149,8 @@ def test_replication(self): self.assertEqual( node.psql('postgres', 'select * from abc'), replica.psql('postgres', 'select * from abc')) - self.assertEqual(node.execute('postgres', 'select count(*) from abc')[0][0], 0) + self.assertEqual( + node.execute('postgres', 'select count(*) from abc')[0][0], 0) def test_locks(self): """ @@ -197,15 +200,14 @@ def add_partition(node, flag, query): # Start transaction that will create partition with node.connect() as con: con.begin() - con.execute('select append_range_partition(\'abc\')') + con.execute("select 
append_range_partition('abc')") # Start threads that suppose to add new partitions and wait some # time query = ( "select prepend_range_partition('abc')", "select append_range_partition('abc')", - "select add_range_partition('abc', 500000, 550000)", - ) + "select add_range_partition('abc', 500000, 550000)", ) threads = [] for i in range(3): thread = threading.Thread( @@ -245,7 +247,7 @@ def test_tablespace(self): def check_tablespace(node, tablename, tablespace): res = node.execute('postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) + "select get_tablespace('{}')".format(tablename)) if len(res) == 0: return False @@ -253,57 +255,63 @@ def check_tablespace(node, tablename, tablespace): with get_new_node('master') as node: node.init() - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') + node.append_conf('postgresql.conf', + "shared_preload_libraries='pg_pathman'\n") node.start() node.psql('postgres', 'create extension pg_pathman') # create tablespace path = os.path.join(node.data_dir, 'test_space_location') os.mkdir(path) - node.psql('postgres', 'create tablespace test_space location \'{}\''.format(path)) + node.psql('postgres', + "create tablespace test_space location '{}'".format(path)) # create table in this tablespace - node.psql('postgres', 'create table abc(a serial, b int) tablespace test_space') + node.psql('postgres', + 'create table abc(a serial, b int) tablespace test_space') # create three partitions. 
Excpect that they will be created in the # same tablespace as the parent table - node.psql('postgres', 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') + node.psql('postgres', + "select create_range_partitions('abc', 'a', 1, 10, 3)") self.assertTrue(check_tablespace(node, 'abc', 'test_space')) # check tablespace for appended partition - node.psql('postgres', 'select append_range_partition(\'abc\', \'abc_appended\')') + node.psql('postgres', + "select append_range_partition('abc', 'abc_appended')") self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) # check tablespace for prepended partition node.psql('postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') + "select prepend_range_partition('abc', 'abc_prepended')") self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) # check tablespace for prepended partition node.psql('postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') + "select add_range_partition('abc', 41, 51, 'abc_added')") self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) # check tablespace for split node.psql('postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') + "select split_range_partition('abc_added', 45, 'abc_splitted')") self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) # now let's specify tablespace explicitly node.psql( 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') + "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" + ) node.psql( 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')' + "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" ) node.psql( 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')' + "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" ) node.psql( 'postgres', - 'select 
split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')' + "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" ) # yapf: disable @@ -372,13 +380,13 @@ def test_foreign_table(self): b'25|foreign\n') # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') + master.safe_psql('postgres', "insert into abc values (26, 'part')") self.assertEqual( master.safe_psql('postgres', 'select * from ftable order by id'), b'25|foreign\n26|part\n') # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') + master.safe_psql('postgres', "select drop_partitions('abc')") # HASH partitioning with FDW: # - create hash partitioned table in master @@ -417,7 +425,7 @@ def test_parallel_nodes(self): node.init() node.append_conf( 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') + "shared_preload_libraries='pg_pathman, postgres_fdw'\n") node.start() # Check version of postgres server @@ -468,7 +476,7 @@ def test_parallel_nodes(self): # Check parallel aggregate plan test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + plan = con.execute("select query_plan('%s')" % test_query)[0][0] expected = json.loads(""" [ { @@ -532,7 +540,7 @@ def test_parallel_nodes(self): # Check simple parallel seq scan plan with limit test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] + plan = con.execute("select query_plan('%s')" % test_query)[0][0] expected = json.loads(""" [ { @@ -587,7 +595,7 @@ def test_parallel_nodes(self): # Check the case when none partition is selected in result plan test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % 
test_query)[0][0] + plan = con.execute("select query_plan('%s')" % test_query)[0][0] expected = json.loads(""" [ { @@ -869,7 +877,7 @@ def test_pg_dump(self): with get_new_node('test') as node: node.init() node.append_conf('postgresql.conf', """ - shared_preload_libraries=\'pg_pathman\' + shared_preload_libraries='pg_pathman' pg_pathman.override_copy=false """) node.start() From bc4d271cd7d76e89278b3eaf09ab5a860e9577b9 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Fri, 15 Sep 2017 17:17:39 +0300 Subject: [PATCH 0741/1124] Fix include error --- src/pg_pathman.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 704328ba..bc957212 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -23,6 +23,7 @@ #include "postgres.h" #include "access/sysattr.h" #include "catalog/pg_type.h" +#include "compat/relation_tags.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" From 713c5f9164e142aae1f257558f2d18ed88af9a67 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 19 Sep 2017 14:10:32 +0300 Subject: [PATCH 0742/1124] Fix compability with pg10 (#123) --- src/include/compat/pg_compat.h | 11 ++++++++++- src/partition_filter.c | 4 +++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 45f1a6c5..22a3d5ff 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -59,6 +59,16 @@ } while (0) #endif +/* + * CheckValidResultRel() + */ +#if PG_VERSION_NUM >= 100000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) +#elif PG_VERSION_NUM >= 90500 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#endif /* * BeginCopyFrom() @@ -590,7 +600,6 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif - /* * ------------- * Common code diff --git a/src/partition_filter.c b/src/partition_filter.c 
index a8cbf5ea..214b926a 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -268,7 +268,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Open relation and check if it is a valid target */ child_rel = heap_open(partid, NoLock); - CheckValidResultRel(child_rel, parts_storage->command_type); /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); @@ -318,6 +317,9 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; + /* Check that this partition is a valid result relation */ + CheckValidResultRelCompat(child_result_rel_info, parts_storage->command_type); + /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; From d38e7c93b1bcddedeb2c435bb2a0c62288a805ad Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 20 Sep 2017 14:29:43 +0300 Subject: [PATCH 0743/1124] Fix segfault on DELETE .. 
USING with joins of partitioned tables --- expected/pathman_upd_del.out | 12 +++++++++++- sql/pathman_upd_del.sql | 6 +++++- src/hooks.c | 17 +++++++++++++++++ src/pg_pathman.c | 3 +++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 9f590a9f..21f284e6 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -13,6 +13,13 @@ SET enable_seqscan = OFF; /* Temporary table for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -258,7 +265,10 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; DELETE + USING with partitioned table */ +DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; +ERROR: pg_pathman doesn't support DELETE queries with joining of partitioned tables DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index 16d7ebfd..aae6f466 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -21,6 +21,9 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( @@ -164,7 +167,8 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING 
q; ROLLBACK; - +/* Test special rule for CTE; DELETE + USING with partitioned table */ +DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index abe6face..e5234f4f 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -99,6 +99,23 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (innerrel->reloptkind != RELOPT_BASEREL) return; + /* check if query DELETE FROM .. USING .. */ + if (root->parse->commandType == CMD_DELETE && jointype == JOIN_INNER) + { + int x = -1; + int count = 0; + + while ((x = bms_next_member(joinrel->relids, x)) >= 0) + if (get_pathman_relation_info(root->simple_rte_array[x]->relid)) + count += 1; + + if (count > 1) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("pg_pathman doesn't support DELETE queries with "\ + "joining of partitioned tables"))); + } + /* We shouldn't process tables with active children */ if (inner_rte->inh) return; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 41090f3f..37a2d3f1 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1916,6 +1916,9 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, childRTE = root->simple_rte_array[childRTindex]; childrel = root->simple_rel_array[childRTindex]; + if (!childrel) + elog(ERROR, "could not make access paths to a relation"); + #if PG_VERSION_NUM >= 90600 /* * If parallelism is allowable for this query in general and for parent From a17aeef48df6e19a4bbe16aab6d714a3fc0956e0 Mon Sep 17 00:00:00 2001 From: Sokolov Yura Date: Fri, 22 Sep 2017 17:40:09 +0300 Subject: [PATCH 0744/1124] Fix out of source build When included into postgresql contribs, and postgresql configured "out of source", pg_pathman could not build because it misses include path. Fix it by referring top_srcdir and subdir if build without USE_PGXS. 
--- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 40738ddf..4a94480e 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,11 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ $(WIN32RES) +ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include +else +override PG_CPPFLAGS += -I$(top_srcdir)/$(subdir)/src/include +endif EXTENSION = pg_pathman From 03058fb2b0666a4e003ca6e4b623ff70f09f1f40 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Sep 2017 18:12:51 +0300 Subject: [PATCH 0745/1124] use pip instead of pip3 in run_tests.sh --- run_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 6622ae39..49c481b9 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -39,8 +39,8 @@ fi virtualenv env export VIRTUAL_ENV_DISABLE_PROMPT=1 source env/bin/activate -pip3 install testgres -pip3 freeze | grep testgres +pip install testgres +pip freeze | grep testgres # don't forget to "make clean" make USE_PGXS=1 clean From 4d29720b50b3961bd372763696428572412d6e81 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Sep 2017 20:16:58 +0300 Subject: [PATCH 0746/1124] improve checks for DELETE FROM part_table USING part_table --- expected/pathman_upd_del.out | 197 +++++++++++++++++++++++++---------- sql/pathman_upd_del.sql | 68 +++++++++++- src/hooks.c | 52 ++++++--- 3 files changed, 242 insertions(+), 75 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 21f284e6..147ee2e6 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -10,7 +10,7 @@ CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; SET enable_indexscan = ON; SET enable_seqscan = OFF; -/* Temporary table for JOINs */ +/* Temporary tables for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO 
test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); @@ -35,6 +35,7 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', 12 (1 row) +VACUUM ANALYZE; /* * Test UPDATE and DELETE */ @@ -111,16 +112,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; UPDATE test.range_rel r SET value = t.value @@ -130,17 +130,16 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.tmp t SET value = r.value FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Update on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(8 rows) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) BEGIN; UPDATE test.tmp t SET value = r.value @@ -150,16 +149,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM 
test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; DELETE FROM test.range_rel r USING test.tmp t @@ -169,22 +167,118 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(8 rows) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) BEGIN; DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a 
join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------------ + Delete on tmp r + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Append + -> Seq Scan on tmp2_1 a2 + -> Seq Scan on tmp2_2 a2_1 + -> Seq Scan on tmp2_3 a2_2 + -> Seq Scan on tmp2_4 a2_3 + -> Seq Scan on tmp2_5 a2_4 + -> Seq Scan on tmp2_6 a2_5 + -> Seq Scan on tmp2_7 a2_6 + -> Seq Scan on tmp2_8 a2_7 + -> Seq Scan on tmp2_9 a2_8 + -> Seq Scan on tmp2_10 a2_9 + -> Materialize + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) +(39 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* 
UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -198,10 +292,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (SELECT * FROM test.range_rel r @@ -222,10 +315,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (DELETE FROM test.range_rel r @@ -240,23 +332,21 @@ WITH q AS (DELETE FROM test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id RETURNING *) DELETE FROM test.tmp USING q; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Delete on tmp CTE q -> Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> 
CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(14 rows) +(12 rows) BEGIN; WITH q AS (DELETE FROM test.tmp t @@ -265,9 +355,6 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; -/* Test special rule for CTE; DELETE + USING with partitioned table */ -DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; -ERROR: pg_pathman doesn't support DELETE queries with joining of partitioned tables DROP SCHEMA test CASCADE; NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index aae6f466..bc51f815 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -17,7 +17,7 @@ SET enable_indexscan = ON; SET enable_seqscan = OFF; -/* Temporary table for JOINs */ +/* Temporary tables for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); @@ -39,6 +39,9 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', 12); +VACUUM ANALYZE; + + /* * Test UPDATE and DELETE */ @@ -123,6 +126,66 @@ WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM 
test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -167,8 +230,7 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING q; ROLLBACK; -/* Test special rule for CTE; DELETE + USING with partitioned table */ -DELETE FROM test.range_rel r USING test.tmp2 t WHERE t.id = r.id; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index e5234f4f..e1f34b8b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -99,23 +99,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (innerrel->reloptkind != RELOPT_BASEREL) return; - /* check if query DELETE FROM .. USING .. */ - if (root->parse->commandType == CMD_DELETE && jointype == JOIN_INNER) - { - int x = -1; - int count = 0; - - while ((x = bms_next_member(joinrel->relids, x)) >= 0) - if (get_pathman_relation_info(root->simple_rte_array[x]->relid)) - count += 1; - - if (count > 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("pg_pathman doesn't support DELETE queries with "\ - "joining of partitioned tables"))); - } - /* We shouldn't process tables with active children */ if (inner_rte->inh) return; @@ -129,6 +112,41 @@ pathman_join_pathlist_hook(PlannerInfo *root, !(inner_prel = get_pathman_relation_info(inner_rte->relid))) return; + /* + * Check if query is: + * 1) UPDATE part_table SET = .. FROM part_table. + * 2) DELETE FROM part_table USING part_table. + * + * Either outerrel or innerrel may be a result relation. 
+ */ + if ((root->parse->resultRelation == outerrel->relid || + root->parse->resultRelation == innerrel->relid) && + (root->parse->commandType == CMD_UPDATE || + root->parse->commandType == CMD_DELETE)) + { + int rti = -1, + count = 0; + + /* Inner relation must be partitioned */ + Assert(inner_prel); + + /* Check each base rel of outer relation */ + while ((rti = bms_next_member(outerrel->relids, rti)) >= 0) + { + Oid outer_baserel = root->simple_rte_array[rti]->relid; + + /* Is it partitioned? */ + if (get_pathman_relation_info(outer_baserel)) + count++; + } + + if (count > 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("DELETE and UPDATE queries with a join " + "of partitioned tables are not supported"))); + } + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, inner_rte)) From a40fc5aa6acdc652772cb2ffe8a6b4a5a3d1501b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Sep 2017 20:21:41 +0300 Subject: [PATCH 0747/1124] use pip instead of pip3 in pg-travis-test.sh --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 5c0ec44e..bdae1541 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -97,7 +97,7 @@ virtualenv /tmp/envs/pg_pathman source /tmp/envs/pg_pathman/bin/activate # install pip packages -pip3 install $pip_packages +pip install $pip_packages # run python tests make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? 
From 64ebf7f690629c14a27ec11145562113187f0fca Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Sep 2017 20:29:12 +0300 Subject: [PATCH 0748/1124] use python instead of python3 in tests/python/Makefile --- tests/python/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/Makefile b/tests/python/Makefile index cb2bc50d..bb548928 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,2 @@ partitioning_tests: - python3 -m unittest partitioning_test.py + python -m unittest partitioning_test.py From 4bdc79290086cf51a866846a6685f4deb0f43c36 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 25 Sep 2017 12:29:03 +0300 Subject: [PATCH 0749/1124] Update make_images.py --- make_images.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/make_images.py b/make_images.py index 4de7d40e..9c9b6e43 100755 --- a/make_images.py +++ b/make_images.py @@ -15,30 +15,33 @@ ''' How to create this patch: - 1) put `import ipdb; ipdb.set_trace()` in make_alpine_image, after `open(patch_name)..` - 2) run the script - 3) in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1 && diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` - 4) contents of cassert.patch put to variable below - 5) change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` + * put `import ipdb; ipdb.set_trace()` in make_alpine_image, just before `open(patch_name)..` + * run the script + * in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1` + * uncomment --enable-debug, add --enable-cassert, add `CFLAGS="-g3 -O0"` before ./configure + * run `diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` + * contents of cassert.patch put to variable below + * change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` ''' ALPINE_PATCH = b''' ---- Dockerfile 2017-07-27 14:54:10.403971867 +0300 -+++ Dockerfile 2017-07-27 14:56:01.132503106 +0300 -@@ -79,7 +79,7 
@@ +--- Dockerfile 2017-09-25 12:01:24.597813507 +0300 ++++ Dockerfile 2017-09-25 12:09:06.104059704 +0300 +@@ -79,15 +79,15 @@ && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\ # configure options taken from: # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 - && ./configure \\ -+ && CFLAGS="-O0" ./configure \\ ++ && CFLAGS="-g3 -O0" ./configure \\ --build="$gnuArch" \\ # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" # --enable-nls \\ -@@ -87,7 +87,7 @@ + --enable-integer-datetimes \\ --enable-thread-safety \\ --enable-tap-tests \\ - # skip debugging info -- we want tiny size instead +-# skip debugging info -- we want tiny size instead -# --enable-debug \\ + --enable-debug \\ ++ --enable-cassert \\ --disable-rpath \\ --with-uuid=e2fs \\ --with-gnu-ld \\ From aee0875c1bff6cc893a01f433d70214148a7bfdd Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 25 Sep 2017 17:31:01 +0300 Subject: [PATCH 0750/1124] Deny split and merge for subpartitions --- expected/pathman_subpartitions.out | 40 +++++++++++++++++++++++++++++- range.sql | 23 +++++++++++------ sql/pathman_subpartitions.sql | 8 ++++++ src/init.c | 8 +++--- src/pl_range_funcs.c | 15 +++++++++-- 5 files changed, 80 insertions(+), 14 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 54e93e9e..36bf0919 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -300,8 +300,46 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti subpartitions.abc_2_3 | 125 | 125 (10 rows) +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); +ERROR: could not split partition if it has children +SELECT split_range_partition('subpartitions.abc_2_2', 
75); + split_range_partition +----------------------- + {50,100} +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); +ERROR: cannot merge partitions +select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); + merge_range_partitions +------------------------ + +(1 row) + DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 10 other objects +NOTICE: drop cascades to 11 other objects DROP SCHEMA subpartitions CASCADE; NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/range.sql b/range.sql index 67cf3d7a..c03a04d5 100644 --- a/range.sql +++ b/range.sql @@ -313,13 +313,14 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( OUT p_range ANYARRAY) RETURNS ANYARRAY AS $$ DECLARE - parent_relid REGCLASS; - part_type INTEGER; - part_expr TEXT; - part_expr_type REGTYPE; - check_name TEXT; - check_cond TEXT; - new_partition TEXT; + parent_relid REGCLASS; + inhparent REGCLASS; + part_type INTEGER; + part_expr TEXT; + part_expr_type REGTYPE; + check_name TEXT; + check_cond TEXT; + new_partition TEXT; BEGIN parent_relid = @extschema@.get_parent_of_partition(partition_relid); @@ -327,6 +328,14 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); + EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') + USING partition_relid + 
INTO inhparent; + + if inhparent IS NOT NULL THEN + RAISE EXCEPTION 'could not split partition if it has children'; + END IF; + /* Acquire lock on parent */ PERFORM @extschema@.prevent_part_modification(parent_relid); diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index aefe728d..e5ed87c0 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -97,7 +97,15 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); +SELECT split_range_partition('subpartitions.abc_2_2', 75); +SELECT subpartitions.partitions_tree('subpartitions.abc'); +/* merge_range_partitions */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); +select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; diff --git a/src/init.c b/src/init.c index 13487f7e..c556c485 100644 --- a/src/init.c +++ b/src/init.c @@ -406,7 +406,7 @@ fini_local_cache(void) * find_inheritance_children * * Returns an array containing the OIDs of all relations which - * inherit *directly* from the relation with OID 'parentrelId'. + * inherit *directly* from the relation with OID 'parent_relid'. * * The specified lock type is acquired on each child relation (but not on the * given rel; caller should already have locked it). 
If lockmode is NoLock @@ -416,7 +416,7 @@ fini_local_cache(void) * borrowed from pg_inherits.c */ find_children_status -find_inheritance_children_array(Oid parentrelId, +find_inheritance_children_array(Oid parent_relid, LOCKMODE lockmode, bool nowait, uint32 *children_size, /* ret value #1 */ @@ -444,7 +444,7 @@ find_inheritance_children_array(Oid parentrelId, * Can skip the scan if pg_class shows the * relation has never had a subclass. */ - if (!has_subclass(parentrelId)) + if (!has_subclass(parent_relid)) return FCS_NO_CHILDREN; /* @@ -459,7 +459,7 @@ find_inheritance_children_array(Oid parentrelId, ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parentrelId)); + ObjectIdGetDatum(parent_relid)); scan = systable_beginscan(relation, InheritsParentIndexId, true, NULL, 1, key); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 716c5d17..805af65c 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -16,9 +16,10 @@ #include "xact_handling.h" #include "access/xact.h" +#include "catalog/heap.h" #include "catalog/namespace.h" +#include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" -#include "catalog/heap.h" #include "commands/tablecmds.h" #include "executor/spi.h" #include "nodes/nodeFuncs.h" @@ -636,7 +637,17 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Extract partition Oids from array */ partitions = palloc(sizeof(Oid) * nparts); for (i = 0; i < nparts; i++) - partitions[i] = DatumGetObjectId(datums[i]); + { + Oid partition_relid; + partition_relid = DatumGetObjectId(datums[i]); + + /* check that is not has subpartitions */ + if (has_subclass(partition_relid)) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("at least one of specified partitions has children"))); + + partitions[i] = partition_relid; + } if (nparts < 2) ereport(ERROR, (errmsg("cannot merge partitions"), From 533b00b7db7cfaf1bafdfd409b3cf8b548912f6f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: 
Tue, 26 Sep 2017 14:27:02 +0300 Subject: [PATCH 0751/1124] show alias definitions for python and pip --- travis/pg-travis-test.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index bdae1541..db1feb3e 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -95,6 +95,8 @@ set +u # create virtual environment and activate it virtualenv /tmp/envs/pg_pathman source /tmp/envs/pg_pathman/bin/activate +type python +type pip # install pip packages pip install $pip_packages From 9949eeee35018fd3b00a7c335373174c0c572f4a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Sep 2017 15:02:22 +0300 Subject: [PATCH 0752/1124] use python3 for tests --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index db1feb3e..97fa5ea9 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -93,7 +93,7 @@ if test -f regression.diffs; then cat regression.diffs; fi set +u # create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman +virtualenv /tmp/envs/pg_pathman --python=python3 source /tmp/envs/pg_pathman/bin/activate type python type pip From ef1e4440b3e6451222a4121d8216a869e5baae7b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Sep 2017 16:19:35 +0300 Subject: [PATCH 0753/1124] fix upd_del tests for 9.5 --- expected/pathman_upd_del_1.out | 205 +++++++++++++++++++++++---------- 1 file changed, 143 insertions(+), 62 deletions(-) diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index a019285b..a1eeda7f 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -10,9 +10,16 @@ CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; SET enable_indexscan = ON; SET enable_seqscan = OFF; -/* Temporary table for JOINs */ +/* Temporary tables for JOINs */ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); 
INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + /* Partition table by RANGE */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -28,6 +35,7 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', 12 (1 row) +VACUUM ANALYZE; /* * Test UPDATE and DELETE */ @@ -104,16 +112,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; UPDATE test.range_rel r SET value = t.value @@ -123,19 +130,21 @@ ROLLBACK; EXPLAIN (COSTS OFF) UPDATE test.tmp t SET value = r.value FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Update on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 - Filter: (dt = 'Fri Jan 
01 00:00:00 2010'::timestamp without time zone) -(10 rows) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) BEGIN; UPDATE test.tmp t SET value = r.value @@ -145,16 +154,15 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------- Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) +(6 rows) BEGIN; DELETE FROM test.range_rel r USING test.tmp t @@ -164,24 +172,98 @@ ROLLBACK; EXPLAIN (COSTS OFF) DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 - Filter: (dt = 'Sat Jan 02 
00:00:00 2010'::timestamp without time zone) -(10 rows) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) BEGIN; DELETE FROM test.tmp t USING test.range_rel r WHERE r.dt = '2010-01-02' AND r.id = t.id; ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +--------------------------------------------- + Delete on tmp r + -> Merge Join + Merge Cond: (a1.id = a2.id) + -> Merge Join + Merge Cond: (r.id = a1.id) + -> Sort + Sort Key: r.id + -> Seq Scan on tmp r + -> Sort + Sort Key: a1.id + -> Seq Scan on tmp2 a1 + -> Sort + Sort Key: a2.id + 
-> Seq Scan on tmp2 a2 +(14 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r @@ -195,10 +277,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (SELECT * FROM test.range_rel r @@ -219,10 +300,9 @@ DELETE FROM test.tmp USING q; -> Seq Scan on range_rel_1 r Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(9 rows) +(8 rows) BEGIN; WITH q AS (DELETE FROM test.range_rel r @@ -237,25 +317,26 @@ WITH q AS (DELETE FROM test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id RETURNING *) DELETE FROM test.tmp USING q; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Delete on tmp CTE q -> Delete on tmp t - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Append - -> Index Scan using range_rel_pkey on range_rel r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) - -> Index Scan using range_rel_1_pkey on 
range_rel_1 r_1 - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t -> Nested Loop + -> Seq Scan on tmp -> CTE Scan on q - -> Materialize - -> Seq Scan on tmp -(16 rows) +(17 rows) BEGIN; WITH q AS (DELETE FROM test.tmp t @@ -265,6 +346,6 @@ WITH q AS (DELETE FROM test.tmp t DELETE FROM test.tmp USING q; ROLLBACK; DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; From cfcb53da159a8dfe1da0dc55719487d7524020eb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Sep 2017 17:47:35 +0300 Subject: [PATCH 0754/1124] use testgres==0.4.0 for master branch --- travis/pg-travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index 97fa5ea9..be0e645e 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -7,7 +7,7 @@ sudo apt-get update # required packages apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" +pip_packages="testgres==0.4.0" # exit code status=0 From 280c26a67125944e398bec7966c3a5c852a5f999 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 27 Sep 2017 16:29:10 +0300 Subject: [PATCH 0755/1124] Add more checks and fixes for subpartitions --- init.sql | 30 +++++++++++++ range.sql | 84 ++++++++++++++++++++++++++++++++-- sql/pathman_subpartitions.sql | 9 ++-- src/include/relation_info.h | 4 ++ src/pl_funcs.c | 44 ++++++++++++++++++ 
src/relation_info.c | 85 +++++++++++++++++++++++++++++++++++ 6 files changed, 250 insertions(+), 6 deletions(-) diff --git a/init.sql b/init.sql index 96c537d3..5dd808ec 100644 --- a/init.sql +++ b/init.sql @@ -862,6 +862,36 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; +/* + * Get parent of pg_pathman's partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( + parent_relid REGCLASS, + expression TEXT, + value_type OID) +RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' +LANGUAGE C STRICT; + +/* + * Get lower bound of a partitioned relation + * bound_value is used to determine the type of bound + */ +CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( + relid REGCLASS, + bound_value ANYELEMENT +) +RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' +LANGUAGE C STRICT; + +/* + * Get upper bound of a partition + */ +CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( + relid REGCLASS, + bound_value ANYELEMENT +) +RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' +LANGUAGE C STRICT; /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. diff --git a/range.sql b/range.sql index c03a04d5..77b6c4de 100644 --- a/range.sql +++ b/range.sql @@ -158,24 +158,47 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE + relid REGCLASS; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; + lower_bound start_value%TYPE = NULL; + upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - + part_type INT4; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + /* + * Check that we're trying to make subpartitions. + * If expressions are same then we set and use upper bound. 
+ * We change start_value if it's greater than lower bound. + */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_type := get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, expression, pg_typeof(start_value)) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE NOTICE '"start_value" was set to %', start_value; + END IF; + END IF; + END IF; + IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; END IF; /* Try to determine partitions count if not set */ - IF p_count IS NULL THEN + IF p_count IS NULL OR (relid IS NOT NULL AND p_count = 0) THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; @@ -189,6 +212,7 @@ BEGIN p_count := 0; WHILE cur_value <= max_value + OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -205,6 +229,20 @@ BEGIN FOR i IN 1..p_count LOOP end_value := end_value + p_interval; + IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN + part_count := i; + IF end_value > upper_bound THEN + RAISE WARNING '"p_interval" is not multiple of range (%, %)', + start_value, end_value; + END IF; + IF p_count != part_count THEN + p_count := part_count; + RAISE NOTICE '"p_count" was set %', p_count; + END IF; + + /* we got our partitions count */ + EXIT; + END IF; END LOOP; /* check boundaries */ @@ -460,6 +498,26 @@ BEGIN END $$ LANGUAGE plpgsql; + +/* + * NOTE: we need this function just to determine the type + * of "upper_bound" var + */ +CREATE OR REPLACE FUNCTION @extschema@.check_against_upper_bound_internal( + relid REGCLASS, + bound_value ANYELEMENT, + error_message TEXT) +RETURNS VOID AS $$ +DECLARE + 
upper_bound bound_value%TYPE; +BEGIN + upper_bound := get_upper_bound(relid, bound_value); + IF bound_value > upper_bound THEN + RAISE EXCEPTION '%', error_message; + END IF; +END +$$ LANGUAGE plpgsql; + /* * Spawn logic for append_partition(). We have to * separate this in order to pass the 'p_range'. @@ -475,10 +533,12 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( tablespace TEXT DEFAULT NULL) RETURNS TEXT AS $$ DECLARE + relid REGCLASS; part_expr_type REGTYPE; part_name TEXT; v_args_format TEXT; - + part_expr TEXT; + part_type INTEGER; BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot append to empty partitions set'; @@ -496,6 +556,24 @@ BEGIN RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; END IF; + /* + * In case a user has used same expression on two levels, we need to check + * that we've not reached upper bound of higher partitioned table + */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid + INTO part_expr; + + part_type := get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, part_expr, part_expr_type) + THEN + PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, + p_range[2], 'reached upper bound in the current level of subpartitions'); + END IF; + END IF; + IF @extschema@.is_date_type(p_atttype) THEN v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index e5ed87c0..91abd021 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -82,8 +82,8 @@ SET pg_pathman.enable_partitionrouter = ON; CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); SELECT create_range_partitions('subpartitions.abc', 'a', 
0, 100, 2); -SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); -SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ @@ -103,10 +103,13 @@ SELECT split_range_partition('subpartitions.abc_2_2', 75); SELECT subpartitions.partitions_tree('subpartitions.abc'); /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +/* create subpartitions but use same expression */ +SELECT create_range_partitions('subpartitions.abc_3', 'a', 150, 50, 2); + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index b5ac6877..0c5428ba 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -296,6 +296,8 @@ Datum cook_partitioning_expression(const Oid relid, char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); +bool is_equal_to_partitioning_expression(Oid relid, char *expression, + Oid value_type); /* Global invalidation routines */ void delay_pathman_shutdown(void); @@ -312,6 +314,8 @@ Oid get_parent_of_partition(Oid partition, PartParentSearch *status); void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Datum get_lower_bound(Oid parent_relid, Oid value_type); +Datum 
get_upper_bound(Oid relid, Oid value_type); /* PartType wrappers */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 1b6cf9c2..d8e48540 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -72,6 +72,10 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( pathman_version ); +PG_FUNCTION_INFO_V1( get_lower_bound_pl ); +PG_FUNCTION_INFO_V1( get_upper_bound_pl ); +PG_FUNCTION_INFO_V1( is_equal_to_partitioning_expression_pl ); + /* User context for function show_partition_list_internal() */ typedef struct @@ -145,6 +149,46 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +/* + * Get parent of a specified partition. + */ +Datum +is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) +{ + bool result; + Oid parent_relid = PG_GETARG_OID(0); + char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + Oid value_type = PG_GETARG_OID(2); + + result = is_equal_to_partitioning_expression(parent_relid, expr, + value_type); + PG_RETURN_BOOL(result); +} + +/* + * Get min bound value for parent relation + */ +Datum +get_lower_bound_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + PG_RETURN_POINTER(get_lower_bound(relid, value_type)); +} + +/* + * Get min bound value for parent relation + */ +Datum +get_upper_bound_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + PG_RETURN_POINTER(get_upper_bound(relid, value_type)); +} + /* * Extract basic type of a domain. 
*/ diff --git a/src/relation_info.c b/src/relation_info.c index 2e0ce598..0b29ac18 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1040,6 +1040,91 @@ get_parent_of_partition(Oid partition, PartParentSearch *status) return get_parent_of_partition_internal(partition, status, HASH_FIND); } +/* Check that expression is equal to expression of some partitioned table */ +bool +is_equal_to_partitioning_expression(Oid relid, char *expression, + Oid value_type) +{ + const PartRelationInfo *prel; + char *cexpr; + Oid expr_type; + + /* + * Cook and get a canonicalized expression, + * we don't need a result of the cooking + */ + cook_partitioning_expression(relid, expression, &expr_type); + cexpr = canonicalize_partitioning_expression(relid, expression); + + prel = get_pathman_relation_info(relid); + + /* caller should have been check it already */ + Assert(prel != NULL); + + return (expr_type == value_type) && + (strcmp(cexpr, prel->expr_cstr) == 0); +} + +/* Get lower bound of a partition */ +Datum +get_lower_bound(Oid relid, Oid value_type) +{ + Oid parent_relid; + Datum result; + const PartRelationInfo *prel; + PartBoundInfo *pbin; + PartParentSearch parent_search; + + parent_relid = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(relid)); + + prel = get_pathman_relation_info(parent_relid); + Assert(prel && prel->parttype == PT_RANGE); + pbin = get_bounds_of_partition(relid, prel); + Assert(prel != NULL); + + if (IsInfinite(&pbin->range_min)) + return PointerGetDatum(NULL); + + result = BoundGetValue(&pbin->range_min); + if (value_type != prel->ev_type) + result = perform_type_cast(result, prel->ev_type, value_type, NULL); + + return result; +} + +/* Get upper bound of a partition */ +Datum +get_upper_bound(Oid relid, Oid value_type) +{ + Oid parent_relid; + Datum result; + const PartRelationInfo *prel; + PartBoundInfo *pbin; + 
PartParentSearch parent_search; + + parent_relid = get_parent_of_partition(relid, &parent_search); + if (parent_search != PPS_ENTRY_PART_PARENT) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name_or_relid(relid)); + + prel = get_pathman_relation_info(parent_relid); + Assert(prel && prel->parttype == PT_RANGE); + pbin = get_bounds_of_partition(relid, prel); + Assert(prel != NULL); + + if (IsInfinite(&pbin->range_max)) + return PointerGetDatum(NULL); + + result = BoundGetValue(&pbin->range_max); + if (value_type != prel->ev_type) + result = perform_type_cast(result, prel->ev_type, value_type, NULL); + + return result; +} + /* * Get [and remove] "partition+parent" pair from cache, * also check syscache if 'status' is provided. From 89622778f2b9b5af4d5d707e4593eaf13ff7d9a0 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 2 Oct 2017 18:19:07 +0300 Subject: [PATCH 0756/1124] Add more subpartitions fixes and tests --- expected/pathman_basic.out | 2 +- expected/pathman_expressions.out | 4 +- expected/pathman_subpartitions.out | 149 +++++++++++++++++++++++++++- range.sql | 151 ++++++++++++++++++++++++----- sql/pathman_subpartitions.sql | 37 ++++++- src/relation_info.c | 2 +- 6 files changed, 312 insertions(+), 33 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index f809eebc..de3bf727 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT 
pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index c0f4b0e9..9e19d217 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 36bf0919..3a7d4706 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -224,13 +224,13 @@ SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); 2 (1 row) -SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ create_range_partitions ------------------------- 2 (1 row) -SELECT 
create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ create_range_partitions ------------------------- 2 @@ -324,7 +324,7 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); (9 rows) /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ append_range_partition ------------------------ subpartitions.abc_3 @@ -340,6 +340,149 @@ select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 11 other objects +/* subpartitions on same expressions */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ + create_range_partitions +------------------------- + 9 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ +WARNING: "start_value" was set to 100 + create_range_partitions +------------------------- + 8 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ +WARNING: "p_interval" is not multiple of range (200, 310) +NOTICE: "p_count" was limited to 10 + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ +ERROR: Bounds should start from 300 +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ +ERROR: Lower bound of rightmost partition should be less than 400 +SELECT 
create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ + create_range_partitions +------------------------- + 2 +(1 row) + +\d+ subpartitions.abc_1 + Table "subpartitions.abc_1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | not null | | plain | | +Check constraints: + "pathman_abc_1_check" CHECK (a >= 0 AND a < 100) +Inherits: subpartitions.abc +Child tables: subpartitions.abc_1_1, + subpartitions.abc_1_2, + subpartitions.abc_1_3, + subpartitions.abc_1_4, + subpartitions.abc_1_5, + subpartitions.abc_1_6, + subpartitions.abc_1_7, + subpartitions.abc_1_8, + subpartitions.abc_1_9 + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+------------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | a | 200 | 300 + subpartitions.abc | subpartitions.abc_4 | 2 | a | 300 | 400 + subpartitions.abc_1 | subpartitions.abc_1_1 | 2 | a | 0 | 11 + subpartitions.abc_1 | subpartitions.abc_1_2 | 2 | a | 11 | 22 + subpartitions.abc_1 | subpartitions.abc_1_3 | 2 | a | 22 | 33 + subpartitions.abc_1 | subpartitions.abc_1_4 | 2 | a | 33 | 44 + subpartitions.abc_1 | subpartitions.abc_1_5 | 2 | a | 44 | 55 + subpartitions.abc_1 | subpartitions.abc_1_6 | 2 | a | 55 | 66 + subpartitions.abc_1 | subpartitions.abc_1_7 | 2 | a | 66 | 77 + subpartitions.abc_1 | subpartitions.abc_1_8 | 2 | a | 77 | 88 + subpartitions.abc_1 | subpartitions.abc_1_9 | 2 | a | 88 | 99 + subpartitions.abc_2 | subpartitions.abc_2_1 | 2 | a | 100 | 111 + subpartitions.abc_2 | subpartitions.abc_2_2 | 2 | a | 111 | 122 + subpartitions.abc_2 | subpartitions.abc_2_3 | 2 | a | 122 | 133 + 
subpartitions.abc_2 | subpartitions.abc_2_4 | 2 | a | 133 | 144 + subpartitions.abc_2 | subpartitions.abc_2_5 | 2 | a | 144 | 155 + subpartitions.abc_2 | subpartitions.abc_2_6 | 2 | a | 155 | 166 + subpartitions.abc_2 | subpartitions.abc_2_7 | 2 | a | 166 | 177 + subpartitions.abc_2 | subpartitions.abc_2_8 | 2 | a | 177 | 188 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | a | 200 | 211 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | a | 211 | 222 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | a | 222 | 233 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | a | 233 | 244 + subpartitions.abc_3 | subpartitions.abc_3_5 | 2 | a | 244 | 255 + subpartitions.abc_3 | subpartitions.abc_3_6 | 2 | a | 255 | 266 + subpartitions.abc_3 | subpartitions.abc_3_7 | 2 | a | 266 | 277 + subpartitions.abc_3 | subpartitions.abc_3_8 | 2 | a | 277 | 288 + subpartitions.abc_3 | subpartitions.abc_3_9 | 2 | a | 288 | 299 + subpartitions.abc_3 | subpartitions.abc_3_10 | 2 | a | 299 | 310 + subpartitions.abc_4 | subpartitions.abc_4_1 | 2 | a | 300 | 350 + subpartitions.abc_4 | subpartitions.abc_4_2 | 2 | a | 350 | 450 +(33 rows) + +SELECT append_range_partition('subpartitions.abc_1'::regclass); + append_range_partition +------------------------ + subpartitions.abc_1_10 +(1 row) + +SELECT append_range_partition('subpartitions.abc_1'::regclass); +ERROR: reached upper bound in the current level of subpartitions +DROP TABLE subpartitions.abc_1_10; +/* detach_range_partition */ +SELECt detach_range_partition('subpartitions.abc_1'); +ERROR: could not detach partition if it has children +/* attach_range_partition */ +CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ +ERROR: specified range [98, 110) overlaps with existing partitions +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ +ERROR: "start value" exceeds upper 
bound of the current level of subpartitions +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ + attach_range_partition +------------------------ + subpartitions.abc_c +(1 row) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 39 other objects +/* subpartitions on same expression but dates */ +CREATE TABLE subpartitions.abc(a DATE NOT NULL); +INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 6 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, + '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ +WARNING: "start_value" was set to 10-02-2017 +WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) +NOTICE: "p_count" was limited to 1 + create_range_partitions +------------------------- + 1 +(1 row) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects DROP SCHEMA subpartitions CASCADE; NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/range.sql b/range.sql index 77b6c4de..fa72df8d 100644 --- a/range.sql +++ b/range.sql @@ -46,6 +46,29 @@ BEGIN END $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION @extschema@.has_parent_partitioned_by_expression( + parent_relid REGCLASS, + expression TEXT, + expr_type REGTYPE) +RETURNS BOOL AS $$ +DECLARE + relid REGCLASS; + part_type INTEGER; +BEGIN + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_type := @extschema@.get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, expression, expr_type) + THEN + RETURN TRUE; + END IF; + END IF; + + RETURN FALSE; +END +$$ LANGUAGE plpgsql; + /* * Creates RANGE partitions for 
specified relation based on datetime attribute */ @@ -63,14 +86,33 @@ DECLARE max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; + lower_bound start_value%TYPE = NULL; + upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * Check that we're trying to make subpartitions. + * If expressions are same then we set and use upper bound. + * We change start_value if it's greater than lower bound. + */ + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, value_type) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE WARNING '"start_value" was set to %', start_value; + END IF; + END IF; + IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; @@ -86,14 +128,13 @@ BEGIN p_count := 0; WHILE cur_value <= max_value + OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - /* * In case when user doesn't want to automatically create partitions * and specifies partition count as 0 then do not check boundaries @@ -104,6 +145,20 @@ BEGIN FOR i IN 1..p_count LOOP end_value := end_value + p_interval; + IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN + part_count := i; + IF end_value > upper_bound THEN + RAISE WARNING '"p_interval" is not multiple of range (%, %)', + start_value, end_value; + END IF; + IF p_count != part_count THEN + p_count := part_count; + RAISE NOTICE '"p_count" was limited to %', p_count; + END IF; + + /* we got our partitions count */ + 
EXIT; + END IF; END LOOP; /* Check boundaries */ @@ -158,7 +213,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - relid REGCLASS; + value_type REGTYPE; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; @@ -167,29 +222,26 @@ DECLARE upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; - part_type INT4; BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); + value_type := @extschema@.get_base_type(pg_typeof(start_value)); + /* * Check that we're trying to make subpartitions. * If expressions are same then we set and use upper bound. * We change start_value if it's greater than lower bound. */ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_type := get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, expression, pg_typeof(start_value)) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE NOTICE '"start_value" was set to %', start_value; - END IF; + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, value_type) + THEN + lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); + upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); + IF lower_bound != start_value THEN + start_value := lower_bound; + RAISE WARNING '"start_value" was set to %', start_value; END IF; END IF; @@ -198,7 +250,7 @@ BEGIN END IF; /* Try to determine partitions count if not set */ - IF p_count IS NULL OR (relid IS NOT NULL AND p_count = 0) THEN + IF p_count IS NULL THEN EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, 
max_value; @@ -237,7 +289,7 @@ BEGIN END IF; IF p_count != part_count THEN p_count := part_count; - RAISE NOTICE '"p_count" was set %', p_count; + RAISE NOTICE '"p_count" was limited to %', p_count; END IF; /* we got our partitions count */ @@ -294,6 +346,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; + part_bounds bounds%TYPE; BEGIN IF array_ndims(bounds) > 1 THEN @@ -308,11 +361,31 @@ BEGIN expression, partition_data); + /* + * Subpartitions checks, in array version of create_range_partitions + * we raise exception instead of notice + */ + IF @extschema@.has_parent_partitioned_by_expression(parent_relid, + expression, pg_typeof(bounds[1])) + THEN + part_bounds[1] := @extschema@.get_lower_bound(parent_relid, bounds[1]); + part_bounds[2] := @extschema@.get_upper_bound(parent_relid, bounds[1]); + IF part_bounds[1] != bounds[1] THEN + RAISE EXCEPTION 'Bounds should start from %', part_bounds[1]; + END IF; + END IF; + + IF part_bounds[2] IS NOT NULL AND + bounds[array_length(bounds, 1) - 1] > part_bounds[2] + THEN + RAISE EXCEPTION 'Lower bound of rightmost partition should be less than %', part_bounds[2]; + END IF; + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, - bounds[0], - bounds[array_length(bounds, 1) - 1]); + bounds[1], + bounds[array_length(bounds, 1)]); /* Create sequence for child partitions names */ PERFORM @extschema@.create_naming_sequence(parent_relid); @@ -512,7 +585,7 @@ DECLARE upper_bound bound_value%TYPE; BEGIN upper_bound := get_upper_bound(relid, bound_value); - IF bound_value > upper_bound THEN + IF bound_value >= upper_bound THEN RAISE EXCEPTION '%', error_message; END IF; END @@ -565,7 +638,7 @@ BEGIN SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid INTO part_expr; - part_type := get_partition_type(relid); + part_type := @extschema@.get_partition_type(relid); IF (part_type = 2) AND 
@extschema@.is_equal_to_partitioning_expression( relid, part_expr, part_expr_type) THEN @@ -813,9 +886,11 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE part_expr TEXT; + part_expr_type REGTYPE; + part_type INTEGER; rel_persistence CHAR; v_init_callback REGPROCEDURE; - + relid REGCLASS; BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); @@ -839,6 +914,25 @@ BEGIN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; + /* + * In case a user has used same expression on two levels, we need to check + * that we've not reached upper bound of higher partitioned table + */ + relid := @extschema@.get_parent_of_partition(parent_relid, false); + IF relid IS NOT NULL THEN + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid + INTO part_expr; + + part_type := @extschema@.get_partition_type(relid); + IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( + relid, part_expr, part_expr_type) + THEN + PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, + start_value, '"start value" exceeds upper bound of the current level of subpartitions'); + END IF; + END IF; + /* Set inheritance */ EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); @@ -884,6 +978,7 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; + inhparent REGCLASS; part_type INTEGER; BEGIN @@ -892,6 +987,14 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); + EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') + USING partition_relid + INTO inhparent; + + if inhparent IS NOT NULL THEN + RAISE EXCEPTION 'could not detach partition if it has children'; + END IF; + /* Acquire lock on 
parent */ PERFORM @extschema@.prevent_data_modification(parent_relid); diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 91abd021..72e38c60 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -107,8 +107,41 @@ SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 20 select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); -/* create subpartitions but use same expression */ -SELECT create_range_partitions('subpartitions.abc_3', 'a', 150, 50, 2); +DROP TABLE subpartitions.abc CASCADE; + +/* subpartitions on same expressions */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); +SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ +SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ +SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ +SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ +\d+ subpartitions.abc_1 +SELECT * FROM pathman_partition_list; +SELECT append_range_partition('subpartitions.abc_1'::regclass); +SELECT append_range_partition('subpartitions.abc_1'::regclass); +DROP TABLE subpartitions.abc_1_10; + +/* detach_range_partition */ +SELECt detach_range_partition('subpartitions.abc_1'); + +/* attach_range_partition */ +CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); +SELECT attach_range_partition('subpartitions.abc_1', 
'subpartitions.abc_c', 98, 110); /* fail */ +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ +SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ + +DROP TABLE subpartitions.abc CASCADE; + +/* subpartitions on same expression but dates */ +CREATE TABLE subpartitions.abc(a DATE NOT NULL); +INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); +SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, + '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; diff --git a/src/relation_info.c b/src/relation_info.c index 0b29ac18..d7d69116 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1061,7 +1061,7 @@ is_equal_to_partitioning_expression(Oid relid, char *expression, /* caller should have been check it already */ Assert(prel != NULL); - return (expr_type == value_type) && + return (getBaseType(expr_type) == value_type) && (strcmp(cexpr, prel->expr_cstr) == 0); } From 65d2f20200e71eac41a464f8f0d60b908969b0f4 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 3 Oct 2017 12:32:55 +0300 Subject: [PATCH 0757/1124] fixup! 
Add more subpartitions fixes and tests --- expected/pathman_subpartitions.out | 24 +++--------------------- sql/pathman_subpartitions.sql | 7 +++---- 2 files changed, 6 insertions(+), 25 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 3a7d4706..27be6b1e 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -380,24 +380,6 @@ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]) 2 (1 row) -\d+ subpartitions.abc_1 - Table "subpartitions.abc_1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | not null | | plain | | -Check constraints: - "pathman_abc_1_check" CHECK (a >= 0 AND a < 100) -Inherits: subpartitions.abc -Child tables: subpartitions.abc_1_1, - subpartitions.abc_1_2, - subpartitions.abc_1_3, - subpartitions.abc_1_4, - subpartitions.abc_1_5, - subpartitions.abc_1_6, - subpartitions.abc_1_7, - subpartitions.abc_1_8, - subpartitions.abc_1_9 - SELECT * FROM pathman_partition_list; parent | partition | parttype | expr | range_min | range_max ---------------------+------------------------+----------+------+-----------+----------- @@ -464,14 +446,14 @@ DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 39 other objects /* subpartitions on same expression but dates */ CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); +INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); create_range_partitions ------------------------- 6 (1 row) -SELECT 
create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, +SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ WARNING: "start_value" was set to 10-02-2017 WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 72e38c60..7f38f629 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -119,7 +119,6 @@ SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ -\d+ subpartitions.abc_1 SELECT * FROM pathman_partition_list; SELECT append_range_partition('subpartitions.abc_1'::regclass); SELECT append_range_partition('subpartitions.abc_1'::regclass); @@ -138,9 +137,9 @@ DROP TABLE subpartitions.abc CASCADE; /* subpartitions on same expression but dates */ CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT current_date + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', current_date, '1 month'::INTERVAL); -SELECT create_range_partitions('subpartitions.abc_1', 'a', current_date + 1, +INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); +SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ DROP TABLE subpartitions.abc CASCADE; From 6fb5b84b0d67cc67239acc9f5b4ea156e7be05f9 Mon Sep 17 00:00:00 2001 From: 
Dmitry Ivanov Date: Tue, 3 Oct 2017 12:48:05 +0300 Subject: [PATCH 0758/1124] bump lib version to 1.4.6 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index b05c65a4..31e669e8 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.5", + "version": "1.4.6", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.5", + "version": "1.4.6", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 66925628..b887d37b 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10405 + 10406 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index e43747e1..8addc1f4 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010405 +#define CURRENT_LIB_VERSION 0x010406 void *pathman_cache_search_relid(HTAB *cache_table, From d75175a3945891e2f3c738300f160856d1a678da Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Oct 2017 14:59:21 +0300 Subject: [PATCH 0759/1124] more tests for pathman_rel_pathlist_hook() --- expected/pathman_upd_del.out | 127 +++++++++++++++++++++++---------- expected/pathman_upd_del_1.out | 41 +++++++++++ sql/pathman_upd_del.sql | 12 ++++ src/hooks.c | 11 +++ 4 files changed, 152 insertions(+), 39 deletions(-) 
diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 147ee2e6..863418b3 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -14,6 +14,7 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); create_range_partitions ------------------------- @@ -218,48 +219,57 @@ USING (SELECT * JOIN test.tmp2 a2 USING(id)) t WHERE t.id = r.id; - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +------------------------------------------------ Delete on tmp r -> Nested Loop - Join Filter: (a1.id = a2.id) - -> Append + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) -> Seq Scan on tmp2_1 a2 - -> Seq Scan on tmp2_2 a2_1 - -> Seq Scan on tmp2_3 a2_2 - -> Seq Scan on tmp2_4 a2_3 - -> Seq Scan on tmp2_5 a2_4 - -> Seq Scan on tmp2_6 a2_5 - -> Seq Scan on tmp2_7 a2_6 - -> Seq Scan on tmp2_8 a2_7 - -> Seq Scan on tmp2_9 a2_8 - -> Seq Scan on tmp2_10 a2_9 - -> Materialize - -> Nested Loop - -> Seq Scan on tmp r - -> Custom Scan (RuntimeAppend) - Prune by: (r.id = a1.id) - -> Seq Scan on tmp2_1 a1 - Filter: (r.id = id) - 
-> Seq Scan on tmp2_2 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_3 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_4 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_5 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_6 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_7 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_8 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_9 a1 - Filter: (r.id = id) - -> Seq Scan on tmp2_10 a1 - Filter: (r.id = id) -(39 rows) + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) BEGIN; DELETE FROM test.tmp r @@ -279,6 +289,45 @@ UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t WHERE t.id = r.id; ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index a1eeda7f..cce19b10 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -14,6 +14,7 @@ SET enable_seqscan = OFF; CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); create_range_partitions ------------------------- @@ -264,6 +265,46 @@ UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t WHERE t.id = r.id; ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +----------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Append + -> Seq Scan on tmp2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_1 t2_1 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2_2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2_3 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2_4 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2_5 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2_6 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2_7 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2_8 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2_9 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2_10 + Filter: (id = t.id) +(27 rows) + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index bc51f815..034f942a 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -22,6 +22,7 @@ CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); INSERT INTO test.tmp VALUES (1, 1), (2, 2); CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); @@ -186,6 +187,17 @@ WHERE t.id = r.id; ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + + /* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ EXPLAIN (COSTS OFF) WITH q AS (SELECT * FROM test.range_rel r diff --git a/src/hooks.c b/src/hooks.c index e1f34b8b..884cea09 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -358,6 +358,17 @@ pathman_rel_pathlist_hook(PlannerInfo *root, /* * Check that this child is not the parent table itself. * This is exactly how standard inheritance works. + * + * Helps with queries like this one: + * + * UPDATE test.tmp t SET value = 2 + * WHERE t.id IN (SELECT id + * FROM test.tmp2 t2 + * WHERE id = t.id); + * + * Since we disable optimizations on 9.5, we + * have to skip parent table that has already + * been expanded by standard inheritance. */ if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) { From 51f39abb49fac3d094896a0fee64edd1863ec2c5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 15:01:18 +0300 Subject: [PATCH 0760/1124] disable pruning optimizations for SELECT .. 
FOR UPDATE/SHARE/etc on PostgreSQL 9.5 --- expected/pathman_rowmarks.out | 102 +++++++++----- expected/pathman_rowmarks_1.out | 219 ++++++++++++++++++------------ sql/pathman_rowmarks.sql | 17 +++ src/compat/rowmarks_fix.c | 157 --------------------- src/hooks.c | 3 - src/include/compat/rowmarks_fix.h | 10 +- src/planner_tree_modification.c | 7 +- 7 files changed, 224 insertions(+), 291 deletions(-) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 3e37c57f..52fd3347 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -15,6 +15,7 @@ SELECT create_hash_partitions('rowmarks.first', 'id', 5); 5 (1 row) +VACUUM ANALYZE; /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; id @@ -173,23 +174,64 @@ FOR SHARE; 6 (1 row) +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan 
on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -197,10 +239,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN ----------------------------------------------- Update on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first_0.id) - -> HashAggregate - Group Key: first_0.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id < 1) @@ -212,9 +254,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(18 rows) +(16 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -237,17 +277,16 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; @@ -267,17 +306,16 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first_0 - Filter: (id = 1) -(8 rows) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) EXPLAIN (COSTS OFF) DELETE FROM 
rowmarks.second @@ -285,10 +323,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN ----------------------------------------------- Delete on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first_0.id) - -> HashAggregate - Group Key: first_0.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first_0 Filter: (id < 1) @@ -300,9 +338,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(18 rows) +(16 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index cbc0a1c6..bd21d42f 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -15,6 +15,7 @@ SELECT create_hash_partitions('rowmarks.first', 'id', 5); 5 (1 row) +VACUUM ANALYZE; /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; id @@ -38,14 +39,15 @@ SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; --------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Append + -> Seq Scan on first -> Seq Scan on first_0 -> Seq Scan on first_1 -> Seq Scan on first_2 -> Seq Scan on first_3 -> Seq Scan on first_4 -(9 rows) +(10 rows) /* Simple case (execution) */ SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; @@ -90,33 +92,35 @@ WHERE id = (SELECT id FROM rowmarks.first OFFSET 10 LIMIT 1 FOR UPDATE) FOR SHARE; - QUERY PLAN ------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------- LockRows InitPlan 1 (returns $1) -> Limit -> LockRows -> Sort - Sort Key: first_0.id + Sort Key: first_5.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 - -> Custom Scan (RuntimeAppend) - Prune by: 
(first.id = $1) - -> Seq Scan on first_0 first + -> Seq Scan on first first_5 + -> Seq Scan on first_0 first_0_1 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 first_2_1 + -> Seq Scan on first_3 first_3_1 + -> Seq Scan on first_4 first_4_1 + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 Filter: (id = $1) - -> Seq Scan on first_1 first + -> Seq Scan on first_1 Filter: (id = $1) - -> Seq Scan on first_2 first + -> Seq Scan on first_2 Filter: (id = $1) - -> Seq Scan on first_3 first + -> Seq Scan on first_3 Filter: (id = $1) - -> Seq Scan on first_4 first + -> Seq Scan on first_4 Filter: (id = $1) -(24 rows) +(26 rows) /* A little harder (execution) */ SELECT * FROM rowmarks.first @@ -147,19 +151,20 @@ FOR SHARE; -> Sort Sort Key: second.id -> Seq Scan on second - -> Custom Scan (RuntimeAppend) - Prune by: (first.id = $1) - -> Seq Scan on first_0 first + -> Append + -> Seq Scan on first Filter: (id = $1) - -> Seq Scan on first_1 first + -> Seq Scan on first_0 Filter: (id = $1) - -> Seq Scan on first_2 first + -> Seq Scan on first_1 Filter: (id = $1) - -> Seq Scan on first_3 first + -> Seq Scan on first_2 Filter: (id = $1) - -> Seq Scan on first_4 first + -> Seq Scan on first_3 Filter: (id = $1) -(19 rows) + -> Seq Scan on first_4 + Filter: (id = $1) +(20 rows) /* Two tables (execution) */ SELECT * FROM rowmarks.first @@ -173,33 +178,75 @@ FOR SHARE; 6 (1 row) +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(14 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second 
USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -207,10 +254,10 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN --------------------------------------------- Update on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first.id) - -> HashAggregate - Group Key: first.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first Filter: (id < 1) @@ -224,9 +271,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(20 rows) +(18 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 @@ -257,27 +302,26 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT 
id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) SET enable_hashjoin = t; SET enable_mergejoin = t; @@ -297,27 +341,26 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Materialize - -> Append - -> Seq Scan on first - Filter: (id = 1) - -> Seq Scan on first_0 - Filter: (id = 1) - -> Seq Scan on first_1 - Filter: (id = 1) - -> Seq Scan on first_2 - Filter: (id = 1) - -> Seq Scan on first_3 - Filter: (id = 1) - -> Seq Scan on first_4 - Filter: (id = 1) -(18 rows) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second @@ -325,10 +368,10 @@ WHERE rowmarks.second.id 
IN (SELECT id FROM rowmarks.first WHERE id < 1); QUERY PLAN --------------------------------------------- Delete on second - -> Nested Loop + -> Nested Loop Semi Join Join Filter: (second.id = first.id) - -> HashAggregate - Group Key: first.id + -> Seq Scan on second + -> Materialize -> Append -> Seq Scan on first Filter: (id < 1) @@ -342,9 +385,7 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); Filter: (id < 1) -> Seq Scan on first_4 Filter: (id < 1) - -> Materialize - -> Seq Scan on second -(20 rows) +(18 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index dac456d7..a95fbe84 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -18,6 +18,10 @@ INSERT INTO rowmarks.second SELECT generate_series(1, 10); SELECT create_hash_partitions('rowmarks.first', 'id', 5); + +VACUUM ANALYZE; + + /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; @@ -64,6 +68,19 @@ WHERE id = (SELECT id FROM rowmarks.second FOR UPDATE) FOR SHARE; +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 66257d9d..4dd1c20a 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -51,161 +51,4 @@ append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) } -#else - - -/* Special column name for rowmarks */ -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - - -static void lock_rows_visitor(Plan *plan, void *context); -static List 
*get_tableoids_list(List *tlist); - - -/* Final rowmark processing for partitioned tables */ -void -postprocess_lock_rows(List *rtable, Plan *plan) -{ - plan_tree_walker(plan, lock_rows_visitor, rtable); -} - -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. - */ -void -rowmark_add_tableoids(Query *parse) -{ - ListCell *lc; - - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } -} - -/* - * Extract target entries with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) -{ - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - 
result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if (!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); - - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; - - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); - - finished_tes = lappend(finished_tes, te); - } - } - - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); - - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } -} - - #endif diff --git a/src/hooks.c b/src/hooks.c index 884cea09..d0da940e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -636,9 +636,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { - 
/* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); - /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 8bbd2b1d..4875358e 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -24,9 +24,6 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); -#define postprocess_lock_rows(rtable, plan) ( (void) true ) -#define rowmark_add_tableoids(parse) ( (void) true ) - #else /* @@ -35,16 +32,13 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); * This is absolutely crucial for UPDATE and DELETE queries, * so we had to add some special fixes for 9.5: * - * 1) provide legacy code for RowMarks (tableoids); - * 2) disable dangerous UPDATE & DELETE optimizations. + * 1) disable dangerous UPDATE & DELETE optimizations. + * 2) disable optimizations for SELECT .. FOR UPDATE etc. 
*/ #define LEGACY_ROWMARKS_95 #define append_tle_for_rowmark(root, rc) ( (void) true ) -void postprocess_lock_rows(List *rtable, Plan *plan); -void rowmark_add_tableoids(Query *parse); - #endif diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 1163197b..b601e307 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -246,7 +246,6 @@ pathman_transform_query_walker(Node *node, void *context) assign_query_id(query); /* Apply Query tree modifiers */ - rowmark_add_tableoids(query); disable_standard_inheritance(query, current_context); handle_modification_query(query, current_context); @@ -311,6 +310,12 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) { const PartRelationInfo *prel; +#ifdef LEGACY_ROWMARKS_95 + /* Don't process queries with RowMarks on 9.5 */ + if (get_parse_rowmark(parse, current_rti)) + continue; +#endif + /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { From 1b3dbb9855fcea9dedb71371ff445ad5a60c9869 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 17:03:55 +0300 Subject: [PATCH 0761/1124] more tests for RowMarks --- expected/pathman_rowmarks.out | 15 +++++++++++++++ expected/pathman_rowmarks_1.out | 15 +++++++++++++++ sql/pathman_rowmarks.sql | 7 +++++++ 3 files changed, 37 insertions(+) diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 52fd3347..4c399e85 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -216,6 +216,21 @@ FOR UPDATE; 10 (10 rows) +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET 
enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index bd21d42f..28d3f27d 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -221,6 +221,21 @@ FOR UPDATE; 10 (10 rows) +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index a95fbe84..9864b8b9 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -81,6 +81,13 @@ JOIN rowmarks.second USING(id) ORDER BY id FOR UPDATE; +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + /* Check updates (plan) */ SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ From 48c8526aef5f1e37ff3697d8dd53c13b0aebe38e Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Mon, 9 Oct 2017 19:25:18 +0300 Subject: [PATCH 0762/1124] there is no CreateStmt.partition_info in PostgresPro Enterprise since version 10 --- src/partition_creation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index c432b2fd..f8cf8a55 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -757,7 +757,7 @@ create_single_partition_internal(Oid parent_relid, create_stmt.partbound = NULL; create_stmt.partspec = NULL; #endif -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 +#if defined(PGPRO_EE) && 
PG_VERSION_NUM < 100000 create_stmt.partition_info = NULL; #endif From cd6db20d3988c801c0a3619fc3cd565324faca20 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 19:43:16 +0300 Subject: [PATCH 0763/1124] fix prunning in CTEs on PostgreSQL 9.5 --- expected/pathman_upd_del.out | 49 ++++++++++++++++ expected/pathman_upd_del_1.out | 99 +++++++++++++++++++++++++++------ sql/pathman_upd_del.sql | 23 ++++++++ src/planner_tree_modification.c | 49 ++++++++++++---- 4 files changed, 193 insertions(+), 27 deletions(-) diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 863418b3..935b65b4 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -404,6 +404,55 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop 
cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index cce19b10..d0022855 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -229,23 +229,41 @@ USING (SELECT * JOIN test.tmp2 a2 USING(id)) t WHERE t.id = r.id; - QUERY PLAN ---------------------------------------------- + QUERY PLAN +--------------------------------------------------------- Delete on tmp r - -> Merge Join - Merge Cond: (a1.id = a2.id) - -> Merge Join - Merge Cond: (r.id = a1.id) - -> Sort - Sort Key: r.id - -> Seq Scan on tmp r - -> Sort - Sort Key: a1.id - -> Seq Scan on tmp2 a1 - -> Sort - Sort Key: a2.id - -> Seq Scan on tmp2 a2 -(14 rows) + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Nested Loop + Join Filter: (r.id = a1.id) + -> Seq Scan on tmp r + -> Materialize + -> Append + -> Seq Scan on tmp2 a1 + -> Seq Scan on tmp2_1 a1_1 + -> Seq Scan on tmp2_2 a1_2 + -> Seq Scan on tmp2_3 a1_3 + -> Seq Scan on tmp2_4 a1_4 + -> Seq Scan on tmp2_5 a1_5 + -> Seq Scan on tmp2_6 a1_6 + -> Seq Scan on tmp2_7 a1_7 + -> Seq Scan on tmp2_8 a1_8 + -> Seq Scan on tmp2_9 a1_9 + -> Seq Scan on tmp2_10 a1_10 + -> Materialize + -> Append + -> Seq Scan on tmp2 a2 + -> Seq Scan on tmp2_1 a2_1 + -> Seq Scan on tmp2_2 a2_2 + -> Seq Scan on tmp2_3 a2_3 + -> Seq Scan on tmp2_4 a2_4 + -> Seq Scan on tmp2_5 a2_5 + -> Seq Scan on tmp2_6 a2_6 + -> Seq Scan on tmp2_7 a2_7 + -> Seq Scan on tmp2_8 a2_8 + -> Seq Scan on tmp2_9 a2_9 + -> Seq Scan on tmp2_10 a2_10 +(32 rows) BEGIN; DELETE FROM test.tmp r @@ -386,6 +404,55 @@ WITH q AS (DELETE FROM test.tmp t RETURNING *) DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN 
+-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; DROP SCHEMA test CASCADE; NOTICE: drop cascades to 27 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index 034f942a..adca1e4c 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -243,6 +243,29 @@ DELETE FROM test.tmp USING q; ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; + + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b601e307..77540d95 100644 --- a/src/planner_tree_modification.c 
+++ b/src/planner_tree_modification.c @@ -62,19 +62,36 @@ (context)->TRANSFORM_CONTEXT_FIELD(command_type) = true; \ break; \ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE(context, query) \ + ( (context)->parent_cte && \ + (context)->parent_cte->ctequery == (Node *) (query) ) + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_SL(context, query) \ + ( (context)->parent_sublink && \ + (context)->parent_sublink->subselect == (Node *) (query) && \ + (context)->parent_sublink->subLinkType == CTE_SUBLINK ) + +/* Check if 'query' is CTE according to 'context' */ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE(context, query) \ + ( TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE((context), (query)) || \ + TRANSFORM_CONTEXT_QUERY_IS_CTE_SL ((context), (query)) ) + typedef struct { /* Do we have a parent CmdType query? */ - bool TRANSFORM_CONTEXT_FIELD(SELECT), - TRANSFORM_CONTEXT_FIELD(INSERT), - TRANSFORM_CONTEXT_FIELD(UPDATE), - TRANSFORM_CONTEXT_FIELD(DELETE); + bool TRANSFORM_CONTEXT_FIELD(SELECT), + TRANSFORM_CONTEXT_FIELD(INSERT), + TRANSFORM_CONTEXT_FIELD(UPDATE), + TRANSFORM_CONTEXT_FIELD(DELETE); /* Parameters for handle_modification_query() */ - ParamListInfo query_params; + ParamListInfo query_params; /* SubLink that might contain an examined query */ - SubLink *parent_sublink; + SubLink *parent_sublink; + + /* CommonTableExpr that might containt an examined query */ + CommonTableExpr *parent_cte; } transform_query_cxt; @@ -208,14 +225,24 @@ pathman_transform_query_walker(Node *node, void *context) if (node == NULL) return false; - else if (IsA(node, SubLink)) + else if (IsA(node, SubLink) || IsA(node, CommonTableExpr)) { transform_query_cxt *current_context = context, next_context; /* Initialize next context for bottom subqueries */ next_context = *current_context; - next_context.parent_sublink = (SubLink *) node; + + if (IsA(node, SubLink)) + { + next_context.parent_sublink = (SubLink *) node; + next_context.parent_cte = NULL; + } + else + { + next_context.parent_sublink = NULL; + 
next_context.parent_cte = (CommonTableExpr *) node; + } /* Handle expression subtree */ return expression_tree_walker(node, @@ -241,6 +268,8 @@ pathman_transform_query_walker(Node *node, void *context) default: break; } + next_context.parent_sublink = NULL; + next_context.parent_cte = NULL; /* Assign Query a 'queryId' */ assign_query_id(query); @@ -284,9 +313,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Don't process queries under UPDATE or DELETE (except for CTEs) */ if ((TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) && - (context->parent_sublink && - context->parent_sublink->subselect == (Node *) parse && - context->parent_sublink->subLinkType != CTE_SUBLINK)) + !TRANSFORM_CONTEXT_QUERY_IS_CTE(context, parse)) return; #endif From bd2e71c47df5eff3ea2adb6516d7971558164761 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 20:05:45 +0300 Subject: [PATCH 0764/1124] add more tests for UPDATE/DELETE on PostgreSQL 9.5 (issue #77) --- Makefile | 3 +- expected/pathman_views.out | 107 ++++++++++++++++++++++++++ expected/pathman_views_1.out | 143 +++++++++++++++++++++++++++++++++++ sql/pathman_views.sql | 63 +++++++++++++++ 4 files changed, 315 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_views.out create mode 100644 expected/pathman_views_1.out create mode 100644 sql/pathman_views.sql diff --git a/Makefile b/Makefile index 4a94480e..9e036208 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,8 @@ REGRESS = pathman_array_qual \ pathman_runtime_nodes \ pathman_update_trigger \ pathman_upd_del \ - pathman_utility_stmt + pathman_utility_stmt \ + pathman_views EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_views.out b/expected/pathman_views.out new file mode 100644 index 00000000..8d433b89 --- /dev/null +++ b/expected/pathman_views.out @@ -0,0 +1,107 @@ +/* + * ------------------------------------------- + * 
NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test 
DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out new file mode 100644 index 00000000..a0cdfda1 --- /dev/null +++ b/expected/pathman_views_1.out @@ -0,0 +1,143 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq 
Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + 
Filter: ((id = 1) OR (id = 2)) +(25 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql new file mode 100644 index 00000000..6fb2989f --- /dev/null +++ b/sql/pathman_views.sql @@ -0,0 +1,63 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; + + + +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); +insert into views._abc select generate_series(1, 100); + + +/* create a facade view */ +create view views.abc as select * from views._abc; + +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; + +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); + + +/* Test SELECT */ +explain (costs off) select * from views.abc; +explain (costs off) select * from views.abc where id = 1; +select count (*) from views.abc; + + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); +insert into views.abc values (1); + + +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; +update views.abc set id = 2 where id = 1 or id = 2; + + +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; +delete from views.abc where id = 1 or id = 2; + + + +DROP SCHEMA views CASCADE; +DROP EXTENSION pg_pathman; From 55e6dfd7f60a11d4956519b57e926e49c49b4678 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Oct 2017 20:20:21 +0300 Subject: [PATCH 0765/1124] 
execute SELECT FOR UPDATE on view (pathman_views) --- expected/pathman_views.out | 15 +++++++++++++++ expected/pathman_views_1.out | 35 +++++++++++++++++++++++++++++++++++ sql/pathman_views.sql | 2 ++ 3 files changed, 52 insertions(+) diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 8d433b89..2341919a 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -56,6 +56,21 @@ explain (costs off) select * from views.abc where id = 1; Filter: (id = 1) (3 rows) +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(4 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + select count (*) from views.abc; count ------- diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index a0cdfda1..fdf19f28 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -56,6 +56,41 @@ explain (costs off) select * from views.abc where id = 1; Filter: (id = 1) (3 rows) +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc + Filter: (id = 1) + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_2 + Filter: (id = 1) + -> Seq Scan on _abc_3 + Filter: (id = 1) + -> Seq Scan on _abc_4 + Filter: (id = 1) + -> Seq Scan on _abc_5 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 1) + -> Seq Scan on _abc_7 + Filter: (id = 1) + -> Seq Scan on _abc_8 + Filter: (id = 1) + -> Seq Scan on _abc_9 + Filter: (id = 1) +(24 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + select count (*) from views.abc; count ------- diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 6fb2989f..90118fe0 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ 
-40,6 +40,8 @@ execute procedure views.disable_modification(); /* Test SELECT */ explain (costs off) select * from views.abc; explain (costs off) select * from views.abc where id = 1; +explain (costs off) select * from views.abc where id = 1 for update; +select * from views.abc where id = 1 for update; select count (*) from views.abc; From fda1b36d1869dfe0ed643c1f8aef8323e9b1cf3c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Oct 2017 15:50:33 +0300 Subject: [PATCH 0766/1124] rename auto naming constraint if parent is renamed, take AccessShareLock on naming sequence when choosing a name for partition --- expected/pathman_utility_stmt.out | 58 +++++++++++++++- sql/pathman_utility_stmt.sql | 24 ++++++- src/hooks.c | 12 +++- src/include/init.h | 3 +- src/include/utility_stmt_hooking.h | 8 ++- src/init.c | 16 ++++- src/partition_creation.c | 31 ++++++--- src/pl_funcs.c | 2 +- src/pl_range_funcs.c | 4 +- src/utility_stmt_hooking.c | 107 ++++++++++++++++++++++------- 10 files changed, 216 insertions(+), 49 deletions(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index b8d8ad31..95c64f58 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -285,6 +285,58 @@ NOTICE: drop cascades to 797 other objects * Test auto check constraint renaming */ CREATE SCHEMA rename; +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ + regclass +------------------- + rename.parent_seq +(1 row) + +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ + regclass +----------------------- + 
rename.parent_renamed +(1 row) + +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ + regclass +--------------------------- + rename.parent_renamed_seq +(1 row) + +SELECT append_range_partition('rename.parent_renamed'); /* can append */ + append_range_partition +------------------------- + rename.parent_renamed_3 +(1 row) + +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); create_hash_partitions @@ -336,7 +388,9 @@ WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; pathman_test_inh_1_check | CHECK (a < 100) (1 row) -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -365,7 +419,7 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; (1 row) DROP SCHEMA rename CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 11 other objects /* * Test DROP INDEX CONCURRENTLY (test snapshots) */ diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index a0d4ae0e..62636f00 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -167,6 +167,25 @@ DROP SCHEMA copy_stmt_hooking CASCADE; */ CREATE SCHEMA rename; + +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); +SELECT 'rename.parent'::regclass; /* parent is OK */ +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ +ALTER TABLE rename.parent RENAME TO 
parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ +SELECT append_range_partition('rename.parent_renamed'); /* can append */ +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); ALTER TABLE rename.test_0 RENAME TO test_one; @@ -201,7 +220,9 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -216,6 +237,7 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; + DROP SCHEMA rename CASCADE; diff --git a/src/hooks.c b/src/hooks.c index d0da940e..690c398a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -874,6 +874,7 @@ pathman_process_utility_hook(Node *first_arg, Oid relation_oid; PartType part_type; AttrNumber attr_number; + bool is_parent; /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) @@ -892,10 +893,15 @@ pathman_process_utility_hook(Node *first_arg, /* Override standard RENAME statement if needed */ else if (is_pathman_related_table_rename(parsetree, - &relation_oid)) + &relation_oid, + &is_parent)) { - PathmanRenameConstraint(relation_oid, - (const RenameStmt *) parsetree); + const RenameStmt *rename_stmt = (const RenameStmt *) parsetree; + + if (is_parent) + PathmanRenameSequence(relation_oid, rename_stmt); + else + 
PathmanRenameConstraint(relation_oid, rename_stmt); } /* Override standard ALTER COLUMN TYPE statement if needed */ diff --git a/src/include/init.h b/src/include/init.h index 8addc1f4..8aea9295 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -201,7 +201,8 @@ find_children_status find_inheritance_children_array(Oid parentrelId, char *build_check_constraint_name_relid_internal(Oid relid); char *build_check_constraint_name_relname_internal(const char *relname); -char *build_sequence_name_internal(Oid relid); +char *build_sequence_name_relid_internal(Oid relid); +char *build_sequence_name_relname_internal(const char *relname); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index 6b45cde3..cc22efaf 100644 --- a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -23,7 +23,8 @@ /* Various traits */ bool is_pathman_related_copy(Node *parsetree); bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out); + Oid *relation_oid_out, + bool *is_parent_out); bool is_pathman_related_alter_column_type(Node *parsetree, Oid *parent_relid_out, AttrNumber *attr_number, @@ -32,8 +33,9 @@ bool is_pathman_related_alter_column_type(Node *parsetree, /* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, int stmt_location, int stmt_len, uint64 *processed); -void PathmanRenameConstraint(Oid partition_relid, - const RenameStmt *partition_rename_stmt); + +void PathmanRenameConstraint(Oid partition_relid, const RenameStmt *rename_stmt); +void PathmanRenameSequence(Oid parent_relid, const RenameStmt *rename_stmt); #endif /* COPY_STMT_HOOKING_H */ diff --git a/src/init.c b/src/init.c index 3219b1fa..3729bd16 100644 --- a/src/init.c +++ b/src/init.c @@ -565,6 +565,7 @@ build_check_constraint_name_relid_internal(Oid relid) char * 
build_check_constraint_name_relname_internal(const char *relname) { + AssertArg(relname != NULL); return psprintf("pathman_%s_check", relname); } @@ -573,10 +574,21 @@ build_check_constraint_name_relname_internal(const char *relname) * NOTE: this function does not perform sanity checks at all. */ char * -build_sequence_name_internal(Oid relid) +build_sequence_name_relid_internal(Oid relid) { AssertArg(OidIsValid(relid)); - return psprintf("%s_seq", get_rel_name(relid)); + return build_sequence_name_relname_internal(get_rel_name(relid)); +} + +/* + * Generate part sequence name for a parent. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_sequence_name_relname_internal(const char *relname) +{ + AssertArg(relname != NULL); + return psprintf("%s_seq", relname); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index f8cf8a55..20094a4f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -600,22 +600,31 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ static char * choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { - Datum part_num; - Oid part_seq_relid; - char *part_seq_relname; - Oid save_userid; - int save_sec_context; - bool need_priv_escalation = !superuser(); /* we might be a SU */ - char *relname; - int attempts_cnt = 1000; - - part_seq_relname = build_sequence_name_internal(parent_relid); - part_seq_relid = get_relname_relid(part_seq_relname, parent_nsp); + Datum part_num; + Oid part_seq_relid; + char *part_seq_nspname, + *part_seq_relname; + RangeVar *part_seq_rv; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *relname; + int attempts_cnt = 1000; + + /* Dispatch sequence and lock it using AccessShareLock */ + part_seq_nspname = get_namespace_name(get_rel_namespace(parent_relid)); + part_seq_relname = build_sequence_name_relid_internal(parent_relid); + part_seq_rv = 
makeRangeVar(part_seq_nspname, part_seq_relname, -1); + part_seq_relid = RangeVarGetRelid(part_seq_rv, AccessShareLock, true); /* Could not find part number generating sequence */ if (!OidIsValid(part_seq_relid)) elog(ERROR, "auto naming sequence \"%s\" does not exist", part_seq_relname); + pfree(part_seq_nspname); + pfree(part_seq_relname); + pfree(part_seq_rv); + /* Do we have to escalate privileges? */ if (need_priv_escalation) { diff --git a/src/pl_funcs.c b/src/pl_funcs.c index bb66506d..175d36de 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -901,7 +901,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Oid naming_seq; naming_seq_rv = makeRangeVar(get_namespace_name(get_rel_namespace(relid)), - build_sequence_name_internal(relid), + build_sequence_name_relid_internal(relid), -1); naming_seq = RangeVarGetRelid(naming_seq_rv, AccessShareLock, true); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 91452ba9..1b8b2ade 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -589,16 +589,18 @@ build_sequence_name(PG_FUNCTION_ARGS) { Oid parent_relid = PG_GETARG_OID(0); Oid parent_nsp; + char *seq_name; char *result; if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); result = psprintf("%s.%s", quote_identifier(get_namespace_name(parent_nsp)), - quote_identifier(build_sequence_name_internal(parent_relid))); + quote_identifier(seq_name)); PG_RETURN_TEXT_P(cstring_to_text(result)); } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 31d39bc2..f24b9543 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -134,18 +134,19 @@ is_pathman_related_copy(Node *parsetree) */ bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out) /* ret value */ + Oid *relation_oid_out, /* ret value #1 */ 
+ bool *is_parent_out) /* ret value #2 */ { RenameStmt *rename_stmt = (RenameStmt *) parsetree; - Oid partition_relid, + Oid relation_oid, parent_relid; - const PartRelationInfo *prel; PartParentSearch parent_search; + const PartRelationInfo *prel; Assert(IsPathmanReady()); /* Set default values */ - if (partition_relid_out) *partition_relid_out = InvalidOid; + if (relation_oid_out) *relation_oid_out = InvalidOid; if (!IsA(parsetree, RenameStmt)) return false; @@ -154,20 +155,33 @@ is_pathman_related_table_rename(Node *parsetree, if (rename_stmt->renameType != OBJECT_TABLE) return false; - /* Assume it's a partition, fetch its Oid */ - partition_relid = RangeVarGetRelid(rename_stmt->relation, - AccessShareLock, - false); + /* Fetch Oid of this relation */ + relation_oid = RangeVarGetRelid(rename_stmt->relation, + AccessShareLock, + false); + + /* Assume it's a parent */ + if (get_pathman_relation_info(relation_oid)) + { + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = true; + return true; + } - /* Try fetching parent of this table */ - parent_relid = get_parent_of_partition(partition_relid, &parent_search); + /* Assume it's a partition, fetch its parent */ + parent_relid = get_parent_of_partition(relation_oid, &parent_search); if (parent_search != PPS_ENTRY_PART_PARENT) return false; /* Is parent partitioned? */ if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - if (partition_relid_out) *partition_relid_out = partition_relid; + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = false; return true; } @@ -789,12 +803,12 @@ prepare_rri_for_copy(EState *estate, * Rename RANGE\HASH check constraint of a partition on table rename event. 
*/ void -PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ - const RenameStmt *part_rename_stmt) /* partition rename stmt */ +PathmanRenameConstraint(Oid partition_relid, /* partition Oid */ + const RenameStmt *rename_stmt) /* partition rename stmt */ { char *old_constraint_name, *new_constraint_name; - RenameStmt rename_stmt; + RenameStmt rename_con_stmt; /* Generate old constraint name */ old_constraint_name = @@ -802,16 +816,61 @@ PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ /* Generate new constraint name */ new_constraint_name = - build_check_constraint_name_relname_internal(part_rename_stmt->newname); + build_check_constraint_name_relname_internal(rename_stmt->newname); /* Build check constraint RENAME statement */ - memset((void *) &rename_stmt, 0, sizeof(RenameStmt)); - NodeSetTag(&rename_stmt, T_RenameStmt); - rename_stmt.renameType = OBJECT_TABCONSTRAINT; - rename_stmt.relation = part_rename_stmt->relation; - rename_stmt.subname = old_constraint_name; - rename_stmt.newname = new_constraint_name; - rename_stmt.missing_ok = false; - - RenameConstraint(&rename_stmt); + memset((void *) &rename_con_stmt, 0, sizeof(RenameStmt)); + NodeSetTag(&rename_con_stmt, T_RenameStmt); + rename_con_stmt.renameType = OBJECT_TABCONSTRAINT; + rename_con_stmt.relation = rename_stmt->relation; + rename_con_stmt.subname = old_constraint_name; + rename_con_stmt.newname = new_constraint_name; + rename_con_stmt.missing_ok = false; + + /* Finally, rename partitioning constraint */ + RenameConstraint(&rename_con_stmt); + + pfree(old_constraint_name); + pfree(new_constraint_name); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* + * Rename auto naming sequence of a parent on table rename event. 
+ */ +void +PathmanRenameSequence(Oid parent_relid, /* parent Oid */ + const RenameStmt *rename_stmt) /* parent rename stmt */ +{ + char *old_seq_name, + *new_seq_name, + *seq_nsp_name; + RangeVar *seq_rv; + Oid seq_relid; + + /* Produce old & new names and RangeVar */ + seq_nsp_name = get_namespace_name(get_rel_namespace(parent_relid)); + old_seq_name = build_sequence_name_relid_internal(parent_relid); + new_seq_name = build_sequence_name_relname_internal(rename_stmt->newname); + seq_rv = makeRangeVar(seq_nsp_name, old_seq_name, -1); + + /* Fetch Oid of sequence */ + seq_relid = RangeVarGetRelid(seq_rv, AccessExclusiveLock, true); + + /* Do nothing if there's no naming sequence */ + if (!OidIsValid(seq_relid)) + return; + + /* Finally, rename auto naming sequence */ + RenameRelationInternal(seq_relid, new_seq_name, false); + + pfree(seq_nsp_name); + pfree(old_seq_name); + pfree(new_seq_name); + pfree(seq_rv); + + /* Make changes visible */ + CommandCounterIncrement(); } From f9c842e0a9228814f69cff2b6d36f99b3d439204 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 15:59:33 +0300 Subject: [PATCH 0767/1124] make concurrent part worker more reliable, new tests --- expected/pathman_bgw.out | 77 ++++++++++++++++++++++++- sql/pathman_bgw.sql | 47 ++++++++++++++++ src/include/pathman_workers.h | 21 ++++++- src/pathman_workers.c | 103 ++++++++++++++++++---------------- 4 files changed, 197 insertions(+), 51 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 2356c1fc..f7136533 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -132,7 +132,7 @@ SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); (1 row) INSERT INTO test_bgw.test_5 VALUES (-100); -ERROR: Attempt to spawn new partitions of relation "test_5" failed +ERROR: attempt to spawn new partitions of relation "test_5" failed SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ parent | 
partition | parttype | expr | range_min | range_max -----------------+-------------------+----------+------+-----------+----------- @@ -143,5 +143,80 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP FUNCTION test_bgw.abort_xact(args JSONB); DROP TABLE test_bgw.test_5 CASCADE; NOTICE: drop cascades to 3 other objects +/* + * Tests for ConcurrentPartWorker + */ +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; + id +---- + 1 +(1 row) + +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('conc_part'); + partition_table_concurrently +------------------------------ + +(1 row) + +/* Wait until bgworker starts */ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +ROLLBACK; +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; +BEGIN + LOOP + SELECT count(*) + FROM pathman_concurrent_part_tasks + WHERE processed < 500 -- protect from endless loops + INTO ops; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + ELSE + EXIT; + END IF; + END LOOP; +END +$$ LANGUAGE plpgsql; +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; + count +------- + 0 +(1 row) + +SELECT count(*) FROM ONLY test_bgw.conc_part; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_bgw.conc_part; + count +------- + 500 +(1 row) + +DROP TABLE test_bgw.conc_part CASCADE; +NOTICE: drop cascades to 5 other objects DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_bgw.sql 
b/sql/pathman_bgw.sql index 7eedaff2..1d8a0146 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -5,6 +5,7 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_bgw; + /* * Tests for SpawnPartitionsWorker */ @@ -74,5 +75,51 @@ DROP TABLE test_bgw.test_5 CASCADE; +/* + * Tests for ConcurrentPartWorker + */ + +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +/* Wait until bgworker starts */ +SELECT pg_sleep(1); +ROLLBACK; + +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; +BEGIN + LOOP + SELECT count(*) + FROM pathman_concurrent_part_tasks + WHERE processed < 500 -- protect from endless loops + INTO ops; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + ELSE + EXIT; + END IF; + END LOOP; +END +$$ LANGUAGE plpgsql; + +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; +SELECT count(*) FROM ONLY test_bgw.conc_part; +SELECT count(*) FROM test_bgw.conc_part; + +DROP TABLE test_bgw.conc_part CASCADE; + + + DROP SCHEMA test_bgw CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/pathman_workers.h b/src/include/pathman_workers.h index 25ab5e1d..6cf73ca5 100644 --- a/src/include/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -112,10 +112,29 @@ cps_set_status(ConcurrentPartSlot *slot, ConcurrentPartSlotStatus status) SpinLockRelease(&slot->mutex); } +static inline const char * +cps_print_status(ConcurrentPartSlotStatus status) +{ + switch(status) + { + case CPS_FREE: + return "free"; + + case CPS_WORKING: + return "working"; + + case CPS_STOPPING: + return "stopping"; + + default: + return "[unknown]"; + } +} + /* Number of 
worker slots for concurrent partitioning */ -#define PART_WORKER_SLOTS 10 +#define PART_WORKER_SLOTS max_worker_processes /* Max number of attempts per batch */ #define PART_WORKER_MAX_ATTEMPTS 60 diff --git a/src/pathman_workers.c b/src/pathman_workers.c index e3bb7bf5..bb8f954c 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -57,7 +57,7 @@ extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); -static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], +static bool start_bgworker(const char bgworker_name[BGW_MAXLEN], const char bgworker_proc[BGW_MAXLEN], Datum bgw_arg, bool wait_for_shutdown); @@ -90,6 +90,7 @@ static const char *concurrent_part_bgw = "ConcurrentPartWorker"; Size estimate_concurrent_part_task_slots_size(void) { + /* NOTE: we suggest that max_worker_processes is in PGC_POSTMASTER */ return sizeof(ConcurrentPartSlot) * PART_WORKER_SLOTS; } @@ -125,6 +126,7 @@ init_concurrent_part_task_slots(void) /* * Handle SIGTERM in BGW's process. + * Use it in favor of bgworker_die(). */ static void handle_sigterm(SIGNAL_ARGS) @@ -160,8 +162,8 @@ bg_worker_load_config(const char *bgw_name) /* * Common function to start background worker. 
*/ -static void -start_bg_worker(const char bgworker_name[BGW_MAXLEN], +static bool +start_bgworker(const char bgworker_name[BGW_MAXLEN], const char bgworker_proc[BGW_MAXLEN], Datum bgw_arg, bool wait_for_shutdown) { @@ -218,10 +220,9 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], switch (exec_state) { + /* Caller might want to handle this case */ case BGW_COULD_NOT_START: - elog(ERROR, "Unable to create background %s for pg_pathman", - bgworker_name); - break; + return false; case BGW_PM_DIED: ereport(ERROR, @@ -232,6 +233,8 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], default: break; } + + return true; } @@ -311,10 +314,10 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) #endif /* Start worker and wait for it to finish */ - start_bg_worker(spawn_partitions_bgw, - CppAsString(bgw_main_spawn_partitions), - UInt32GetDatum(segment_handle), - true); + (void) start_bgworker(spawn_partitions_bgw, + CppAsString(bgw_main_spawn_partitions), + UInt32GetDatum(segment_handle), + true); /* Save the result (partition Oid) */ child_oid = bgw_args->result; @@ -324,7 +327,7 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) if (child_oid == InvalidOid) ereport(ERROR, - (errmsg("Attempt to spawn new partitions of relation \"%s\" failed", + (errmsg("attempt to spawn new partitions of relation \"%s\" failed", get_rel_name_or_relid(relid)), errhint("See server log for more details."))); @@ -412,6 +415,15 @@ bgw_main_spawn_partitions(Datum main_arg) * ------------------------------------- */ +/* Free bgworker's CPS slot */ +static void +free_cps_slot(int code, Datum arg) +{ + ConcurrentPartSlot *part_slot =(ConcurrentPartSlot *) DatumGetPointer(arg); + + cps_set_status(part_slot, CPS_FREE); +} + /* * Entry point for ConcurrentPartWorker's process. 
*/ @@ -424,7 +436,14 @@ bgw_main_concurrent_part(Datum main_arg) char *sql = NULL; ConcurrentPartSlot *part_slot; - /* Establish signal handlers before unblocking signals. */ + /* Update concurrent part slot */ + part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; + part_slot->pid = MyProcPid; + + /* Establish atexit callback that will fre CPS slot */ + on_proc_exit(free_cps_slot, PointerGetDatum(part_slot)); + + /* Establish signal handlers before unblocking signals */ pqsignal(SIGTERM, handle_sigterm); /* We're now ready to receive signals */ @@ -433,10 +452,6 @@ bgw_main_concurrent_part(Datum main_arg) /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, concurrent_part_bgw); - /* Update concurrent part slot */ - part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; - part_slot->pid = MyProcPid; - /* Disable auto partition propagation */ SetAutoPartitionEnabled(false); @@ -461,6 +476,8 @@ bgw_main_concurrent_part(Datum main_arg) failed = false; rows = 0; + CHECK_FOR_INTERRUPTS(); + /* Start new transaction (syscache access etc.) 
*/ StartTransactionCommand(); @@ -592,12 +609,13 @@ bgw_main_concurrent_part(Datum main_arg) /* Add rows to total_rows */ SpinLockAcquire(&part_slot->mutex); part_slot->total_rows += rows; -/* Report debug message */ + SpinLockRelease(&part_slot->mutex); + #ifdef USE_ASSERT_CHECKING + /* Report debug message */ elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " [%u]", concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); #endif - SpinLockRelease(&part_slot->mutex); } /* If other backend requested to stop us, quit */ @@ -608,9 +626,6 @@ bgw_main_concurrent_part(Datum main_arg) /* Reclaim the resources */ pfree(sql); - - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); } @@ -694,9 +709,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) if (empty_slot_idx >= 0 && empty_slot_idx != i) SpinLockRelease(&concurrent_part_slots[empty_slot_idx].mutex); - elog(ERROR, - "table \"%s\" is already being partitioned", - get_rel_name(relid)); + ereport(ERROR, (errmsg("table \"%s\" is already being partitioned", + get_rel_name(relid)))); } /* Normally we don't want to keep it */ @@ -706,7 +720,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - elog(ERROR, "no empty worker slots found"); + ereport(ERROR, (ERRCODE_CONFIGURATION_LIMIT_EXCEEDED, + errmsg("no empty worker slots found"), + errhint("consider increasing max_worker_processes"))); else { /* Initialize concurrent part slot */ @@ -719,10 +735,14 @@ partition_table_concurrently(PG_FUNCTION_ARGS) } /* Start worker (we should not wait) */ - start_bg_worker(concurrent_part_bgw, - CppAsString(bgw_main_concurrent_part), - Int32GetDatum(empty_slot_idx), - false); + if (!start_bgworker(concurrent_part_bgw, + CppAsString(bgw_main_concurrent_part), + Int32GetDatum(empty_slot_idx), + false)) + { + /* Couldn't start, free CPS slot */ + cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + } /* Tell user everything's 
fine */ elog(NOTICE, @@ -807,22 +827,8 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; /* Now build a status string */ - switch(cur_slot->worker_status) - { - case CPS_WORKING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("working")); - break; - - case CPS_STOPPING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("stopping")); - break; - - default: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("[unknown]")); - } + values[Anum_pathman_cp_tasks_status - 1] = + CStringGetTextDatum(cps_print_status(cur_slot->worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -857,26 +863,25 @@ stop_concurrent_part_task(PG_FUNCTION_ARGS) { ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; - HOLD_INTERRUPTS(); SpinLockAcquire(&cur_slot->mutex); if (cur_slot->worker_status != CPS_FREE && cur_slot->relid == relid && cur_slot->dbid == MyDatabaseId) { - elog(NOTICE, "worker will stop after it finishes current batch"); - /* Change worker's state & set 'worker_found' */ cur_slot->worker_status = CPS_STOPPING; worker_found = true; } SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); } if (worker_found) + { + elog(NOTICE, "worker will stop after it finishes current batch"); PG_RETURN_BOOL(true); + } else { elog(ERROR, "cannot find worker for relation \"%s\"", From 485f02577705bd39dbc0758cc7f38e9507a0a2ae Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 16:24:53 +0300 Subject: [PATCH 0768/1124] improve error messages in BGWs --- src/pathman_workers.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index bb8f954c..f2944bfb 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -237,6 +237,16 @@ start_bgworker(const char 
bgworker_name[BGW_MAXLEN], return true; } +/* + * Show generic error message if we failed to start bgworker. + */ +static inline void +start_bgworker_errmsg(const char *bgworker_name) +{ + ereport(ERROR, (errmsg("could not start %s", bgworker_name), + errhint("consider increasing max_worker_processes"))); +} + /* * -------------------------------------- @@ -314,10 +324,13 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) #endif /* Start worker and wait for it to finish */ - (void) start_bgworker(spawn_partitions_bgw, - CppAsString(bgw_main_spawn_partitions), - UInt32GetDatum(segment_handle), - true); + if (!start_bgworker(spawn_partitions_bgw, + CppAsString(bgw_main_spawn_partitions), + UInt32GetDatum(segment_handle), + true)) + { + start_bgworker_errmsg(spawn_partitions_bgw); + } /* Save the result (partition Oid) */ child_oid = bgw_args->result; @@ -742,6 +755,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) { /* Couldn't start, free CPS slot */ cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + + start_bgworker_errmsg(concurrent_part_bgw); } /* Tell user everything's fine */ From 6494216767a6d3fb96c9eac63cd3fc0dedc43468 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Oct 2017 19:37:24 +0300 Subject: [PATCH 0769/1124] fix race conditions in tests in pathman_bgw --- expected/pathman_bgw.out | 29 ++++++++++++++++++++++++----- sql/pathman_bgw.sql | 29 ++++++++++++++++++++++++----- src/pathman_workers.c | 2 +- 3 files changed, 49 insertions(+), 11 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index f7136533..a38d2096 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -181,19 +181,38 @@ ROLLBACK; /* Wait until it finises */ DO $$ DECLARE - ops int8; + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop BEGIN LOOP - SELECT count(*) + SELECT processed FROM pathman_concurrent_part_tasks - WHERE processed < 500 -- protect 
from endless loops - INTO ops; + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; IF ops > 0 THEN PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + END IF; ELSE - EXIT; + EXIT; -- exit loop END IF; + + IF i > 50 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; END LOOP; END $$ LANGUAGE plpgsql; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 1d8a0146..e8d7df4f 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -95,19 +95,38 @@ ROLLBACK; /* Wait until it finises */ DO $$ DECLARE - ops int8; + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop BEGIN LOOP - SELECT count(*) + SELECT processed FROM pathman_concurrent_part_tasks - WHERE processed < 500 -- protect from endless loops - INTO ops; + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; IF ops > 0 THEN PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + END IF; ELSE - EXIT; + EXIT; -- exit loop END IF; + + IF i > 50 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; END LOOP; END $$ LANGUAGE plpgsql; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index f2944bfb..10bf15ad 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -432,7 +432,7 @@ bgw_main_spawn_partitions(Datum main_arg) static void free_cps_slot(int code, Datum arg) { - ConcurrentPartSlot *part_slot =(ConcurrentPartSlot *) DatumGetPointer(arg); + ConcurrentPartSlot *part_slot = (ConcurrentPartSlot *) DatumGetPointer(arg); cps_set_status(part_slot, CPS_FREE); } From a49fdacb423b9e3e9dfb45875ffb3542ca66dbc1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 15:27:23 +0300 
Subject: [PATCH 0770/1124] bugfixes and improved error handling in ConcurrentPartWorker --- src/include/pathman_workers.h | 2 +- src/pathman_workers.c | 136 +++++++++++++++++++++------------- 2 files changed, 84 insertions(+), 54 deletions(-) diff --git a/src/include/pathman_workers.h b/src/include/pathman_workers.h index 6cf73ca5..be4d6425 100644 --- a/src/include/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -74,7 +74,7 @@ typedef struct pid_t pid; /* worker's PID */ Oid dbid; /* database which contains the relation */ Oid relid; /* table to be partitioned concurrently */ - uint64 total_rows; /* total amount of rows processed */ + int64 total_rows; /* total amount of rows processed */ int32 batch_size; /* number of rows in a batch */ float8 sleep_time; /* how long should we sleep in case of error? */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 10bf15ad..ccacdace 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -443,11 +443,12 @@ free_cps_slot(int code, Datum arg) void bgw_main_concurrent_part(Datum main_arg) { - int rows; + ConcurrentPartSlot *part_slot; + char *sql = NULL; + int64 rows; bool failed; int failures_count = 0; - char *sql = NULL; - ConcurrentPartSlot *part_slot; + LOCKMODE lockmode = RowExclusiveLock; /* Update concurrent part slot */ part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; @@ -479,12 +480,14 @@ bgw_main_concurrent_part(Datum main_arg) /* Do the job */ do { - MemoryContext old_mcxt; + MemoryContext old_mcxt; Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; bool nulls[2] = { false, false }; + bool rel_locked = false; + /* Reset loop variables */ failed = false; rows = 0; @@ -520,44 +523,73 @@ bgw_main_concurrent_part(Datum main_arg) /* Exec ret = _partition_data_concurrent() */ PG_TRY(); { - /* Make sure that relation exists and has partitions */ - if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) && - 
get_pathman_relation_info(part_slot->relid) != NULL) - { - int ret; - bool isnull; + int ret; + bool isnull; - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); - if (ret == SPI_OK_SELECT) - { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - HeapTuple tuple = SPI_tuptable->vals[0]; + /* Lock relation for DELETE and INSERT */ + if (!ConditionalLockRelationOid(part_slot->relid, lockmode)) + { + elog(ERROR, "could not take lock on relation %u", part_slot->relid); + } - Assert(SPI_processed == 1); /* there should be 1 result at most */ + /* Great, now relation is locked */ + rel_locked = true; - rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + /* Make sure that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) + { + /* Exit after we raise ERROR */ + failures_count = PART_WORKER_MAX_ATTEMPTS; - Assert(!isnull); /* ... and ofc it must not be NULL */ - } + elog(ERROR, "relation %u does not exist", part_slot->relid); } - /* Otherwise it's time to exit */ - else + + /* Make sure that relation has partitions */ + if (get_pathman_relation_info(part_slot->relid) == NULL) { + /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - elog(LOG, "relation \"%u\" is not partitioned (or does not exist)", - part_slot->relid); + elog(ERROR, "relation \"%s\" is not partitioned", + get_rel_name(part_slot->relid)); + } + + /* Call concurrent partitioning function */ + ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); + if (ret == SPI_OK_SELECT) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tuple = SPI_tuptable->vals[0]; + + /* There should be 1 result at most */ + Assert(SPI_processed == 1); + + /* Extract number of processed rows */ + rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(!isnull); /* ... 
and ofc it must not be NULL */ } + /* Else raise generic error */ + else elog(ERROR, "partitioning function returned %u", ret); + + /* Finally, unlock our partitioned table */ + UnlockRelationOid(part_slot->relid, lockmode); } PG_CATCH(); { /* * The most common exception we can catch here is a deadlock with * concurrent user queries. Check that attempts count doesn't exceed - * some reasonable value + * some reasonable value. */ - ErrorData *error; - char *sleep_time_str; + ErrorData *error; + + /* Unlock relation if we caught ERROR too early */ + if (rel_locked) + UnlockRelationOid(part_slot->relid, lockmode); + + /* Increase number of failures and set 'failed' status */ + failures_count++; + failed = true; /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); @@ -565,21 +597,15 @@ bgw_main_concurrent_part(Datum main_arg) FlushErrorState(); /* Print messsage for this BGWorker to server log */ - sleep_time_str = datum_to_cstring(Float8GetDatum(part_slot->sleep_time), - FLOAT8OID); - failures_count++; ereport(LOG, (errmsg("%s: %s", concurrent_part_bgw, error->message), - errdetail("attempt: %d/%d, sleep time: %s", + errdetail("attempt: %d/%d, sleep time: %.2f", failures_count, PART_WORKER_MAX_ATTEMPTS, - sleep_time_str))); - pfree(sleep_time_str); /* free the time string */ + (float) part_slot->sleep_time))); + /* Finally, free error data */ FreeErrorData(error); - - /* Set 'failed' flag */ - failed = true; } PG_END_TRY(); @@ -606,9 +632,10 @@ bgw_main_concurrent_part(Datum main_arg) /* Failed this time, wait */ else if (failed) { - /* Abort transaction and sleep for a second */ + /* Abort transaction */ AbortCurrentTransaction(); + /* Sleep for a specified amount of time (default 1s) */ DirectFunctionCall1(pg_sleep, Float8GetDatum(part_slot->sleep_time)); } @@ -626,8 +653,10 @@ bgw_main_concurrent_part(Datum main_arg) #ifdef USE_ASSERT_CHECKING /* Report debug message */ - elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " 
[%u]", - concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); + elog(DEBUG1, "%s: " + "relocated" INT64_FORMAT "rows, " + "total: " INT64_FORMAT, + concurrent_part_bgw, rows, part_slot->total_rows); #endif } @@ -636,9 +665,6 @@ bgw_main_concurrent_part(Datum main_arg) break; } while(rows > 0 || failed); /* do while there's still rows to be relocated */ - - /* Reclaim the resources */ - pfree(sql); } @@ -824,26 +850,33 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Iterate through worker slots */ for (i = userctx->cur_idx; i < PART_WORKER_SLOTS; i++) { - ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; + ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i], + slot_copy; HeapTuple htup = NULL; - HOLD_INTERRUPTS(); + /* Copy slot to process local memory */ SpinLockAcquire(&cur_slot->mutex); + memcpy(&slot_copy, cur_slot, sizeof(ConcurrentPartSlot)); + SpinLockRelease(&cur_slot->mutex); - if (cur_slot->worker_status != CPS_FREE) + if (slot_copy.worker_status != CPS_FREE) { Datum values[Natts_pathman_cp_tasks]; bool isnull[Natts_pathman_cp_tasks] = { 0 }; - values[Anum_pathman_cp_tasks_userid - 1] = cur_slot->userid; - values[Anum_pathman_cp_tasks_pid - 1] = cur_slot->pid; - values[Anum_pathman_cp_tasks_dbid - 1] = cur_slot->dbid; - values[Anum_pathman_cp_tasks_relid - 1] = cur_slot->relid; - values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; + values[Anum_pathman_cp_tasks_userid - 1] = slot_copy.userid; + values[Anum_pathman_cp_tasks_pid - 1] = slot_copy.pid; + values[Anum_pathman_cp_tasks_dbid - 1] = slot_copy.dbid; + values[Anum_pathman_cp_tasks_relid - 1] = slot_copy.relid; + + /* Record processed rows */ + values[Anum_pathman_cp_tasks_processed - 1] = + /* FIXME: use Int64GetDatum() in release 1.5 */ + Int32GetDatum((int32) slot_copy.total_rows); /* Now build a status string */ values[Anum_pathman_cp_tasks_status - 1] = - CStringGetTextDatum(cps_print_status(cur_slot->worker_status)); + 
CStringGetTextDatum(cps_print_status(slot_copy.worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -852,9 +885,6 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = i + 1; } - SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); - /* Return tuple if needed */ if (htup) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(htup)); From 6b00d812b9396353fff72d42181278c4bd19b68f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 15:44:58 +0300 Subject: [PATCH 0771/1124] hide false positives found by clang analyzer --- src/pathman_workers.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ccacdace..8cd23fd7 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -534,12 +534,14 @@ bgw_main_concurrent_part(Datum main_arg) /* Great, now relation is locked */ rel_locked = true; + (void) rel_locked; /* mute clang analyzer */ /* Make sure that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; + (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation %u does not exist", part_slot->relid); } @@ -549,6 +551,7 @@ bgw_main_concurrent_part(Datum main_arg) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; + (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation \"%s\" is not partitioned", get_rel_name(part_slot->relid)); From 546a99bb0b43158c15dfabbe960c14b5fff83059 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 16:00:54 +0300 Subject: [PATCH 0772/1124] return 64-bit total_rows from show_concurrent_part_tasks_internal() --- init.sql | 2 +- src/pathman_workers.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/init.sql b/init.sql index 5dd808ec..f54d48eb 100644 --- a/init.sql +++ b/init.sql @@ -285,7 +285,7 @@ 
RETURNS TABLE ( pid INT, dbid OID, relid REGCLASS, - processed INT, + processed INT8, status TEXT) AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 8cd23fd7..b5cb0721 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -837,7 +837,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_relid, "relid", REGCLASSOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_processed, - "processed", INT4OID, -1, 0); + "processed", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_status, "status", TEXTOID, -1, 0); @@ -874,8 +874,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Record processed rows */ values[Anum_pathman_cp_tasks_processed - 1] = - /* FIXME: use Int64GetDatum() in release 1.5 */ - Int32GetDatum((int32) slot_copy.total_rows); + Int64GetDatum(slot_copy.total_rows); /* Now build a status string */ values[Anum_pathman_cp_tasks_status - 1] = From 4d7ce5db980005cd5fb4f309d0eea1ff7740b557 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Oct 2017 16:09:21 +0300 Subject: [PATCH 0773/1124] check type of 'rows' in bgw_main_concurrent_part() --- src/pathman_workers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 8cd23fd7..ffc0f4a5 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -569,6 +569,7 @@ bgw_main_concurrent_part(Datum main_arg) /* Extract number of processed rows */ rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(tupdesc->attrs[0]->atttypid == INT8OID); /* check type */ Assert(!isnull); /* ... 
and ofc it must not be NULL */ } /* Else raise generic error */ From 2e0efa436a93a22476e3cb274d8435892ece1b4f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Oct 2017 14:20:15 +0300 Subject: [PATCH 0774/1124] improve pathman_bgw tests --- expected/pathman_bgw.out | 3 ++- sql/pathman_bgw.sql | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index a38d2096..a02cfc65 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -200,7 +200,8 @@ BEGIN ASSERT rows IS NOT NULL; - IF rows_old = rows THEN + -- rows should increase! + IF rows_old <= rows THEN i = i + 1; END IF; ELSE diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index e8d7df4f..edd40c81 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -114,7 +114,8 @@ BEGIN ASSERT rows IS NOT NULL; - IF rows_old = rows THEN + -- rows should increase! + IF rows_old <= rows THEN i = i + 1; END IF; ELSE From c44d6d5f85ea77b3fda7f23d253cc53f92b25405 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Oct 2017 14:26:35 +0300 Subject: [PATCH 0775/1124] bump lib version to 1.4.7 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 31e669e8..2718f180 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.6", + "version": "1.4.7", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.6", + "version": "1.4.7", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index b887d37b..33af45fa 100644 --- a/expected/pathman_calamity.out +++ 
b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10406 + 10407 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8aea9295..73f58c8b 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -157,7 +157,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010406 +#define CURRENT_LIB_VERSION 0x010407 void *pathman_cache_search_relid(HTAB *cache_table, From 0faf90ffed748638729839c7e8bd0c25f4e22419 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 17 Oct 2017 16:49:35 +0300 Subject: [PATCH 0776/1124] fix error code in pathman_workers.c --- src/pathman_workers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ffc0f4a5..d6d9a953 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -763,7 +763,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - ereport(ERROR, (ERRCODE_CONFIGURATION_LIMIT_EXCEEDED, + ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("no empty worker slots found"), errhint("consider increasing max_worker_processes"))); else From 9a7050562b992c299995c10da52bafbe0f8c6d17 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 17 Oct 2017 17:49:44 +0300 Subject: [PATCH 0777/1124] add more sanity checks to pathman_bgw tests --- expected/pathman_bgw.out | 8 ++++++-- sql/pathman_bgw.sql | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index a02cfc65..4166ef4e 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -187,6 +187,7 @@ DECLARE i int4 := 0; -- protect from endless loop BEGIN LOOP + -- get total number 
of processed rows SELECT processed FROM pathman_concurrent_part_tasks WHERE relid = 'test_bgw.conc_part'::regclass @@ -200,9 +201,12 @@ BEGIN ASSERT rows IS NOT NULL; - -- rows should increase! - IF rows_old <= rows THEN + IF rows_old = rows THEN i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; END IF; ELSE EXIT; -- exit loop diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index edd40c81..e05a829d 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -101,6 +101,7 @@ DECLARE i int4 := 0; -- protect from endless loop BEGIN LOOP + -- get total number of processed rows SELECT processed FROM pathman_concurrent_part_tasks WHERE relid = 'test_bgw.conc_part'::regclass @@ -114,9 +115,12 @@ BEGIN ASSERT rows IS NOT NULL; - -- rows should increase! - IF rows_old <= rows THEN + IF rows_old = rows THEN i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; END IF; ELSE EXIT; -- exit loop From cc7cc957198633859bd9fa559352d48514200f4b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 21 Oct 2017 20:41:57 +0300 Subject: [PATCH 0778/1124] improve compatibility with pg_dump --- expected/pathman_utility_stmt.out | 114 +++++------------------ sql/pathman_utility_stmt.sql | 28 +++--- src/hooks.c | 2 +- src/utility_stmt_hooking.c | 144 ++++++------------------------ 4 files changed, 62 insertions(+), 226 deletions(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 95c64f58..37149f1e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -24,70 +24,6 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY 
copy_stmt_hooking.test TO stdout; -1 comment \N \N -2 comment \N \N -3 comment \N \N -4 comment \N \N -5 comment \N \N -6 comment \N \N -7 comment \N \N -8 comment \N \N -9 comment \N \N -10 comment \N \N -11 comment \N \N -12 comment \N \N -13 comment \N \N -14 comment \N \N -15 comment \N \N -16 comment \N \N -17 comment \N \N -18 comment \N \N -19 comment \N \N -20 comment \N \N -\copy copy_stmt_hooking.test to stdout (format csv) -1,comment,, -2,comment,, -3,comment,, -4,comment,, -5,comment,, -6,comment,, -7,comment,, -8,comment,, -9,comment,, -10,comment,, -11,comment,, -12,comment,, -13,comment,, -14,comment,, -15,comment,, -16,comment,, -17,comment,, -18,comment,, -19,comment,, -20,comment,, -\copy copy_stmt_hooking.test(comment) to stdout -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -113,32 +49,30 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -1 -6 -7 -11 -16 -COPY copy_stmt_hooking.test (val, comment) TO stdout; -1 test_1 -6 test_2 -7 test_2 -11 test_3 -16 test_4 -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -0 1 test_1 -0 6 test_2 -0 7 test_2 -0 11 test_3 -0 16 test_4 -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 +COPY 
(SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +1,test_1,0,0 +6,test_2,0,0 +7,test_2,0,0 +11,test_3,0,0 +16,test_4,0,0 +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout 1 test_1 0 0 6 test_2 0 0 7 test_2 0 0 11 test_3 0 0 16 test_4 0 0 -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; ERROR: no suitable partition for key '21' @@ -147,7 +81,7 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -----+---------+----+---- (0 rows) -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; SELECT * FROM copy_stmt_hooking.test WHERE val > 20; @@ -194,8 +128,8 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; 3 (1 row) -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; 1 0 0 6 0 0 7 0 0 @@ -203,9 +137,9 @@ COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; 16 0 0 21 0 0 26 1 2 -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; /* check tuples from last partition (without dropped column) */ SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 62636f00..c7d25051 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -28,11 +28,6 @@ VACUUM FULL 
copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY copy_stmt_hooking.test TO stdout; -\copy copy_stmt_hooking.test to stdout (format csv) -\copy copy_stmt_hooking.test(comment) to stdout - /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -52,20 +47,21 @@ VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -COPY copy_stmt_hooking.test (val, comment) TO stdout; -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 \. 
SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 @@ -98,16 +94,16 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; SELECT count(*) FROM pg_attribute WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 2 1 2 \. -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 27 1 2 \. 
diff --git a/src/hooks.c b/src/hooks.c index 690c398a..b8fc39db 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -886,7 +886,7 @@ pathman_process_utility_hook(Node *first_arg, stmt_location, stmt_len, &processed); if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, - "PATHMAN COPY " UINT64_FORMAT, processed); + "COPY " UINT64_FORMAT, processed); return; /* don't call standard_ProcessUtility() or hooks */ } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f24b9543..103f194e 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -55,6 +55,10 @@ ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #endif +#define PATHMAN_COPY_READ_LOCK AccessShareLock +#define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock + + static uint64 PathmanCopyFrom(CopyState cstate, Relation parent_rel, List *range_table, @@ -94,8 +98,8 @@ is_pathman_related_copy(Node *parsetree) /* Get partition's Oid while locking it */ parent_relid = RangeVarGetRelid(copy_stmt->relation, (copy_stmt->is_from ? - RowExclusiveLock : - AccessShareLock), + PATHMAN_COPY_WRITE_LOCK : + PATHMAN_COPY_READ_LOCK), false); /* Check that relation is partitioned */ @@ -346,12 +350,12 @@ PathmanDoCopy(const CopyStmt *stmt, uint64 *processed) { CopyState cstate; - bool is_from = stmt->is_from; - bool pipe = (stmt->filename == NULL); + ParseState *pstate; Relation rel; - Node *query = NULL; List *range_table = NIL; - ParseState *pstate; + bool is_from = stmt->is_from, + pipe = (stmt->filename == NULL), + is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && pipe; /* Disallow COPY TO/FROM file or program except to superusers. */ if (!pipe && !superuser()) @@ -404,96 +408,22 @@ PathmanDoCopy(const CopyStmt *stmt, } ExecCheckRTPerms(range_table, true); - /* - * We should perform a query instead of low-level heap scan whenever: - * a) table has a RLS policy; - * b) table is partitioned & it's COPY FROM. 
- */ - if (check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED || - is_from == false) /* rewrite COPY table TO statements */ + /* Disable COPY FROM if table has RLS */ + if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) { - SelectStmt *select; - RangeVar *from; - List *target_list = NIL; - - if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY FROM not supported with row-level security"), errhint("Use INSERT statements instead."))); + } - /* Build target list */ - if (!stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - cr = makeNode(ColumnRef); - cr->fields = list_make1(makeNode(A_Star)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. */ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - target_list = list_make1(target); - } - else - { - ListCell *lc; - - foreach(lc, stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - /* - * Build the ColumnRef for each column. The ColumnRef - * 'fields' property is a String 'Value' node (see - * nodes/value.h) that corresponds to the column name - * respectively. - */ - cr = makeNode(ColumnRef); - cr->fields = list_make1(lfirst(lc)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. */ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - /* Add each column to the SELECT statements target list */ - target_list = lappend(target_list, target); - } - } - - /* - * Build RangeVar for from clause, fully qualified based on the - * relation which we have opened and locked. 
- */ - from = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)), - RelationGetRelationName(rel), -1); - - /* Build query */ - select = makeNode(SelectStmt); - select->targetList = target_list; - select->fromClause = list_make1(from); - - query = (Node *) select; - - /* - * Close the relation for now, but keep the lock on it to prevent - * changes between now and when we start the query-based COPY. - * - * We'll reopen it later as part of the query-based COPY. - */ - heap_close(rel, NoLock); - rel = NULL; + /* Disable COPY TO */ + if (!is_from) + { + ereport(WARNING, + (errmsg("COPY TO will only select rows from parent table \"%s\"", + RelationGetRelationName(rel)), + errhint("Consider using the COPY (SELECT ...) TO variant."))); } } @@ -503,19 +433,12 @@ PathmanDoCopy(const CopyStmt *stmt, pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; - /* COPY ... FROM ... */ if (is_from) { - bool is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && - stmt->filename == NULL; - - /* There should be relation */ - if (!rel) elog(FATAL, "No relation for PATHMAN COPY FROM"); - /* check read-only transaction and parallel mode */ if (XactReadOnly && !rel->rd_islocaltemp) - PreventCommandIfReadOnly("PATHMAN COPY FROM"); - PreventCommandIfParallelMode("PATHMAN COPY FROM"); + PreventCommandIfReadOnly("COPY FROM"); + PreventCommandIfParallelMode("COPY FROM"); cstate = BeginCopyFromCompat(pstate, rel, stmt->filename, stmt->is_program, NULL, stmt->attlist, @@ -523,31 +446,14 @@ PathmanDoCopy(const CopyStmt *stmt, *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); EndCopyFrom(cstate); } - /* COPY ... TO ... 
*/ else { - CopyStmt modified_copy_stmt; - - /* We should've created a query */ - Assert(query); - - /* Copy 'stmt' and override some of the fields */ - modified_copy_stmt = *stmt; - modified_copy_stmt.relation = NULL; - modified_copy_stmt.query = query; - /* Call standard DoCopy using a new CopyStmt */ - DoCopyCompat(pstate, &modified_copy_stmt, stmt_location, stmt_len, - processed); + DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); } - /* - * Close the relation. If reading, we can release the AccessShareLock we - * got; if writing, we should hold the lock until end of transaction to - * ensure that updates will be committed before lock is released. - */ - if (rel != NULL) - heap_close(rel, (is_from ? NoLock : AccessShareLock)); + /* Close the relation, but keep it locked */ + heap_close(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); } /* From 3370dc42b21875f05c5a949966d4763199c6f963 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Oct 2017 16:29:32 +0300 Subject: [PATCH 0779/1124] add issue template --- .github/ISSUE_TEMPLATE.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..5ad2562c --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,28 @@ + + + +### Problem description + +Explain your problem here (it's always better to provide reproduction steps) ... 
+ + + +### Environment + + + + + + + + + + + + + From 83dc5afcc96bec3d51a3f62d76a695568b8ced4d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Oct 2017 19:43:49 +0300 Subject: [PATCH 0780/1124] fix complete cache invalidation event handling --- src/hooks.c | 23 +++- src/include/init.h | 1 + src/include/relation_info.h | 2 + src/init.c | 8 ++ src/relation_info.c | 232 ++++++++++++++++++++++++++---------- 5 files changed, 199 insertions(+), 67 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b8fc39db..3503f857 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -36,6 +36,11 @@ #include "utils/lsyscache.h" +#ifdef USE_ASSERT_CHECKING +#define USE_RELCACHE_LOGGING +#endif + + /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) @@ -808,6 +813,18 @@ pathman_relcache_hook(Datum arg, Oid relid) if (!IsPathmanReady()) return; + /* Special case: flush whole relcache */ + if (relid == InvalidOid) + { + delay_invalidation_whole_cache(); + +#ifdef USE_RELCACHE_LOGGING + elog(DEBUG2, "Invalidation message for all relations [%u]", MyProcPid); +#endif + + return; + } + /* We shouldn't even consider special OIDs */ if (relid < FirstNormalObjectId) return; @@ -827,16 +844,20 @@ pathman_relcache_hook(Datum arg, Oid relid) { delay_invalidation_parent_rel(parent_relid); +#ifdef USE_RELCACHE_LOGGING elog(DEBUG2, "Invalidation message for partition %u [%u]", relid, MyProcPid); +#endif } /* We can't say, perform full invalidation procedure */ else { delay_invalidation_vague_rel(relid); - elog(DEBUG2, "Invalidation message for vague relation %u [%u]", +#ifdef USE_RELCACHE_LOGGING + elog(DEBUG2, "Invalidation message for vague rel %u [%u]", relid, MyProcPid); +#endif } } diff --git a/src/include/init.h b/src/include/init.h index 73f58c8b..763292f0 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -54,6 +54,7 @@ typedef struct #define PATHMAN_MCXT_COUNT 4 extern MemoryContext 
TopPathmanContext; +extern MemoryContext PathmanInvalJobsContext; extern MemoryContext PathmanRelationCacheContext; extern MemoryContext PathmanParentCacheContext; extern MemoryContext PathmanBoundCacheContext; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index cbc16b6e..c4bc3a05 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -275,6 +275,7 @@ const PartRelationInfo *refresh_pathman_relation_info(Oid relid, Datum *values, bool allow_incomplete); PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); +void invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count); void remove_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, @@ -296,6 +297,7 @@ char *canonicalize_partitioning_expression(const Oid relid, /* Global invalidation routines */ void delay_pathman_shutdown(void); +void delay_invalidation_whole_cache(void); void delay_invalidation_parent_rel(Oid parent); void delay_invalidation_vague_rel(Oid vague_rel); void finish_delayed_invalidation(void); diff --git a/src/init.c b/src/init.c index 3729bd16..80ba4f0a 100644 --- a/src/init.c +++ b/src/init.c @@ -41,6 +41,7 @@ /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; +MemoryContext PathmanInvalJobsContext = NULL; MemoryContext PathmanRelationCacheContext = NULL; MemoryContext PathmanParentCacheContext = NULL; MemoryContext PathmanBoundCacheContext = NULL; @@ -312,6 +313,7 @@ init_local_cache(void) if (TopPathmanContext) { /* Check that child contexts exist */ + Assert(MemoryContextIsValid(PathmanInvalJobsContext)); Assert(MemoryContextIsValid(PathmanRelationCacheContext)); Assert(MemoryContextIsValid(PathmanParentCacheContext)); Assert(MemoryContextIsValid(PathmanBoundCacheContext)); @@ -322,6 +324,7 @@ init_local_cache(void) /* Initialize pg_pathman's memory contexts */ 
else { + Assert(PathmanInvalJobsContext == NULL); Assert(PathmanRelationCacheContext == NULL); Assert(PathmanParentCacheContext == NULL); Assert(PathmanBoundCacheContext == NULL); @@ -331,6 +334,11 @@ init_local_cache(void) CppAsString(TopPathmanContext), ALLOCSET_DEFAULT_SIZES); + PathmanInvalJobsContext = + AllocSetContextCreate(TopMemoryContext, + CppAsString(PathmanInvalJobsContext), + ALLOCSET_SMALL_SIZES); + /* For PartRelationInfo */ PathmanRelationCacheContext = AllocSetContextCreate(TopPathmanContext, diff --git a/src/relation_info.c b/src/relation_info.c index cb33c29d..e032f036 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -33,6 +33,7 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" +#include "utils/inval.h" #include "utils/memutils.h" #include "utils/ruleutils.h" #include "utils/syscache.h" @@ -53,6 +54,11 @@ #define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#endif + + /* Comparison function info */ typedef struct cmp_func_info { @@ -70,22 +76,26 @@ bool pg_pathman_enable_bounds_cache = true; * We delay all invalidation jobs received in relcache hook. 
*/ static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; +static List *delayed_invalidation_vague_rels = NIL; +static bool delayed_invalidation_whole_cache = false; static bool delayed_shutdown = false; /* pathman was dropped */ +#define INVAL_LIST_MAX_ITEMS 10000 + /* Add unique Oid to list, allocate in TopPathmanContext */ #define list_add_unique(list, oid) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(TopPathmanContext); \ - list = list_append_unique_oid(list, ObjectIdGetDatum(oid)); \ + MemoryContext old_mcxt = MemoryContextSwitchTo(PathmanInvalJobsContext); \ + list = list_append_unique_oid(list, (oid)); \ MemoryContextSwitchTo(old_mcxt); \ } while (0) -#define free_invalidation_list(list) \ +#define free_invalidation_lists() \ do { \ - list_free(list); \ - list = NIL; \ + MemoryContextReset(PathmanInvalJobsContext); \ + delayed_invalidation_parent_rels = NIL; \ + delayed_invalidation_vague_rels = NIL; \ } while (0) /* Handy wrappers for Oids */ @@ -101,6 +111,8 @@ static Oid get_parent_of_partition_internal(Oid partition, static Expr *get_partition_constraint_expr(Oid partition); +static void free_prel_partitions(PartRelationInfo *prel); + static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -323,35 +335,70 @@ invalidate_pathman_relation_info(Oid relid, bool *found) relid, action, &prel_found); - /* Handle valid PartRelationInfo */ - if ((action == HASH_FIND || - (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) - { - /* Remove this parent from parents cache */ - ForgetParent(prel); + /* It's a new entry, mark it 'invalid' */ + if (prel && !prel_found) + prel->valid = false; - /* Drop cached bounds etc */ - MemoryContextDelete(prel->mcxt); + /* Clear the remaining resources */ + free_prel_partitions(prel); + + /* Set 'found' if necessary */ + if (found) *found = prel_found; + +#ifdef USE_ASSERT_CHECKING + elog(DEBUG2, + 
"dispatch_cache: invalidating %s record for parent %u [%u]", + (prel ? "live" : "NULL"), relid, MyProcPid); +#endif + + return prel; +} + +/* Invalidate PartRelationInfo cache entries that exist in 'parents` array */ +void +invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count) +{ + HASH_SEQ_STATUS stat; + PartRelationInfo *prel; + List *prel_bad = NIL; + ListCell *lc; + int i; + + for (i = 0; i < parents_count; i++) + { + invalidate_pathman_relation_info(parents[i], NULL); } - /* Set important default values */ - if (prel) + hash_seq_init(&stat, partitioned_rels); + + while ((prel = (PartRelationInfo *) hash_seq_search(&stat)) != NULL) { - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; + Oid parent_relid = PrelParentRelid(prel); - prel->valid = false; /* now cache entry is invalid */ + /* Does this entry exist in PATHMAN_CONFIG table? */ + if (!bsearch_oid(parent_relid, parents, parents_count)) + { + /* All entry to 'outdated' list */ + prel_bad = lappend_oid(prel_bad, parent_relid); + + /* Clear the remaining resources */ + free_prel_partitions(prel); + } } - /* Set 'found' if necessary */ - if (found) *found = prel_found; + /* Remove outdated entries */ + foreach (lc, prel_bad) + { + pathman_cache_search_relid(partitioned_rels, + lfirst_oid(lc), + HASH_REMOVE, + NULL); + } +#ifdef USE_ASSERT_CHECKING elog(DEBUG2, - "Invalidating record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); - - return prel; + "dispatch_cache: invalidated all records [%u]", MyProcPid); +#endif } /* Get PartRelationInfo from local cache. */ @@ -387,9 +434,11 @@ get_pathman_relation_info(Oid relid) } } +#ifdef USE_RELINFO_LOGGING elog(DEBUG2, - "Fetching %s record for relation %u from pg_pathman's cache [%u]", + "dispatch_cache: fetching %s record for parent %u [%u]", (prel ? 
"live" : "NULL"), relid, MyProcPid); +#endif /* Make sure that 'prel' is valid */ Assert(!prel || PrelIsValid(prel)); @@ -423,7 +472,7 @@ get_pathman_relation_info_after_lock(Oid relid, return prel; } -/* Remove PartRelationInfo from local cache. */ +/* Remove PartRelationInfo from local cache */ void remove_pathman_relation_info(Oid relid) { @@ -434,11 +483,39 @@ remove_pathman_relation_info(Oid relid) /* Now let's remove the entry completely */ if (found) + { pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL); - elog(DEBUG2, - "Removing record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, + "dispatch_cache: removing record for parent %u [%u]", + relid, MyProcPid); +#endif + } +} + +static void +free_prel_partitions(PartRelationInfo *prel) +{ + /* Handle valid PartRelationInfo */ + if (PrelIsValid(prel)) + { + /* Remove this parent from parents cache */ + ForgetParent(prel); + + /* Drop cached bounds etc */ + MemoryContextDelete(prel->mcxt); + } + + /* Set important default values */ + if (prel) + { + prel->children = NULL; + prel->ranges = NULL; + prel->mcxt = NULL; + + prel->valid = false; /* now cache entry is invalid */ + } } /* Fill PartRelationInfo with partition-related info */ @@ -854,26 +931,55 @@ delay_pathman_shutdown(void) delayed_shutdown = true; } +/* Add new delayed invalidation job for whole dispatch cache */ +void +delay_invalidation_whole_cache(void) +{ + /* Free useless invalidation lists */ + free_invalidation_lists(); + + delayed_invalidation_whole_cache = true; +} + +/* Generic wrapper for lists */ +static void +delay_invalidation_event(List **inval_list, Oid relation) +{ + /* Skip if we already need to drop whole cache */ + if (delayed_invalidation_whole_cache) + return; + + if (list_length(*inval_list) > INVAL_LIST_MAX_ITEMS) + { + /* Too many events, drop whole cache */ + delay_invalidation_whole_cache(); + return; + } + + list_add_unique(*inval_list, 
relation); +} + /* Add new delayed invalidation job for a [ex-]parent relation */ void delay_invalidation_parent_rel(Oid parent) { - list_add_unique(delayed_invalidation_parent_rels, parent); + delay_invalidation_event(&delayed_invalidation_parent_rels, parent); } /* Add new delayed invalidation job for a vague relation */ void delay_invalidation_vague_rel(Oid vague_rel) { - list_add_unique(delayed_invalidation_vague_rels, vague_rel); + delay_invalidation_event(&delayed_invalidation_vague_rels, vague_rel); } /* Finish all pending invalidation jobs if possible */ void finish_delayed_invalidation(void) -{ +{ /* Exit early if there's nothing to do */ - if (delayed_invalidation_parent_rels == NIL && + if (delayed_invalidation_whole_cache == false && + delayed_invalidation_parent_rels == NIL && delayed_invalidation_vague_rels == NIL && delayed_shutdown == false) { @@ -888,6 +994,8 @@ finish_delayed_invalidation(void) bool parents_fetched = false; ListCell *lc; + AcceptInvalidationMessages(); + /* Handle the probable 'DROP EXTENSION' case */ if (delayed_shutdown) { @@ -908,14 +1016,31 @@ finish_delayed_invalidation(void) unload_config(); /* Disregard all remaining invalidation jobs */ - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + delayed_invalidation_whole_cache = false; + free_invalidation_lists(); /* No need to continue, exit */ return; } } + /* We might be asked to perform a complete cache invalidation */ + if (delayed_invalidation_whole_cache) + { + /* Unset 'invalidation_whole_cache' flag */ + delayed_invalidation_whole_cache = false; + + /* Fetch all partitioned tables */ + if (!parents_fetched) + { + parents = read_parent_oids(&parents_count); + parents_fetched = true; + } + + /* Invalidate live entries and remove dead ones */ + invalidate_pathman_relation_info_cache(parents, parents_count); + } + /* Process relations that are (or were) definitely partitioned */ foreach (lc, 
delayed_invalidation_parent_rels) { @@ -992,8 +1117,8 @@ finish_delayed_invalidation(void) } } - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + /* Finally, free invalidation jobs lists */ + free_invalidation_lists(); if (parents) pfree(parents); @@ -1009,20 +1134,14 @@ finish_delayed_invalidation(void) void cache_parent_of_partition(Oid partition, Oid parent) { - bool found; PartParentInfo *ppar; ppar = pathman_cache_search_relid(parent_cache, partition, HASH_ENTER, - &found); - elog(DEBUG2, - found ? - "Refreshing record for child %u in pg_pathman's cache [%u]" : - "Creating new record for child %u in pg_pathman's cache [%u]", - partition, MyProcPid); + NULL); - ppar->child_rel = partition; + ppar->child_rel = partition; ppar->parent_rel = parent; } @@ -1052,30 +1171,11 @@ get_parent_of_partition_internal(Oid partition, PartParentSearch *status, HASHACTION action) { - const char *action_str; /* "Fetching"\"Resetting" */ Oid parent; PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, partition, HASH_FIND, NULL); - /* Set 'action_str' */ - switch (action) - { - case HASH_REMOVE: - action_str = "Resetting"; - break; - - case HASH_FIND: - action_str = "Fetching"; - break; - - default: - elog(ERROR, "Unexpected HTAB action %u", action); - } - - elog(DEBUG2, - "%s %s record for child %u from pg_pathman's cache [%u]", - action_str, (ppar ? 
"live" : "NULL"), partition, MyProcPid); if (ppar) { From c45a95ab354f03bec617bd62a818d5eadcc06fc0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Oct 2017 20:08:26 +0300 Subject: [PATCH 0781/1124] mute warning produced by cppcheck --- src/relation_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index e032f036..b46c62ee 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -990,7 +990,7 @@ finish_delayed_invalidation(void) if (IsTransactionState()) { Oid *parents = NULL; - int parents_count; + int parents_count = 0; bool parents_fetched = false; ListCell *lc; From 861f84f783055ae39a7f2544603b75eca19d4e38 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 31 Oct 2017 12:28:05 +0300 Subject: [PATCH 0782/1124] bump lib version to 1.4.8 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 2718f180..2718a8da 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.7", + "version": "1.4.8", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.7", + "version": "1.4.8", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 33af45fa..7c090761 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10407 + 10408 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 763292f0..a2f7ec77 100644 --- a/src/include/init.h +++ 
b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010407 +#define CURRENT_LIB_VERSION 0x010408 void *pathman_cache_search_relid(HTAB *cache_table, From 7e2ef6cc005e39dfe5b6867ff67fb7cad7892516 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 31 Oct 2017 12:34:40 +0300 Subject: [PATCH 0783/1124] fix docs --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2935ff3c..3f3a80ba 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ More interesting features are yet to come. Stay tuned! * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; * Automatic partition creation for new INSERTed data (only for RANGE partitioning); - * Improved `COPY FROM\TO` statement that is able to insert rows directly into partitions; + * Improved `COPY FROM` statement that is able to insert rows directly into partitions; * UPDATE triggers generation out of the box (will be replaced with custom nodes too); * User-defined callbacks for partition creation event handling; * Non-blocking concurrent table partitioning; From 2c24811f7ae5135b41278d928e4651b8cbf39927 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 2 Nov 2017 15:43:35 +0300 Subject: [PATCH 0784/1124] fix warnings in pythonic tests --- tests/python/.flake8 | 2 + tests/python/.style.yapf | 2 +- tests/python/partitioning_test.py | 86 +++++++++++++------------------ 3 files changed, 38 insertions(+), 52 deletions(-) create mode 100644 tests/python/.flake8 diff --git a/tests/python/.flake8 b/tests/python/.flake8 new file mode 100644 index 00000000..7d6f9f71 --- /dev/null +++ b/tests/python/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore = E241, E501 diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf 
index e2ca7ba3..88f004bb 100644 --- a/tests/python/.style.yapf +++ b/tests/python/.style.yapf @@ -2,4 +2,4 @@ based_on_style = pep8 spaces_before_comment = 4 split_before_logical_operator = false -column_limit=90 +column_limit=100 diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 2d8cb858..853de564 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -13,12 +13,11 @@ import re import subprocess import threading -import json import time import unittest from distutils.version import LooseVersion -from testgres import get_new_node, get_bin_path, get_pg_config +from testgres import get_new_node, get_bin_path, get_pg_version # set setup base logging config, it can be turned on by `use_logging` # parameter on node setup @@ -54,7 +53,7 @@ } logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_config().get("VERSION_NUM")) +version = LooseVersion(get_pg_version()) # Helper function for json equality @@ -106,23 +105,6 @@ def start_new_pathman_cluster(self, return node - def catchup_replica(self, master, replica): - """ Wait until replica synchronizes with master """ - if version >= LooseVersion('10'): - wait_lsn_query = """ - SELECT pg_current_wal_lsn() <= replay_lsn - FROM pg_stat_replication - WHERE application_name = '{0}' - """ - else: - wait_lsn_query = """ - SELECT pg_current_xlog_location() <= replay_location - FROM pg_stat_replication - WHERE application_name = '{0}' - """ - - master.poll_query_until('postgres', wait_lsn_query.format(replica.name)) - def test_concurrent(self): """ Test concurrent partitioning """ @@ -158,8 +140,7 @@ def test_replication(self): with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: with node.replicate('node2') as replica: replica.start() - # wait until replica catches up - self.catchup_replica(node, replica) + replica.catchup() # check that results are equal self.assertEqual( @@ -169,7 +150,9 @@ def 
test_replication(self): # enable parent and see if it is enabled in replica node.psql('postgres', "select enable_parent('abc')") - self.catchup_replica(node, replica) + # wait until replica catches up + replica.catchup() + self.assertEqual( node.psql('postgres', 'explain (costs off) select * from abc'), replica.psql('postgres', 'explain (costs off) select * from abc')) @@ -182,7 +165,10 @@ def test_replication(self): # check that UPDATE in pathman_config_params invalidates cache node.psql('postgres', 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) + + # wait until replica catches up + replica.catchup() + self.assertEqual( node.psql('postgres', 'explain (costs off) select * from abc'), replica.psql('postgres', 'explain (costs off) select * from abc')) @@ -688,7 +674,7 @@ def con2_thread(): explain (analyze, costs off, timing off) select * from drop_test where val = any (select generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + """) # query selects from drop_test_1 and drop_test_4 con2.commit() @@ -712,15 +698,14 @@ def con2_thread(): # return all values in tuple queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) - # Step 1: cache partitioned table in con1 con1.begin() - con1.execute('select count(*) from drop_test') # load pathman's cache + con1.execute('select count(*) from drop_test') # load pathman's cache con1.commit() # Step 2: cache partitioned table in con2 con2.begin() - con2.execute('select count(*) from drop_test') # load pathman's cache + con2.execute('select count(*) from drop_test') # load pathman's cache con2.commit() # Step 3: drop first partition of 'drop_test' @@ -786,12 +771,12 @@ def con2_thread(): # Step 1: lock partitioned table in con1 con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('select count(*) from ins_test') # load pathman's cache con1.execute('lock table ins_test in share update exclusive 
mode') # Step 2: try inserting new value in con2 (waiting) con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache + con2.execute('select count(*) from ins_test') # load pathman's cache t = threading.Thread(target=con2_thread) t.start() @@ -853,12 +838,12 @@ def con2_thread(): # Step 1: initilize con1 con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('select count(*) from ins_test') # load pathman's cache # Step 2: initilize con2 con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) con1.execute( @@ -1031,12 +1016,12 @@ def turnon_pathman(node): get_bin_path("pg_dump"), "-p {}".format(node.port), "initial" ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY + cmp_full), # dump as plain text and restore via COPY (turnoff_pathman, turnon_pathman, [ get_bin_path("pg_dump"), "-p {}".format(node.port), "--inserts", "initial" ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs + cmp_full), # dump as plain text and restore via INSERTs (None, None, [ get_bin_path("pg_dump"), "-p {}".format(node.port), "--format=custom", "initial" @@ -1052,7 +1037,7 @@ def turnon_pathman(node): dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - if (preproc != None): + if (preproc is not None): preproc(node) # transfer and restore data @@ -1065,12 +1050,12 @@ def turnon_pathman(node): stderr=fnull) p2.communicate(input=stdoutdata) - if (postproc != None): + if (postproc is not None): postproc(node) # validate data with node.connect('initial') as con1, \ - node.connect('copy') as con2: + node.connect('copy') as con2: # 
compare plans and contents of initial and copy cmp_result = cmp_dbs(con1, con2) @@ -1092,8 +1077,8 @@ def turnon_pathman(node): config_params_initial[row[0]] = row[1:] for row in con2.execute(config_params_query): config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) + self.assertEqual(config_params_initial, config_params_copy, + "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) # compare constraints on each partition constraints_query = """ @@ -1106,8 +1091,8 @@ def turnon_pathman(node): constraints_initial[row[0]] = row[1:] for row in con2.execute(constraints_query): constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) + self.assertEqual(constraints_initial, constraints_copy, + "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) # clear copy database node.psql('copy', 'drop schema public cascade') @@ -1128,9 +1113,9 @@ def test_concurrent_detach(self): test_interval = int(math.ceil(detach_timeout * num_detachs)) insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" + + "/pgbench_scripts/insert_current_timestamp.pgbench" detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" + + "/pgbench_scripts/detachs_in_timeout.pgbench" # Check pgbench scripts on existance self.assertTrue( @@ -1202,16 +1187,14 @@ def test_update_node_plan1(self): Test scan on all partititions when using update node. 
We can't use regression tests here because 9.5 and 9.6 give different plans - ''' + ''' with get_new_node('test_update_node') as node: node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - pg_pathman.enable_partitionrouter=on + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + pg_pathman.enable_partitionrouter=on """) node.start() @@ -1275,5 +1258,6 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + if __name__ == "__main__": unittest.main() From e5280fb5e2d80956ec81b6b98bfe550d0fa3a8e6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 9 Nov 2017 14:16:15 +0300 Subject: [PATCH 0785/1124] WIP some fundamental changes to caches --- src/hooks.c | 48 +- src/include/init.h | 48 +- src/include/relation_info.h | 252 ++-- src/init.c | 99 +- src/partition_filter.c | 4 +- src/pl_funcs.c | 49 +- src/pl_range_funcs.c | 15 +- src/planner_tree_modification.c | 2 +- src/relation_info.c | 2002 +++++++++++++------------------ src/utility_stmt_hooking.c | 4 +- 10 files changed, 1078 insertions(+), 1445 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 96a7feb0..2a968683 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -812,8 +812,6 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { - Oid parent_relid; - /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; @@ -821,51 +819,29 @@ pathman_relcache_hook(Datum arg, Oid relid) if (!IsPathmanReady()) return; - /* Special case: flush whole relcache */ + /* Invalidation event for whole cache */ if (relid == InvalidOid) { - delay_invalidation_whole_cache(); - -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for all relations [%u]", MyProcPid); -#endif - - return; + 
invalidate_pathman_status_info_cache(); } - /* We shouldn't even consider special OIDs */ - if (relid < FirstNormalObjectId) - return; - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ if (relid == get_pathman_config_relid(false)) + { delay_pathman_shutdown(); + } - /* Invalidate PartBoundInfo cache if needed */ - forget_bounds_of_partition(relid); - - /* Invalidate PartParentInfo cache if needed */ - parent_relid = forget_parent_of_partition(relid, NULL); - - /* It *might have been a partition*, invalidate parent */ - if (OidIsValid(parent_relid)) + /* Invalidation event for some user table */ + else if (relid >= FirstNormalObjectId) { - delay_invalidation_parent_rel(parent_relid); + /* Invalidate PartBoundInfo entry if needed */ + forget_bounds_of_partition(relid); -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for partition %u [%u]", - relid, MyProcPid); -#endif - } - /* We can't say, perform full invalidation procedure */ - else - { - delay_invalidation_vague_rel(relid); + /* Invalidate PartParentInfo entry if needed */ + forget_parent_of_partition(relid); -#ifdef USE_RELCACHE_LOGGING - elog(DEBUG2, "Invalidation message for vague rel %u [%u]", - relid, MyProcPid); -#endif + /* Invalidate PartStatusInfo entry if needed */ + invalidate_pathman_status_info(relid); } } diff --git a/src/include/init.h b/src/include/init.h index aab2e266..799e1c2d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -46,22 +46,21 @@ typedef struct do { \ Assert(CurrentMemoryContext != TopMemoryContext); \ Assert(CurrentMemoryContext != TopPathmanContext); \ - Assert(CurrentMemoryContext != PathmanRelationCacheContext); \ - Assert(CurrentMemoryContext != PathmanParentCacheContext); \ - Assert(CurrentMemoryContext != PathmanBoundCacheContext); \ + Assert(CurrentMemoryContext != PathmanParentsCacheContext); \ + Assert(CurrentMemoryContext != PathmanStatusCacheContext); \ + Assert(CurrentMemoryContext != PathmanBoundsCacheContext); \ } while 
(0) #define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; -extern MemoryContext PathmanInvalJobsContext; -extern MemoryContext PathmanRelationCacheContext; -extern MemoryContext PathmanParentCacheContext; -extern MemoryContext PathmanBoundCacheContext; +extern MemoryContext PathmanParentsCacheContext; +extern MemoryContext PathmanStatusCacheContext; +extern MemoryContext PathmanBoundsCacheContext; -extern HTAB *partitioned_rels; -extern HTAB *parent_cache; -extern HTAB *bound_cache; +extern HTAB *parents_cache; +extern HTAB *status_cache; +extern HTAB *bounds_cache; /* pg_pathman's initialization state */ extern PathmanInitState pathman_init_state; @@ -70,28 +69,29 @@ extern PathmanInitState pathman_init_state; extern bool pathman_hooks_enabled; +#define PATHMAN_TOP_CONTEXT "maintenance" +#define PATHMAN_PARENTS_CACHE "partition parents cache" +#define PATHMAN_STATUS_CACHE "partition status cache" +#define PATHMAN_BOUNDS_CACHE "partition bounds cache" + + /* Transform pg_pathman's memory context into simple name */ static inline const char * -simpify_mcxt_name(MemoryContext mcxt) +simplify_mcxt_name(MemoryContext mcxt) { - static const char *top_mcxt = "maintenance", - *rel_mcxt = "partition dispatch cache", - *parent_mcxt = "partition parents cache", - *bound_mcxt = "partition bounds cache"; - if (mcxt == TopPathmanContext) - return top_mcxt; + return PATHMAN_TOP_CONTEXT; - else if (mcxt == PathmanRelationCacheContext) - return rel_mcxt; + else if (mcxt == PathmanParentsCacheContext) + return PATHMAN_PARENTS_CACHE; - else if (mcxt == PathmanParentCacheContext) - return parent_mcxt; + else if (mcxt == PathmanStatusCacheContext) + return PATHMAN_STATUS_CACHE; - else if (mcxt == PathmanBoundCacheContext) - return bound_mcxt; + else if (mcxt == PathmanBoundsCacheContext) + return PATHMAN_BOUNDS_CACHE; - else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); + else elog(ERROR, "unknown memory context"); } diff --git 
a/src/include/relation_info.h b/src/include/relation_info.h index dadc3511..70f2eedc 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -110,9 +110,7 @@ cmp_bounds(FmgrInfo *cmp_func, } -/* - * Partitioning type. - */ +/* Partitioning type */ typedef enum { PT_ANY = 0, /* for part type traits (virtual type) */ @@ -120,9 +118,7 @@ typedef enum PT_RANGE } PartType; -/* - * Child relation info for RANGE partitioning. - */ +/* Child relation info for RANGE partitioning */ typedef struct { Oid child_oid; @@ -130,16 +126,60 @@ typedef struct max; } RangeEntry; +/* + * PartStatusInfo + * Cached partitioning status of the specified relation. + * Allows us to quickly search for PartRelationInfo. + */ +typedef struct PartStatusInfo +{ + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool is_valid; /* is this entry fresh? */ + struct PartRelationInfo *prel; +} PartStatusInfo; + +/* + * PartParentInfo + * Cached parent of the specified partition. + * Allows us to quickly search for PartRelationInfo. + */ +typedef struct PartParentInfo +{ + Oid child_relid; /* key */ + Oid parent_relid; +} PartParentInfo; + +/* + * PartBoundInfo + * Cached bounds of the specified partition. + * Allows us to deminish overhead of check constraints. + */ +typedef struct PartBoundInfo +{ + Oid child_relid; /* key */ + + PartType parttype; + + /* For RANGE partitions */ + Bound range_min; + Bound range_max; + bool byval; + + /* For HASH partitions */ + uint32 part_idx; +} PartBoundInfo; + /* * PartRelationInfo * Per-relation partitioning information. * Allows us to perform partition pruning. */ -typedef struct +typedef struct PartRelationInfo { - Oid key; /* partitioned table's Oid */ - bool valid, /* is this entry valid? */ - enable_parent; /* should plan include parent? */ + PartStatusInfo *psin; /* entry holding this prel */ + + bool enable_parent; /* should plan include parent? 
*/ PartType parttype; /* partitioning type (HASH | RANGE) */ @@ -170,55 +210,11 @@ typedef struct #define PART_EXPR_VARNO ( 1 ) -/* - * PartParentInfo - * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. - */ -typedef struct -{ - Oid child_rel; /* key */ - Oid parent_rel; -} PartParentInfo; - -/* - * PartBoundInfo - * Cached bounds of the specified partition. - * Allows us to deminish overhead of check constraints. - */ -typedef struct -{ - Oid child_rel; /* key */ - - PartType parttype; - - /* For RANGE partitions */ - Bound range_min; - Bound range_max; - bool byval; - - /* For HASH partitions */ - uint32 part_idx; -} PartBoundInfo; - -/* - * PartParentSearch - * Represents status of a specific cached entry. - * Returned by [for]get_parent_of_partition(). - */ -typedef enum -{ - PPS_ENTRY_NOT_FOUND = 0, - PPS_ENTRY_PARENT, /* entry was found, but pg_pathman doesn't know it */ - PPS_ENTRY_PART_PARENT, /* entry is parent and is known by pg_pathman */ - PPS_NOT_SURE /* can't determine (not transactional state) */ -} PartParentSearch; - /* * PartRelationInfo field access macros & functions. 
*/ -#define PrelParentRelid(prel) ( (prel)->key ) +#define PrelParentRelid(prel) ( (prel)->psin->relid ) #define PrelGetChildrenArray(prel) ( (prel)->children ) @@ -226,13 +222,9 @@ typedef enum #define PrelChildrenCount(prel) ( (prel)->children_count ) -#define PrelIsValid(prel) ( (prel) && (prel)->valid ) - static inline uint32 PrelLastChild(const PartRelationInfo *prel) { - Assert(PrelIsValid(prel)); - if (PrelChildrenCount(prel) == 0) elog(ERROR, "pg_pathman's cache entry for relation %u has 0 children", PrelParentRelid(prel)); @@ -258,13 +250,13 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) } static inline Node * -PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) +PrelExpressionForRelid(const PartRelationInfo *prel, Index rti) { /* TODO: implement some kind of cache */ Node *expr = copyObject(prel->expr); - if (rel_index != PART_EXPR_VARNO) - ChangeVarNodes(expr, PART_EXPR_VARNO, rel_index, 0); + if (rti != PART_EXPR_VARNO) + ChangeVarNodes(expr, PART_EXPR_VARNO, rti, 0); return expr; } @@ -273,54 +265,16 @@ AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); +/* + * PartStatusInfo field access macros & functions. 
+ */ -const PartRelationInfo *refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete); -PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); -void invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count); -void remove_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); - -/* Partitioning expression routines */ -Node *parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, - Node **parsetree_out); - -Datum cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type); +#define PsinIsValid(psin) ( (psin)->is_valid ) -char *canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr); -bool is_equal_to_partitioning_expression(Oid relid, char *expression, - Oid value_type); +#define PsinReferenceCount(psin) ( (psin)->refcount ) -/* Global invalidation routines */ -void delay_pathman_shutdown(void); -void delay_invalidation_whole_cache(void); -void delay_invalidation_parent_rel(Oid parent); -void delay_invalidation_vague_rel(Oid vague_rel); -void finish_delayed_invalidation(void); - -/* Parent cache */ -void cache_parent_of_partition(Oid partition, Oid parent); -Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); -Oid get_parent_of_partition(Oid partition, PartParentSearch *status); - -/* Bounds cache */ -void forget_bounds_of_partition(Oid partition); -PartBoundInfo *get_bounds_of_partition(Oid partition, - const PartRelationInfo *prel); -Datum get_lower_bound(Oid parent_relid, Oid value_type); -Datum get_upper_bound(Oid relid, Oid value_type); /* PartType wrappers */ - static inline void WrongPartType(PartType parttype) { @@ -341,16 +295,13 @@ DatumGetPartType(Datum datum) static inline char * 
PartTypeToCString(PartType parttype) { - static char *hash_str = "1", - *range_str = "2"; - switch (parttype) { case PT_HASH: - return hash_str; + return "1"; case PT_RANGE: - return range_str; + return "2"; default: WrongPartType(parttype); @@ -359,41 +310,68 @@ PartTypeToCString(PartType parttype) } -/* PartRelationInfo checker */ +/* Dispatch cache */ +void refresh_pathman_relation_info(Oid relid); +const PartRelationInfo *get_pathman_relation_info(Oid relid); +const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result); + void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); +/* Status cache */ +PartStatusInfo *open_pathman_status_info(Oid relid); +void close_pathman_status_info(PartStatusInfo *psin); +void invalidate_pathman_status_info(Oid relid); +void invalidate_pathman_status_info_cache(void); -/* - * Useful functions & macros for freeing memory. 
- */ +/* Bounds cache */ +void forget_bounds_of_partition(Oid partition); +PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Datum get_lower_bound(Oid partition_relid, Oid value_type); +Datum get_upper_bound(Oid partition_relid, Oid value_type); -/* Remove all references to this parent from parents cache */ -static inline void -ForgetParent(PartRelationInfo *prel) -{ - uint32 i; +/* Parent cache */ +void cache_parent_of_partition(Oid partition, Oid parent); +void forget_parent_of_partition(Oid partition); +Oid get_parent_of_partition(Oid partition); - AssertArg(MemoryContextIsValid(prel->mcxt)); +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); - /* Remove relevant PartParentInfos */ - if (prel->children) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - Oid child = prel->children[i]; - - /* Skip if Oid is invalid (e.g. initialization error) */ - if (!OidIsValid(child)) - continue; - - /* If it's *always been* relid's partition, free cache */ - if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) - forget_parent_of_partition(child, NULL); - } - } -} +Datum cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); + +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + +bool is_equal_to_partitioning_expression(const Oid relid, + const char *expression, + const Oid value_type); + +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); + +Datum cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); + +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + + +/* Global invalidation routines */ +void delay_pathman_shutdown(void); +void 
finish_delayed_invalidation(void); /* For pg_pathman.enable_bounds_cache GUC */ diff --git a/src/init.c b/src/init.c index 0a9f7da6..545eb670 100644 --- a/src/init.c +++ b/src/init.c @@ -43,19 +43,18 @@ /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; -MemoryContext PathmanInvalJobsContext = NULL; -MemoryContext PathmanRelationCacheContext = NULL; -MemoryContext PathmanParentCacheContext = NULL; -MemoryContext PathmanBoundCacheContext = NULL; +MemoryContext PathmanParentsCacheContext = NULL; +MemoryContext PathmanStatusCacheContext = NULL; +MemoryContext PathmanBoundsCacheContext = NULL; /* Storage for PartRelationInfos */ -HTAB *partitioned_rels = NULL; +HTAB *parents_cache = NULL; /* Storage for PartParentInfos */ -HTAB *parent_cache = NULL; +HTAB *status_cache = NULL; /* Storage for PartBoundInfos */ -HTAB *bound_cache = NULL; +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ PathmanInitState pathman_init_state; @@ -309,18 +308,17 @@ init_local_cache(void) HASHCTL ctl; /* Destroy caches, just in case */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); /* Reset pg_pathman's memory contexts */ if (TopPathmanContext) { /* Check that child contexts exist */ - Assert(MemoryContextIsValid(PathmanInvalJobsContext)); - Assert(MemoryContextIsValid(PathmanRelationCacheContext)); - Assert(MemoryContextIsValid(PathmanParentCacheContext)); - Assert(MemoryContextIsValid(PathmanBoundCacheContext)); + Assert(MemoryContextIsValid(PathmanParentsCacheContext)); + Assert(MemoryContextIsValid(PathmanStatusCacheContext)); + Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); /* Clear children */ MemoryContextResetChildren(TopPathmanContext); @@ -328,66 +326,60 @@ init_local_cache(void) /* Initialize pg_pathman's memory contexts */ else { - Assert(PathmanInvalJobsContext == NULL); - 
Assert(PathmanRelationCacheContext == NULL); - Assert(PathmanParentCacheContext == NULL); - Assert(PathmanBoundCacheContext == NULL); + Assert(PathmanParentsCacheContext == NULL); + Assert(PathmanStatusCacheContext == NULL); + Assert(PathmanBoundsCacheContext == NULL); TopPathmanContext = AllocSetContextCreate(TopMemoryContext, - CppAsString(TopPathmanContext), + PATHMAN_TOP_CONTEXT, ALLOCSET_DEFAULT_SIZES); - PathmanInvalJobsContext = - AllocSetContextCreate(TopMemoryContext, - CppAsString(PathmanInvalJobsContext), - ALLOCSET_SMALL_SIZES); - - /* For PartRelationInfo */ - PathmanRelationCacheContext = + /* For PartParentInfo */ + PathmanParentsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanRelationCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_PARENTS_CACHE, + ALLOCSET_SMALL_SIZES); - /* For PartParentInfo */ - PathmanParentCacheContext = + /* For PartStatusInfo */ + PathmanStatusCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanParentCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_STATUS_CACHE, + ALLOCSET_SMALL_SIZES); /* For PartBoundInfo */ - PathmanBoundCacheContext = + PathmanBoundsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanBoundCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_BOUNDS_CACHE, + ALLOCSET_SMALL_SIZES); } memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartRelationInfo); - ctl.hcxt = PathmanRelationCacheContext; + ctl.hcxt = PathmanParentsCacheContext; - partitioned_rels = hash_create("pg_pathman's partition dispatch cache", - PART_RELS_SIZE, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + parents_cache = hash_create(PATHMAN_PARENTS_CACHE, + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartParentInfo); - ctl.hcxt = PathmanParentCacheContext; + ctl.entrysize = sizeof(PartStatusInfo); + ctl.hcxt = 
PathmanStatusCacheContext; - parent_cache = hash_create("pg_pathman's partition parents cache", + status_cache = hash_create(PATHMAN_STATUS_CACHE, PART_RELS_SIZE * CHILD_FACTOR, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartBoundInfo); - ctl.hcxt = PathmanBoundCacheContext; + ctl.hcxt = PathmanBoundsCacheContext; - bound_cache = hash_create("pg_pathman's partition bounds cache", - PART_RELS_SIZE * CHILD_FACTOR, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + bounds_cache = hash_create(PATHMAN_BOUNDS_CACHE, + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* @@ -397,13 +389,13 @@ static void fini_local_cache(void) { /* First, destroy hash tables */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); - partitioned_rels = NULL; - parent_cache = NULL; - bound_cache = NULL; + parents_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; /* Now we can clear allocations */ MemoryContextResetChildren(TopPathmanContext); @@ -876,9 +868,6 @@ startup_invalidate_parent(Datum *values, bool *isnull, void *context) PATHMAN_CONFIG, relid), errhint(INIT_ERROR_HINT))); } - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); } /* diff --git a/src/partition_filter.c b/src/partition_filter.c index 78123c71..33424e06 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -455,7 +455,7 @@ select_partition_for_insert(ExprState *expr_state, value, prel->ev_type); /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + refresh_pathman_relation_info(parent_relid); } else partition_relid = parts[0]; @@ -467,7 +467,7 @@ select_partition_for_insert(ExprState *expr_state, if (rri_holder == NULL) { /* 
get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + refresh_pathman_relation_info(parent_relid); /* Get a fresh PartRelationInfo */ prel = get_pathman_relation_info(parent_relid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ac6b0dca..197c2347 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -126,26 +126,12 @@ get_number_of_partitions_pl(PG_FUNCTION_ARGS) Datum get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0); - PartParentSearch parent_search; - Oid parent; - bool emit_error = PG_GETARG_BOOL(1); + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* Fetch parent & write down search status */ - parent = get_parent_of_partition(partition, &parent_search); - - /* We MUST be sure :) */ - Assert(parent_search != PPS_NOT_SURE); - - /* It must be parent known by pg_pathman */ - if (parent_search == PPS_ENTRY_PART_PARENT) + if (OidIsValid(parent)) PG_RETURN_OID(parent); - if (emit_error) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" is not a partition", - get_rel_name_or_relid(partition)))); - PG_RETURN_NULL(); } @@ -160,8 +146,7 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); Oid value_type = PG_GETARG_OID(2); - result = is_equal_to_partitioning_expression(parent_relid, expr, - value_type); + result = is_equal_to_partitioning_expression(parent_relid, expr, value_type); PG_RETURN_BOOL(result); } @@ -171,10 +156,10 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) Datum get_lower_bound_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid partition_relid = PG_GETARG_OID(0); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - PG_RETURN_POINTER(get_lower_bound(relid, value_type)); + PG_RETURN_POINTER(get_lower_bound(partition_relid, value_type)); } /* @@ -183,10 +168,10 @@ get_lower_bound_pl(PG_FUNCTION_ARGS) Datum 
get_upper_bound_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid partition_relid = PG_GETARG_OID(0); Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - PG_RETURN_POINTER(get_upper_bound(relid, value_type)); + PG_RETURN_POINTER(get_upper_bound(partition_relid, value_type)); } /* @@ -269,14 +254,14 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); usercxt->pathman_contexts[0] = TopPathmanContext; - usercxt->pathman_contexts[1] = PathmanRelationCacheContext; - usercxt->pathman_contexts[2] = PathmanParentCacheContext; - usercxt->pathman_contexts[3] = PathmanBoundCacheContext; + usercxt->pathman_contexts[1] = PathmanParentsCacheContext; + usercxt->pathman_contexts[2] = PathmanStatusCacheContext; + usercxt->pathman_contexts[3] = PathmanBoundsCacheContext; usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ - usercxt->pathman_htables[1] = partitioned_rels; - usercxt->pathman_htables[2] = parent_cache; - usercxt->pathman_htables[3] = bound_cache; + usercxt->pathman_htables[1] = parents_cache; + usercxt->pathman_htables[2] = status_cache; + usercxt->pathman_htables[3] = bounds_cache; usercxt->current_item = 0; @@ -318,7 +303,7 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) current_htab = usercxt->pathman_htables[usercxt->current_item]; values[Anum_pathman_cs_context - 1] = - CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); + CStringGetTextDatum(simplify_mcxt_name(current_mcxt)); /* We can't check stats of mcxt prior to 9.6 */ #if PG_VERSION_NUM >= 90600 @@ -864,9 +849,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, - values, - false); /* initialize immediately */ + get_pathman_relation_info(relid); } PG_CATCH(); { diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 37b8fcb9..93a78241 100644 --- a/src/pl_range_funcs.c +++ 
b/src/pl_range_funcs.c @@ -403,7 +403,6 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) { Oid partition_relid, parent_relid; - PartParentSearch parent_search; RangeEntry *ranges; const PartRelationInfo *prel; uint32 i; @@ -415,8 +414,8 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition_relid' should not be NULL"))); - parent_relid = get_parent_of_partition(partition_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation \"%s\" is not a partition", get_rel_name_or_relid(partition_relid)))); @@ -615,7 +614,6 @@ Datum merge_range_partitions(PG_FUNCTION_ARGS) { Oid parent = InvalidOid; - PartParentSearch parent_search; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); Oid *partitions; @@ -658,10 +656,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Check if all partitions are from the same parent */ for (i = 0; i < nparts; i++) { - Oid cur_parent = get_parent_of_partition(partitions[i], &parent_search); + Oid cur_parent = get_parent_of_partition(partitions[i]); /* If we couldn't find a parent, it's not a partition */ - if (parent_search != PPS_ENTRY_PART_PARENT) + if (!OidIsValid(cur_parent)) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("relation \"%s\" is not a partition", get_rel_name_or_relid(partitions[i])))); @@ -783,15 +781,14 @@ Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { const PartRelationInfo *prel; - PartParentSearch parent_search; Oid relid = PG_GETARG_OID(0), parent; RangeEntry *ranges; int i; /* Get parent's relid */ - parent = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent = get_parent_of_partition(relid); + if (!OidIsValid(parent)) elog(ERROR, "relation \"%s\" is not a partition", get_rel_name_or_relid(relid)); diff --git 
a/src/planner_tree_modification.c b/src/planner_tree_modification.c index e8bcc129..9e6d64e1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -584,7 +584,7 @@ partition_router_visitor(Plan *plan, void *context) const PartRelationInfo *prel; /* Find topmost parent */ - while ((tmp_relid = get_parent_of_partition(relid, NULL)) != InvalidOid) + while (OidIsValid(tmp_relid = get_parent_of_partition(relid))) relid = tmp_relid; /* Check that table is partitioned */ diff --git a/src/relation_info.c b/src/relation_info.c index e327bc57..77a81fc0 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -75,44 +75,19 @@ bool pg_pathman_enable_bounds_cache = true; /* * We delay all invalidation jobs received in relcache hook. */ -static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; -static bool delayed_invalidation_whole_cache = false; static bool delayed_shutdown = false; /* pathman was dropped */ -#define INVAL_LIST_MAX_ITEMS 10000 - -/* Add unique Oid to list, allocate in TopPathmanContext */ -#define list_add_unique(list, oid) \ - do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(PathmanInvalJobsContext); \ - list = list_append_unique_oid(list, (oid)); \ - MemoryContextSwitchTo(old_mcxt); \ - } while (0) - -#define free_invalidation_lists() \ - do { \ - MemoryContextReset(PathmanInvalJobsContext); \ - delayed_invalidation_parent_rels = NIL; \ - delayed_invalidation_vague_rels = NIL; \ - } while (0) - /* Handy wrappers for Oids */ #define bsearch_oid(key, array, array_size) \ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) -static bool try_invalidate_parent(Oid relid, Oid *parents, int parents_count); -static Oid try_catalog_parent_search(Oid partition, PartParentSearch *status); -static Oid get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action); +static PartRelationInfo *build_pathman_relation_info(Oid 
relid, Datum *values); +static void free_pathman_relation_info(PartRelationInfo *prel); static Expr *get_partition_constraint_expr(Oid partition); -static void free_prel_partitions(PartRelationInfo *prel); - static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -141,381 +116,205 @@ init_relation_info_static_data(void) NULL); } + /* - * refresh\invalidate\get\remove PartRelationInfo functions. + * Partition dispatch routines. */ -const PartRelationInfo * -refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete) +/* TODO: comment */ +void +refresh_pathman_relation_info(Oid relid) { - const LOCKMODE lockmode = AccessShareLock; - const TypeCacheEntry *typcache; - Oid *prel_children; - uint32 prel_children_count = 0, - i; - PartRelationInfo *prel; - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - char *expr; - MemoryContext old_mcxt; - - AssertTemporaryContext(); - prel = invalidate_pathman_relation_info(relid, NULL); - Assert(prel); - - /* Try locking parent, exit fast if 'allow_incomplete' */ - if (allow_incomplete) - { - if (!ConditionalLockRelationOid(relid, lockmode)) - return NULL; /* leave an invalid entry */ - } - else LockRelationOid(relid, lockmode); - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - /* Nope, it doesn't, remove this entry and exit */ - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ - } +} - /* Make both arrays point to NULL */ - prel->children = NULL; - prel->ranges = NULL; +/* Get PartRelationInfo from local cache */ +const PartRelationInfo * +get_pathman_relation_info(Oid relid) +{ + PartStatusInfo *psin = open_pathman_status_info(relid); + PartRelationInfo *prel = psin ? 
psin->prel : NULL; - /* Set partitioning type */ - prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, + "fetching %s record for parent %u [%u]", + (prel ? "live" : "NULL"), relid, MyProcPid); +#endif - /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); + return prel; +} - /* Create a new memory context to store expression tree etc */ - prel->mcxt = AllocSetContextCreate(PathmanRelationCacheContext, - CppAsString(refresh_pathman_relation_info), - ALLOCSET_SMALL_SIZES); +/* Acquire lock on a table and try to get PartRelationInfo */ +const PartRelationInfo * +get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result) +{ + const PartRelationInfo *prel; + LockAcquireResult acquire_result; - /* Switch to persistent memory context */ - old_mcxt = MemoryContextSwitchTo(prel->mcxt); + /* Restrict concurrent partition creation (it's dangerous) */ + acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Build partitioning expression tree */ - prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - prel->expr = (Node *) stringToNode(expr); - fix_opfuncids(prel->expr); + /* Invalidate cache entry (see AcceptInvalidationMessages()) */ + refresh_pathman_relation_info(relid); - /* Extract Vars and varattnos of partitioning expression */ - prel->expr_vars = NIL; - prel->expr_atts = NULL; - prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); - pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + /* Set 'lock_result' if asked to */ + if (lock_result) + *lock_result = acquire_result; - MemoryContextSwitchTo(old_mcxt); + prel = get_pathman_relation_info(relid); + if (!prel && unlock_if_not_found) + UnlockRelationOid(relid, ShareUpdateExclusiveLock); - /* First, fetch type of partitioning expression */ - prel->ev_type = 
exprType(prel->expr); - prel->ev_typmod = exprTypmod(prel->expr); - prel->ev_collid = exprCollation(prel->expr); + return prel; +} - /* Fetch HASH & CMP fuctions and other stuff from type cache */ - typcache = lookup_type_cache(prel->ev_type, - TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); +/* Build a new PartRelationInfo for relation (might emit ERROR) */ +static PartRelationInfo * +build_pathman_relation_info(Oid relid, Datum *values) +{ + const LOCKMODE lockmode = AccessShareLock; + MemoryContext prel_mcxt; + PartRelationInfo *prel; - prel->ev_byval = typcache->typbyval; - prel->ev_len = typcache->typlen; - prel->ev_align = typcache->typalign; + AssertTemporaryContext(); - prel->cmp_proc = typcache->cmp_proc; - prel->hash_proc = typcache->hash_proc; + /* Lock parent table */ + LockRelationOid(relid, lockmode); - /* Try searching for children (don't wait if we can't lock) */ - switch (find_inheritance_children_array(relid, lockmode, - allow_incomplete, - &prel_children_count, - &prel_children)) + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) { - /* If there's no children at all, remove this entry */ - case FCS_NO_CHILDREN: - elog(DEBUG2, "refresh: relation %u has no children [%u]", - relid, MyProcPid); - - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ - - /* If can't lock children, leave an invalid entry */ - case FCS_COULD_NOT_LOCK: - elog(DEBUG2, "refresh: cannot lock children of relation %u [%u]", - relid, MyProcPid); - - UnlockRelationOid(relid, lockmode); - return NULL; /* exit */ - - /* Found some children, just unlock parent */ - case FCS_FOUND: - elog(DEBUG2, "refresh: found children of relation %u [%u]", - relid, MyProcPid); + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + return NULL; /* exit */ + } - UnlockRelationOid(relid, lockmode); - break; /* continue */ + /* Create a new memory context to store expression 
tree etc */ + prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext, + __FUNCTION__, + ALLOCSET_SMALL_SIZES); - /* Error: unknown result code */ - default: - elog(ERROR, "error in function " - CppAsString(find_inheritance_children_array)); - } + /* Create a new PartRelationInfo */ + prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); + prel->mcxt = prel_mcxt; - /* - * Fill 'prel' with partition info, raise ERROR if anything is wrong. - * This way PartRelationInfo will remain 'invalid', and 'get' procedure - * will try to refresh it again (and again), until the error is fixed - * by user manually (i.e. invalid check constraints etc). - */ + /* Memory leak protection */ PG_TRY(); { - fill_prel_with_partitions(prel, prel_children, prel_children_count); - } - PG_CATCH(); - { - /* Remove this parent from parents cache */ - ForgetParent(prel); - - /* Delete unused 'prel_mcxt' */ - MemoryContextDelete(prel->mcxt); - + MemoryContext old_mcxt; + const TypeCacheEntry *typcache; + char *expr; + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + Oid *prel_children; + uint32 prel_children_count = 0, + i; + + /* Make both arrays point to NULL */ prel->children = NULL; prel->ranges = NULL; - prel->mcxt = NULL; - - /* Rethrow ERROR further */ - PG_RE_THROW(); - } - PG_END_TRY(); - /* Peform some actions for each child */ - for (i = 0; i < prel_children_count; i++) - { - /* Add "partition+parent" pair to cache */ - cache_parent_of_partition(prel_children[i], relid); - - /* Now it's time to unlock this child */ - UnlockRelationOid(prel_children[i], lockmode); - } - - if (prel_children) - pfree(prel_children); - - /* Read additional parameters ('enable_parent' at the moment) */ - if (read_pathman_params(relid, param_values, param_isnull)) - { - prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; - } - /* Else set default values if they cannot be found */ - else - { - prel->enable_parent 
= DEFAULT_PATHMAN_ENABLE_PARENT; - } + /* Set partitioning type */ + prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - /* We've successfully built a cache entry */ - prel->valid = true; - - return prel; -} + /* Fetch cooked partitioning expression */ + expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); -/* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ -PartRelationInfo * -invalidate_pathman_relation_info(Oid relid, bool *found) -{ - bool prel_found; - HASHACTION action = found ? HASH_FIND : HASH_ENTER; - PartRelationInfo *prel; + /* Switch to persistent memory context */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); - prel = pathman_cache_search_relid(partitioned_rels, - relid, action, - &prel_found); + /* Build partitioning expression tree */ + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + prel->expr = (Node *) stringToNode(expr); + fix_opfuncids(prel->expr); - /* It's a new entry, mark it 'invalid' */ - if (prel && !prel_found) - prel->valid = false; + /* Extract Vars and varattnos of partitioning expression */ + prel->expr_vars = NIL; + prel->expr_atts = NULL; + prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); + pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); - /* Clear the remaining resources */ - free_prel_partitions(prel); + MemoryContextSwitchTo(old_mcxt); - /* Set 'found' if necessary */ - if (found) *found = prel_found; + /* First, fetch type of partitioning expression */ + prel->ev_type = exprType(prel->expr); + prel->ev_typmod = exprTypmod(prel->expr); + prel->ev_collid = exprCollation(prel->expr); -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, - "dispatch_cache: invalidating %s record for parent %u [%u]", - (prel ? 
"live" : "NULL"), relid, MyProcPid); -#endif + /* Fetch HASH & CMP fuctions and other stuff from type cache */ + typcache = lookup_type_cache(prel->ev_type, + TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); - return prel; -} + prel->ev_byval = typcache->typbyval; + prel->ev_len = typcache->typlen; + prel->ev_align = typcache->typalign; -/* Invalidate PartRelationInfo cache entries that exist in 'parents` array */ -void -invalidate_pathman_relation_info_cache(const Oid *parents, int parents_count) -{ - HASH_SEQ_STATUS stat; - PartRelationInfo *prel; - List *prel_bad = NIL; - ListCell *lc; - int i; + prel->cmp_proc = typcache->cmp_proc; + prel->hash_proc = typcache->hash_proc; - for (i = 0; i < parents_count; i++) - { - invalidate_pathman_relation_info(parents[i], NULL); - } + /* Try searching for children */ + (void) find_inheritance_children_array(relid, lockmode, false, + &prel_children_count, + &prel_children); - hash_seq_init(&stat, partitioned_rels); + /* Fill 'prel' with partition info, raise ERROR if anything is wrong */ + fill_prel_with_partitions(prel, prel_children, prel_children_count); - while ((prel = (PartRelationInfo *) hash_seq_search(&stat)) != NULL) - { - Oid parent_relid = PrelParentRelid(prel); + /* Unlock the parent */ + UnlockRelationOid(relid, lockmode); - /* Does this entry exist in PATHMAN_CONFIG table? 
*/ - if (!bsearch_oid(parent_relid, parents, parents_count)) + /* Now it's time to take care of children */ + for (i = 0; i < prel_children_count; i++) { - /* All entry to 'outdated' list */ - prel_bad = lappend_oid(prel_bad, parent_relid); + /* Cache this child */ + cache_parent_of_partition(prel_children[i], relid); - /* Clear the remaining resources */ - free_prel_partitions(prel); + /* Unlock this child */ + UnlockRelationOid(prel_children[i], lockmode); } - } - - /* Remove outdated entries */ - foreach (lc, prel_bad) - { - pathman_cache_search_relid(partitioned_rels, - lfirst_oid(lc), - HASH_REMOVE, - NULL); - } - -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, - "dispatch_cache: invalidated all records [%u]", MyProcPid); -#endif -} -/* Get PartRelationInfo from local cache. */ -const PartRelationInfo * -get_pathman_relation_info(Oid relid) -{ - const PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, - relid, HASH_FIND, - NULL); - /* Refresh PartRelationInfo if needed */ - if (prel && !PrelIsValid(prel)) - { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + if (prel_children) + pfree(prel_children); - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + /* Read additional parameters ('enable_parent' at the moment) */ + if (read_pathman_params(relid, param_values, param_isnull)) { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, isnull, &iptr); - - /* Refresh partitioned table cache entry (might turn NULL) */ - prel = refresh_pathman_relation_info(relid, values, false); + prel->enable_parent = + param_values[Anum_pathman_config_params_enable_parent - 1]; } - - /* Else clear remaining cache entry */ + /* Else set default values if they cannot be found */ else { - remove_pathman_relation_info(relid); - prel = NULL; /* don't 
forget to reset 'prel' */ + prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; } } - -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, - "dispatch_cache: fetching %s record for parent %u [%u]", - (prel ? "live" : "NULL"), relid, MyProcPid); -#endif - - /* Make sure that 'prel' is valid */ - Assert(!prel || PrelIsValid(prel)); - - return prel; -} - -/* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result) -{ - const PartRelationInfo *prel; - LockAcquireResult acquire_result; - - /* Restrict concurrent partition creation (it's dangerous) */ - acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - - /* Invalidate cache entry (see AcceptInvalidationMessages()) */ - invalidate_pathman_relation_info(relid, NULL); - - /* Set 'lock_result' if asked to */ - if (lock_result) - *lock_result = acquire_result; - - prel = get_pathman_relation_info(relid); - if (!prel && unlock_if_not_found) - UnlockRelationOid(relid, ShareUpdateExclusiveLock); - - return prel; -} - -/* Remove PartRelationInfo from local cache */ -void -remove_pathman_relation_info(Oid relid) -{ - bool found; - - /* Free resources */ - invalidate_pathman_relation_info(relid, &found); - - /* Now let's remove the entry completely */ - if (found) + PG_CATCH(); { - pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL); + /* Free this entry */ + free_pathman_relation_info(prel); -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, - "dispatch_cache: removing record for parent %u [%u]", - relid, MyProcPid); -#endif + /* Rethrow ERROR further */ + PG_RE_THROW(); } -} + PG_END_TRY(); -static void -free_prel_partitions(PartRelationInfo *prel) -{ - /* Handle valid PartRelationInfo */ - if (PrelIsValid(prel)) + /* Free trivial entries */ + if (PrelChildrenCount(prel) == 0) { - /* Remove this parent from parents cache */ - ForgetParent(prel); - - /* Drop cached 
bounds etc */ - MemoryContextDelete(prel->mcxt); + free_pathman_relation_info(prel); + prel = NULL; } - /* Set important default values */ - if (prel) - { - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; + return prel; +} - prel->valid = false; /* now cache entry is invalid */ - } +/* Free PartRelationInfo struct safely */ +static void +free_pathman_relation_info(PartRelationInfo *prel) +{ + MemoryContextDelete(prel->mcxt); } /* Fill PartRelationInfo with partition-related info */ @@ -548,7 +347,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Create temporary memory context for loop */ temp_mcxt = AllocSetContextCreate(CurrentMemoryContext, CppAsString(fill_prel_with_partitions), - ALLOCSET_DEFAULT_SIZES); + ALLOCSET_SMALL_SIZES); /* Initialize bounds of partitions */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -570,13 +369,13 @@ fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: - prel->children[pbin->part_idx] = pbin->child_rel; + prel->children[pbin->part_idx] = pbin->child_relid; break; case PT_RANGE: { /* Copy child's Oid */ - prel->ranges[i].child_oid = pbin->child_rel; + prel->ranges[i].child_oid = pbin->child_relid; /* Copy all min & max Datums to the persistent mcxt */ old_mcxt = MemoryContextSwitchTo(prel->mcxt); @@ -640,7 +439,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, #endif } -/* qsort comparison function for RangeEntries */ +/* qsort() comparison function for RangeEntries */ static int cmp_range_entries(const void *p1, const void *p2, void *arg) { @@ -651,558 +450,351 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } - /* - * Partitioning expression routines. + * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
*/ - -/* Wraps expression in SELECT query and returns parse tree */ -Node * -parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, /* ret value #1 */ - Node **parsetree_out) /* ret value #2 */ +void +shout_if_prel_is_invalid(const Oid parent_oid, + const PartRelationInfo *prel, + const PartType expected_part_type) { - SelectStmt *select_stmt; - List *parsetree_list; - MemoryContext old_mcxt; - - const char *sql = "SELECT (%s) FROM ONLY %s.%s"; - char *relname = get_rel_name(relid), - *nspname = get_namespace_name(get_rel_namespace(relid)); - char *query_string = psprintf(sql, expr_cstr, - quote_identifier(nspname), - quote_identifier(relname)); - - old_mcxt = CurrentMemoryContext; + if (!prel) + elog(ERROR, "relation \"%s\" has no partitions", + get_rel_name_or_relid(parent_oid)); - PG_TRY(); - { - parsetree_list = raw_parser(query_string); - } - PG_CATCH(); + /* Check partitioning type unless it's "ANY" */ + if (expected_part_type != PT_ANY && + expected_part_type != prel->parttype) { - ErrorData *error; + char *expected_str; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); + switch (expected_part_type) + { + case PT_HASH: + expected_str = "HASH"; + break; - /* Adjust error message */ - error->detail = error->message; - error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + case PT_RANGE: + expected_str = "RANGE"; + break; - ReThrowError(error); - } - PG_END_TRY(); + default: + WrongPartType(expected_part_type); + expected_str = NULL; /* keep compiler happy */ + } - if (list_length(parsetree_list) != 1) - elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + elog(ERROR, "relation \"%s\" is not partitioned by %s", + get_rel_name_or_relid(parent_oid), + expected_str); + } +} -#if PG_VERSION_NUM >= 100000 
- select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; -#else - select_stmt = (SelectStmt *) linitial(parsetree_list); -#endif +/* + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. + */ +AttrNumber * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) +{ + Oid parent_relid = PrelParentRelid(prel); + int source_natts = source_tupdesc->natts, + expr_natts = 0; + AttrNumber *result, + i; + bool is_trivial = true; - if (query_string_out) - *query_string_out = query_string; + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; - if (parsetree_out) - *parsetree_out = (Node *) linitial(parsetree_list); + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); - return ((ResTarget *) linitial(select_stmt->targetList))->val; -} + /* Find a match for each attribute */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname(parent_relid, attnum); + int j; -/* Parse partitioning expression and return its type and nodeToString() as TEXT */ -Datum -cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type_out) /* ret value #1 */ -{ - Node *parse_tree; - List *query_tree_list; + Assert(attnum <= expr_natts); - char *query_string, - *expr_serialized = ""; /* keep compiler happy */ + for (j = 0; j < source_natts; j++) + { + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); - Datum expr_datum; + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ - MemoryContext parse_mcxt, - old_mcxt; + if (strcmp(NameStr(att->attname), attname) == 0) + { + result[attnum 
- 1] = (AttrNumber) (j + 1); + break; + } + } - AssertTemporaryContext(); + if (result[attnum - 1] == 0) + elog(ERROR, "cannot find column \"%s\" in child relation", attname); - /* - * We use separate memory context here, just to make sure we won't - * leave anything behind after parsing, rewriting and planning. - */ - parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, - CppAsString(cook_partitioning_expression), - ALLOCSET_DEFAULT_SIZES); + if (result[attnum - 1] != attnum) + is_trivial = false; + } - /* Switch to mcxt for cooking :) */ - old_mcxt = MemoryContextSwitchTo(parse_mcxt); + /* Check if map is trivial */ + if (is_trivial) + { + pfree(result); + return NULL; + } - /* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); + *map_length = expr_natts; + return result; +} - /* We don't need pg_pathman's magic here */ - pathman_hooks_enabled = false; - PG_TRY(); - { - Query *query; - Node *expr; - int expr_attr; - Relids expr_varnos; - Bitmapset *expr_varattnos = NULL; +/* + * Partitioning status cache routines. 
+ */ - /* This will fail with ERROR in case of wrong expression */ - query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, - NULL, 0, NULL); +PartStatusInfo * +open_pathman_status_info(Oid relid) +{ + PartStatusInfo *psin; + bool found; + bool refresh; - /* Sanity check #1 */ - if (list_length(query_tree_list) != 1) - elog(ERROR, "partitioning expression produced more than 1 query"); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - query = (Query *) linitial(query_tree_list); + /* We don't cache catalog objects */ + if (relid < FirstNormalObjectId) + return NULL; - /* Sanity check #2 */ - if (list_length(query->targetList) != 1) - elog(ERROR, "there should be exactly 1 partitioning expression"); + /* Create a new entry for this table if needed */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); - /* Sanity check #3 */ - if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) - elog(ERROR, "subqueries are not allowed in partitioning expression"); + /* Initialize new entry */ + if (!found) + { + psin->refcount = 0; + psin->is_valid = false; + psin->prel = NULL; + } - expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; - expr = eval_const_expressions(NULL, expr); + /* Should we refresh this entry? 
*/ + refresh = !psin->is_valid && psin->refcount == 0; - /* Sanity check #4 */ - if (contain_mutable_functions(expr)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in partitioning expression" - " must be marked IMMUTABLE"))); + if (refresh) + { + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - /* Sanity check #5 */ - expr_varnos = pull_varnos(expr); - if (bms_num_members(expr_varnos) != 1 || - relid != ((RangeTblEntry *) linitial(query->rtable))->relid) + /* Set basic fields */ + psin->is_valid = false; + + /* Free old dispatch info */ + if (psin->prel) { - elog(ERROR, "partitioning expression should reference table \"%s\"", - get_rel_name(relid)); + free_pathman_relation_info(psin->prel); + psin->prel = NULL; } - /* Sanity check #6 */ - pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); - expr_attr = -1; - while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + /* Check if PATHMAN_CONFIG table contains this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) { - AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; - HeapTuple htup; - - /* Check that there's no system attributes in expression */ - if (attnum < InvalidAttrNumber) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("system attributes are not supported"))); - - htup = SearchSysCache2(ATTNUM, - ObjectIdGetDatum(relid), - Int16GetDatum(attnum)); - if (HeapTupleIsValid(htup)) - { - bool nullable; + bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - /* Fetch 'nullable' and free syscache tuple */ - nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; - ReleaseSysCache(htup); + if (upd_expr) + pathman_config_refresh_parsed_expression(relid, values, + isnull, &iptr); - if (nullable) - ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" should be marked NOT NULL", - 
get_attname(relid, attnum)))); - } + /* Build a partitioned table cache entry (might emit ERROR) */ + psin->prel = build_pathman_relation_info(relid, values); } - /* Free sets */ - bms_free(expr_varnos); - bms_free(expr_varattnos); - - Assert(expr); - expr_serialized = nodeToString(expr); - - /* Set 'expr_type_out' if needed */ - if (expr_type_out) - *expr_type_out = exprType(expr); + /* Good, entry is valid */ + psin->is_valid = true; } - PG_CATCH(); - { - ErrorData *error; - - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; - - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); - - /* Adjust error message */ - error->detail = error->message; - error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; - ReThrowError(error); - } - PG_END_TRY(); + /* Increase refcount */ + psin->refcount++; - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; + return psin; +} - /* Switch to previous mcxt */ - MemoryContextSwitchTo(old_mcxt); +void +close_pathman_status_info(PartStatusInfo *psin) +{ + /* Should always be called in transaction */ + Assert(IsTransactionState()); - /* Get Datum of serialized expression (right mcxt) */ - expr_datum = CStringGetTextDatum(expr_serialized); + /* Should not be NULL */ + Assert(psin); - /* Free memory */ - MemoryContextDelete(parse_mcxt); + /* Should be referenced elsewhere */ + Assert(psin->refcount > 0); - return expr_datum; + /* Decrease recount */ + psin->refcount--; } -/* Canonicalize user's expression (trim whitespaces etc) */ -char * -canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr) +void +invalidate_pathman_status_info(Oid relid) { - Node *parse_tree; - Expr *expr; - char *query_string; - Query *query; - - AssertTemporaryContext(); + PartStatusInfo *psin; - 
/* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); - query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); - expr = ((TargetEntry *) linitial(query->targetList))->expr; + if (psin) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + relid, MyProcPid); +#endif - /* We don't care about memory efficiency here */ - return deparse_expression((Node *) expr, - deparse_context_for(get_rel_name(relid), relid), - false, false); + /* Mark entry as invalid */ + psin->is_valid = false; + } } -/* Check if query has subqueries */ -static bool -query_contains_subqueries(Node *node, void *context) +void +invalidate_pathman_status_info_cache(void) { - if (node == NULL) - return false; + HASH_SEQ_STATUS status; + PartStatusInfo *psin; - /* We've met a subquery */ - if (IsA(node, Query)) - return true; + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif - return expression_tree_walker(node, query_contains_subqueries, NULL); + /* Mark entry as invalid */ + psin->is_valid = false; + } } /* - * Functions for delayed invalidation. + * Partition bounds cache routines. 
*/ -/* Add new delayed pathman shutdown job (DROP EXTENSION) */ -void -delay_pathman_shutdown(void) -{ - delayed_shutdown = true; -} - -/* Add new delayed invalidation job for whole dispatch cache */ -void -delay_invalidation_whole_cache(void) -{ - /* Free useless invalidation lists */ - free_invalidation_lists(); - - delayed_invalidation_whole_cache = true; -} - -/* Generic wrapper for lists */ -static void -delay_invalidation_event(List **inval_list, Oid relation) -{ - /* Skip if we already need to drop whole cache */ - if (delayed_invalidation_whole_cache) - return; - - if (list_length(*inval_list) > INVAL_LIST_MAX_ITEMS) - { - /* Too many events, drop whole cache */ - delay_invalidation_whole_cache(); - return; - } - - list_add_unique(*inval_list, relation); -} - -/* Add new delayed invalidation job for a [ex-]parent relation */ -void -delay_invalidation_parent_rel(Oid parent) -{ - delay_invalidation_event(&delayed_invalidation_parent_rels, parent); -} - -/* Add new delayed invalidation job for a vague relation */ +/* Remove partition's constraint from cache */ void -delay_invalidation_vague_rel(Oid vague_rel) +forget_bounds_of_partition(Oid partition) { - delay_invalidation_event(&delayed_invalidation_vague_rels, vague_rel); -} + PartBoundInfo *pbin; -/* Finish all pending invalidation jobs if possible */ -void -finish_delayed_invalidation(void) -{ - /* Exit early if there's nothing to do */ - if (delayed_invalidation_whole_cache == false && - delayed_invalidation_parent_rels == NIL && - delayed_invalidation_vague_rels == NIL && - delayed_shutdown == false) - { - return; - } + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ - /* Check that current state is transactional */ - if (IsTransactionState()) + /* Free this entry */ + if (pbin) { - Oid *parents = NULL; - int parents_count = 0; - bool parents_fetched = false; - ListCell *lc; - - AcceptInvalidationMessages(); - - /* Handle the probable 'DROP EXTENSION' case */ - if (delayed_shutdown) - { - Oid cur_pathman_config_relid; - - /* Unset 'shutdown' flag */ - delayed_shutdown = false; - - /* Get current PATHMAN_CONFIG relid */ - cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, - get_pathman_schema()); - - /* Check that PATHMAN_CONFIG table has indeed been dropped */ - if (cur_pathman_config_relid == InvalidOid || - cur_pathman_config_relid != get_pathman_config_relid(true)) - { - /* Ok, let's unload pg_pathman's config */ - unload_config(); - - /* Disregard all remaining invalidation jobs */ - delayed_invalidation_whole_cache = false; - free_invalidation_lists(); - - /* No need to continue, exit */ - return; - } - } - - /* We might be asked to perform a complete cache invalidation */ - if (delayed_invalidation_whole_cache) - { - /* Unset 'invalidation_whole_cache' flag */ - delayed_invalidation_whole_cache = false; - - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* Invalidate live entries and remove dead ones */ - invalidate_pathman_relation_info_cache(parents, parents_count); - } - - /* Process relations that are (or were) definitely partitioned */ - foreach (lc, delayed_invalidation_parent_rels) - { - Oid parent = lfirst_oid(lc); - - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(parent))) - continue; - - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* Check if parent still exists */ - if 
(bsearch_oid(parent, parents, parents_count)) - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); - else - remove_pathman_relation_info(parent); - } - - /* Process all other vague cases */ - foreach (lc, delayed_invalidation_vague_rels) + /* Call pfree() if it's RANGE bounds */ + if (pbin->parttype == PT_RANGE) { - Oid vague_rel = lfirst_oid(lc); - - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(vague_rel))) - continue; - - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - - /* It might be a partitioned table or a partition */ - if (!try_invalidate_parent(vague_rel, parents, parents_count)) - { - PartParentSearch search; - Oid parent; - List *fresh_rels = delayed_invalidation_parent_rels; - - parent = get_parent_of_partition(vague_rel, &search); - - switch (search) - { - /* - * Two main cases: - * - It's *still* parent (in PATHMAN_CONFIG) - * - It *might have been* parent before (not in PATHMAN_CONFIG) - */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - /* Skip if we've already refreshed this parent */ - if (!list_member_oid(fresh_rels, parent)) - try_invalidate_parent(parent, parents, parents_count); - } - break; - - /* How come we still don't know?? 
*/ - case PPS_NOT_SURE: - elog(ERROR, "Unknown table status, this should never happen"); - break; - - default: - break; - } - } + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); } - /* Finally, free invalidation jobs lists */ - free_invalidation_lists(); - - if (parents) - pfree(parents); + /* Finally remove this entry from cache */ + pathman_cache_search_relid(bounds_cache, + partition, + HASH_REMOVE, + NULL); } } +/* Return partition's constraint as expression tree */ +PartBoundInfo * +get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) +{ + PartBoundInfo *pbin; -/* - * cache\forget\get PartParentInfo functions. - */ + /* + * We might end up building the constraint + * tree that we wouldn't want to keep. + */ + AssertTemporaryContext(); -/* Create "partition+parent" pair in local cache */ -void -cache_parent_of_partition(Oid partition, Oid parent) -{ - PartParentInfo *ppar; + /* PartRelationInfo must be provided */ + Assert(prel != NULL); - ppar = pathman_cache_search_relid(parent_cache, - partition, - HASH_ENTER, - NULL); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - ppar->child_rel = partition; - ppar->parent_rel = parent; -} + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ -/* Remove "partition+parent" pair from cache & return parent's Oid */ -Oid -forget_parent_of_partition(Oid partition, PartParentSearch *status) -{ - return get_parent_of_partition_internal(partition, status, HASH_REMOVE); -} + /* Build new entry */ + if (!pbin) + { + PartBoundInfo pbin_local; + Expr *con_expr; -/* Return partition parent's Oid */ -Oid -get_parent_of_partition(Oid partition, PartParentSearch *status) -{ - return get_parent_of_partition_internal(partition, status, HASH_FIND); -} + /* Initialize other fields */ + pbin_local.child_relid = partition; + pbin_local.byval = prel->ev_byval; -/* Check that expression is equal to expression of some partitioned table */ -bool -is_equal_to_partitioning_expression(Oid relid, char *expression, - Oid value_type) -{ - const PartRelationInfo *prel; - char *cexpr; - Oid expr_type; + /* Try to build constraint's expression tree (may emit ERROR) */ + con_expr = get_partition_constraint_expr(partition); - /* - * Cook and get a canonicalized expression, - * we don't need a result of the cooking - */ - cook_partitioning_expression(relid, expression, &expr_type); - cexpr = canonicalize_partitioning_expression(relid, expression); + /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ + fill_pbin_with_bounds(&pbin_local, prel, con_expr); - prel = get_pathman_relation_info(relid); + /* We strive to delay the creation of cache's entry */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_ENTER, + NULL) : + palloc(sizeof(PartBoundInfo)); - /* caller should have been check it already */ - Assert(prel != NULL); + /* Copy data from 'pbin_local' */ + memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); + } - return (getBaseType(expr_type) == value_type) && - (strcmp(cexpr, prel->expr_cstr) == 0); + return pbin; } /* Get lower bound of a partition */ Datum -get_lower_bound(Oid relid, Oid value_type) +get_lower_bound(Oid partition_relid, Oid value_type) { Oid parent_relid; Datum result; const PartRelationInfo *prel; - PartBoundInfo *pbin; - PartParentSearch parent_search; + const PartBoundInfo *pbin; - parent_relid = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name_or_relid(partition_relid)); prel = get_pathman_relation_info(parent_relid); - Assert(prel && prel->parttype == PT_RANGE); - pbin = get_bounds_of_partition(relid, prel); - Assert(prel != NULL); + pbin = get_bounds_of_partition(partition_relid, prel); if (IsInfinite(&pbin->range_min)) return PointerGetDatum(NULL); @@ -1216,23 +808,20 @@ get_lower_bound(Oid relid, Oid value_type) /* Get upper bound of a partition */ Datum -get_upper_bound(Oid relid, Oid value_type) +get_upper_bound(Oid partition_relid, Oid value_type) { Oid parent_relid; Datum result; const PartRelationInfo *prel; - PartBoundInfo *pbin; - PartParentSearch parent_search; + const PartBoundInfo *pbin; - parent_relid = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name_or_relid(partition_relid)); prel = 
get_pathman_relation_info(parent_relid); - Assert(prel && prel->parttype == PT_RANGE); - pbin = get_bounds_of_partition(relid, prel); - Assert(prel != NULL); + pbin = get_bounds_of_partition(partition_relid, prel); if (IsInfinite(&pbin->range_max)) return PointerGetDatum(NULL); @@ -1245,279 +834,73 @@ get_upper_bound(Oid relid, Oid value_type) } /* - * Get [and remove] "partition+parent" pair from cache, - * also check syscache if 'status' is provided. + * Get constraint expression tree of a partition. * - * "status == NULL" implies that we don't care about - * neither syscache nor PATHMAN_CONFIG table contents. + * build_check_constraint_name_internal() is used to build conname. */ -static Oid -get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action) +static Expr * +get_partition_constraint_expr(Oid partition) { - Oid parent; - PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, - partition, - HASH_FIND, - NULL); + Oid conid; /* constraint Oid */ + char *conname; /* constraint name */ + HeapTuple con_tuple; + Datum conbin_datum; + bool conbin_isnull; + Expr *expr; /* expression tree for constraint */ - if (ppar) - { - if (status) *status = PPS_ENTRY_PART_PARENT; - parent = ppar->parent_rel; + conname = build_check_constraint_name_relid_internal(partition); + conid = get_relation_constraint_oid(partition, conname, true); - /* Remove entry if necessary */ - if (action == HASH_REMOVE) - pathman_cache_search_relid(parent_cache, partition, - HASH_REMOVE, NULL); + if (!OidIsValid(conid)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" does not exist", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); } - /* Try fetching parent from syscache if 'status' is provided */ - else if (status) - parent = try_catalog_parent_search(partition, status); - else - parent = InvalidOid; /* we don't have to set status */ 
- - return parent; -} -/* Try to find parent of a partition using catalog & PATHMAN_CONFIG */ -static Oid -try_catalog_parent_search(Oid partition, PartParentSearch *status) -{ - if (!IsTransactionState()) + con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, + Anum_pg_constraint_conbin, + &conbin_isnull); + if (conbin_isnull) { - /* We could not perform search */ - if (status) *status = PPS_NOT_SURE; + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(WARNING, + (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", + conname, get_rel_name_or_relid(partition)), + errhint(INIT_ERROR_HINT))); + pfree(conname); - return InvalidOid; + return NULL; /* could not parse */ } - else - { - Relation relation; - ScanKeyData key[1]; - SysScanDesc scan; - HeapTuple inheritsTuple; - Oid parent = InvalidOid; - - /* At first we assume parent does not exist (not a partition) */ - if (status) *status = PPS_ENTRY_NOT_FOUND; + pfree(conname); - relation = heap_open(InheritsRelationId, AccessShareLock); + /* Finally we get a constraint expression tree */ + expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - ScanKeyInit(&key[0], - Anum_pg_inherits_inhrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(partition)); + /* Don't foreget to release syscache tuple */ + ReleaseSysCache(con_tuple); - scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, - true, NULL, 1, key); + return expr; +} - while ((inheritsTuple = systable_getnext(scan)) != NULL) - { - parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; +/* Fill PartBoundInfo with bounds/hash */ +static void +fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr) +{ + AssertTemporaryContext(); - /* - * NB: don't forget that 'inh' flag does not immediately - * mean that this is a pg_pathman's partition. 
It might - * be just a casual inheriting table. - */ - if (status) *status = PPS_ENTRY_PARENT; + /* Copy partitioning type to 'pbin' */ + pbin->parttype = prel->parttype; - /* Check that PATHMAN_CONFIG contains this table */ - if (pathman_config_contains_relation(parent, NULL, NULL, NULL, NULL)) - { - /* We've found the entry, update status */ - if (status) *status = PPS_ENTRY_PART_PARENT; - } - - break; /* there should be no more rows */ - } - - systable_endscan(scan); - heap_close(relation, AccessShareLock); - - return parent; - } -} - -/* Try to invalidate cache entry for relation 'parent' */ -static bool -try_invalidate_parent(Oid relid, Oid *parents, int parents_count) -{ - /* Check if this is a partitioned table */ - if (bsearch_oid(relid, parents, parents_count)) - { - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); - - /* Success */ - return true; - } - - /* Clear remaining cache entry */ - remove_pathman_relation_info(relid); - - /* Not a partitioned relation */ - return false; -} - - -/* - * forget\get constraint functions. - */ - -/* Remove partition's constraint from cache */ -void -forget_bounds_of_partition(Oid partition) -{ - PartBoundInfo *pbin; - - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? 
- pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ - - /* Free this entry */ - if (pbin) - { - /* Call pfree() if it's RANGE bounds */ - if (pbin->parttype == PT_RANGE) - { - FreeBound(&pbin->range_min, pbin->byval); - FreeBound(&pbin->range_max, pbin->byval); - } - - /* Finally remove this entry from cache */ - pathman_cache_search_relid(bound_cache, - partition, - HASH_REMOVE, - NULL); - } -} - -/* Return partition's constraint as expression tree */ -PartBoundInfo * -get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) -{ - PartBoundInfo *pbin; - - /* - * We might end up building the constraint - * tree that we wouldn't want to keep. - */ - AssertTemporaryContext(); - - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ - - /* Build new entry */ - if (!pbin) - { - PartBoundInfo pbin_local; - Expr *con_expr; - - /* Initialize other fields */ - pbin_local.child_rel = partition; - pbin_local.byval = prel->ev_byval; - - /* Try to build constraint's expression tree (may emit ERROR) */ - con_expr = get_partition_constraint_expr(partition); - - /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ - fill_pbin_with_bounds(&pbin_local, prel, con_expr); - - /* We strive to delay the creation of cache's entry */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_ENTER, - NULL) : - palloc(sizeof(PartBoundInfo)); - - /* Copy data from 'pbin_local' */ - memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); - } - - return pbin; -} - -/* - * Get constraint expression tree of a partition. - * - * build_check_constraint_name_internal() is used to build conname. 
- */ -static Expr * -get_partition_constraint_expr(Oid partition) -{ - Oid conid; /* constraint Oid */ - char *conname; /* constraint name */ - HeapTuple con_tuple; - Datum conbin_datum; - bool conbin_isnull; - Expr *expr; /* expression tree for constraint */ - - conname = build_check_constraint_name_relid_internal(partition); - conid = get_relation_constraint_oid(partition, conname, true); - - if (!OidIsValid(conid)) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("constraint \"%s\" of partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - } - - con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); - conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, - Anum_pg_constraint_conbin, - &conbin_isnull); - if (conbin_isnull) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, - (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - pfree(conname); - - return NULL; /* could not parse */ - } - pfree(conname); - - /* Finally we get a constraint expression tree */ - expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - - /* Don't foreget to release syscache tuple */ - ReleaseSysCache(con_tuple); - - return expr; -} - -/* Fill PartBoundInfo with bounds/hash */ -static void -fill_pbin_with_bounds(PartBoundInfo *pbin, - const PartRelationInfo *prel, - const Expr *constraint_expr) -{ - AssertTemporaryContext(); - - /* Copy partitioning type to 'pbin' */ - pbin->parttype = prel->parttype; - - /* Perform a partitioning_type-dependent task */ - switch (prel->parttype) - { - case PT_HASH: + /* Perform a partitioning_type-dependent task */ + switch (prel->parttype) + { + case PT_HASH: { if (!validate_hash_constraint(constraint_expr, prel, &pbin->part_idx)) @@ -1525,7 +908,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, 
DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, (errmsg("wrong constraint format for HASH partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), + get_rel_name_or_relid(pbin->child_relid)), errhint(INIT_ERROR_HINT))); } } @@ -1543,7 +926,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, MemoryContext old_mcxt; /* Switch to the persistent memory context */ - old_mcxt = MemoryContextSwitchTo(PathmanBoundCacheContext); + old_mcxt = MemoryContextSwitchTo(PathmanBoundsCacheContext); pbin->range_min = lower_null ? MakeBoundInf(MINUS_INFINITY) : @@ -1565,7 +948,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, (errmsg("wrong constraint format for RANGE partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), + get_rel_name_or_relid(pbin->child_relid)), errhint(INIT_ERROR_HINT))); } } @@ -1582,113 +965,442 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, /* - * Common PartRelationInfo checks. Emit ERROR if anything is wrong. + * Partition parents cache routines. */ + +/* Add parent of partition to cache */ void -shout_if_prel_is_invalid(const Oid parent_oid, - const PartRelationInfo *prel, - const PartType expected_part_type) +cache_parent_of_partition(Oid partition, Oid parent) { - if (!prel) - elog(ERROR, "relation \"%s\" has no partitions", - get_rel_name_or_relid(parent_oid)); + PartParentInfo *ppar; - if (!PrelIsValid(prel)) - elog(ERROR, "pg_pathman's cache contains invalid entry " - "for relation \"%s\" [%u]", - get_rel_name_or_relid(parent_oid), - MyProcPid); + /* Why would we want to call it not in transaction? 
*/ + Assert(IsTransactionState()); - /* Check partitioning type unless it's "ANY" */ - if (expected_part_type != PT_ANY && - expected_part_type != prel->parttype) + /* Create a new cache entry */ + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_ENTER, + NULL); + + /* Fill entry with parent */ + ppar->parent_relid = parent; +} + +/* Remove parent of partition from cache */ +void +forget_parent_of_partition(Oid partition) +{ + pathman_cache_search_relid(parents_cache, + partition, + HASH_REMOVE, + NULL); +} + +/* Return parent of partition */ +Oid +get_parent_of_partition(Oid partition) +{ + PartParentInfo *ppar; + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't cache catalog objects */ + if (partition < FirstNormalObjectId) + return InvalidOid; + + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_FIND, + NULL); + + /* Nice, we have a cached entry */ + if (ppar) { - char *expected_str; + return ppar->child_relid; + } + /* Bad luck, let's search in catalog */ + else + { + Relation relation; + ScanKeyData key[1]; + SysScanDesc scan; + HeapTuple htup; + Oid parent = InvalidOid; - switch (expected_part_type) + relation = heap_open(InheritsRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition)); + + scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, + true, NULL, 1, key); + + while ((htup = systable_getnext(scan)) != NULL) { - case PT_HASH: - expected_str = "HASH"; - break; + /* Extract parent from catalog tuple */ + Oid inhparent = ((Form_pg_inherits) GETSTRUCT(htup))->inhparent; - case PT_RANGE: - expected_str = "RANGE"; - break; + /* Check that PATHMAN_CONFIG contains this table */ + if (pathman_config_contains_relation(inhparent, NULL, NULL, NULL, NULL)) + { + /* We should return this parent */ + parent = inhparent; - default: - WrongPartType(expected_part_type); - expected_str 
= NULL; /* keep compiler happy */ + /* Now, let's cache this parent */ + cache_parent_of_partition(partition, parent); + } + + break; /* there should be no more rows */ } - elog(ERROR, "relation \"%s\" is not partitioned by %s", - get_rel_name_or_relid(parent_oid), - expected_str); + systable_endscan(scan); + heap_close(relation, AccessShareLock); + + return parent; } } + /* - * Remap partitioning expression columns for tuple source relation. - * This is a simplified version of functions that return TupleConversionMap. - * It should be faster if expression uses a few fields of relation. + * Partitioning expression routines. */ -AttrNumber * -PrelExpressionAttributesMap(const PartRelationInfo *prel, - TupleDesc source_tupdesc, - int *map_length) -{ - Oid parent_relid = PrelParentRelid(prel); - int source_natts = source_tupdesc->natts, - expr_natts = 0; - AttrNumber *result, - i; - bool is_trivial = true; - /* Get largest attribute number used in expression */ - i = -1; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - expr_natts = i; +/* Wraps expression in SELECT query and returns parse tree */ +Node * +parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, /* ret value #1 */ + Node **parsetree_out) /* ret value #2 */ +{ + SelectStmt *select_stmt; + List *parsetree_list; + MemoryContext old_mcxt; - /* Allocate array for map */ - result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); + const char *sql = "SELECT (%s) FROM ONLY %s.%s"; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, expr_cstr, + quote_identifier(nspname), + quote_identifier(relname)); - /* Find a match for each attribute */ - i = -1; - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + old_mcxt = CurrentMemoryContext; + + PG_TRY(); { - AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); 
- int j; + parsetree_list = raw_parser(query_string); + } + PG_CATCH(); + { + ErrorData *error; - Assert(attnum <= expr_natts); + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); - for (j = 0; j < source_natts; j++) + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + + if (list_length(parsetree_list) != 1) + elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + +#if PG_VERSION_NUM >= 100000 + select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; +#else + select_stmt = (SelectStmt *) linitial(parsetree_list); +#endif + + if (query_string_out) + *query_string_out = query_string; + + if (parsetree_out) + *parsetree_out = (Node *) linitial(parsetree_list); + + return ((ResTarget *) linitial(select_stmt->targetList))->val; +} + +/* Parse partitioning expression and return its type and nodeToString() as TEXT */ +Datum +cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type_out) /* ret value #1 */ +{ + Node *parse_tree; + List *query_tree_list; + + char *query_string, + *expr_serialized = ""; /* keep compiler happy */ + + Datum expr_datum; + + MemoryContext parse_mcxt, + old_mcxt; + + AssertTemporaryContext(); + + /* + * We use separate memory context here, just to make sure we won't + * leave anything behind after parsing, rewriting and planning. 
+ */ + parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(cook_partitioning_expression), + ALLOCSET_DEFAULT_SIZES); + + /* Switch to mcxt for cooking :) */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + /* We don't need pg_pathman's magic here */ + pathman_hooks_enabled = false; + + PG_TRY(); + { + Query *query; + Node *expr; + int expr_attr; + Relids expr_varnos; + Bitmapset *expr_varattnos = NULL; + + /* This will fail with ERROR in case of wrong expression */ + query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, + NULL, 0, NULL); + + /* Sanity check #1 */ + if (list_length(query_tree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); + + query = (Query *) linitial(query_tree_list); + + /* Sanity check #2 */ + if (list_length(query->targetList) != 1) + elog(ERROR, "there should be exactly 1 partitioning expression"); + + /* Sanity check #3 */ + if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) + elog(ERROR, "subqueries are not allowed in partitioning expression"); + + expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; + expr = eval_const_expressions(NULL, expr); + + /* Sanity check #4 */ + if (contain_mutable_functions(expr)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression" + " must be marked IMMUTABLE"))); + + /* Sanity check #5 */ + expr_varnos = pull_varnos(expr); + if (bms_num_members(expr_varnos) != 1 || + relid != ((RangeTblEntry *) linitial(query->rtable))->relid) { - Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); + } - if (att->attisdropped) - continue; /* attrMap[attnum - 1] is already 0 */ + /* Sanity check #6 */ + pull_varattnos(expr, 
bms_singleton_member(expr_varnos), &expr_varattnos); + expr_attr = -1; + while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + { + AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; + HeapTuple htup; - if (strcmp(NameStr(att->attname), attname) == 0) + /* Check that there's no system attributes in expression */ + if (attnum < InvalidAttrNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("system attributes are not supported"))); + + htup = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (HeapTupleIsValid(htup)) { - result[attnum - 1] = (AttrNumber) (j + 1); - break; + bool nullable; + + /* Fetch 'nullable' and free syscache tuple */ + nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; + ReleaseSysCache(htup); + + if (nullable) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" should be marked NOT NULL", + get_attname(relid, attnum)))); } } - if (result[attnum - 1] == 0) - elog(ERROR, "cannot find column \"%s\" in child relation", attname); + /* Free sets */ + bms_free(expr_varnos); + bms_free(expr_varattnos); - if (result[attnum - 1] != attnum) - is_trivial = false; - } + Assert(expr); + expr_serialized = nodeToString(expr); - /* Check if map is trivial */ - if (is_trivial) + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + } + PG_CATCH(); { - pfree(result); - return NULL; + ErrorData *error; + + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); } + PG_END_TRY(); - 
*map_length = expr_natts; - return result; + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to previous mcxt */ + MemoryContextSwitchTo(old_mcxt); + + /* Get Datum of serialized expression (right mcxt) */ + expr_datum = CStringGetTextDatum(expr_serialized); + + /* Free memory */ + MemoryContextDelete(parse_mcxt); + + return expr_datum; +} + +/* Canonicalize user's expression (trim whitespaces etc) */ +char * +canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr) +{ + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; + + AssertTemporaryContext(); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); + expr = ((TargetEntry *) linitial(query->targetList))->expr; + + /* We don't care about memory efficiency here */ + return deparse_expression((Node *) expr, + deparse_context_for(get_rel_name(relid), relid), + false, false); +} + +/* Check that expression is equal to expression of some partitioned table */ +bool +is_equal_to_partitioning_expression(const Oid relid, + const char *expression, + const Oid value_type) +{ + const PartRelationInfo *prel; + char *cexpr; + Oid expr_type; + + /* + * Cook and get a canonicalized expression, + * we don't need a result of the cooking + */ + cook_partitioning_expression(relid, expression, &expr_type); + cexpr = canonicalize_partitioning_expression(relid, expression); + + prel = get_pathman_relation_info(relid); + Assert(prel); + + return (getBaseType(expr_type) == value_type) && + (strcmp(cexpr, prel->expr_cstr) == 0); +} + +/* Check if query has subqueries */ +static bool +query_contains_subqueries(Node *node, void *context) +{ + if (node == NULL) + return false; + + /* We've met a subquery */ + if (IsA(node, Query)) + return true; + + return expression_tree_walker(node, 
query_contains_subqueries, NULL); +} + + +/* + * Functions for delayed invalidation. + */ + +/* Add new delayed pathman shutdown job (DROP EXTENSION) */ +void +delay_pathman_shutdown(void) +{ + delayed_shutdown = true; +} + +/* Finish all pending invalidation jobs if possible */ +void +finish_delayed_invalidation(void) +{ + /* Check that current state is transactional */ + if (IsTransactionState()) + { + AcceptInvalidationMessages(); + + /* Handle the probable 'DROP EXTENSION' case */ + if (delayed_shutdown) + { + Oid cur_pathman_config_relid; + + /* Unset 'shutdown' flag */ + delayed_shutdown = false; + + /* Get current PATHMAN_CONFIG relid */ + cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, + get_pathman_schema()); + + /* Check that PATHMAN_CONFIG table has indeed been dropped */ + if (cur_pathman_config_relid == InvalidOid || + cur_pathman_config_relid != get_pathman_config_relid(true)) + { + /* Ok, let's unload pg_pathman's config */ + unload_config(); + + /* No need to continue, exit */ + return; + } + } + + + } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2c8a7249..b08b53e1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -146,7 +146,6 @@ is_pathman_related_table_rename(Node *parsetree, RenameStmt *rename_stmt = (RenameStmt *) parsetree; Oid relation_oid, parent_relid; - PartParentSearch parent_search; const PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -177,8 +176,7 @@ is_pathman_related_table_rename(Node *parsetree, } /* Assume it's a partition, fetch its parent */ - parent_relid = get_parent_of_partition(relation_oid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + if (!OidIsValid(parent_relid = get_parent_of_partition(relation_oid))) return false; /* Is parent partitioned? 
*/ From 01d9ad605658713877f66b9bc3830c2094d29cdb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 10 Nov 2017 17:15:48 +0300 Subject: [PATCH 0786/1124] move refcount etc into PartRelationInfo --- src/hooks.c | 4 +- src/include/relation_info.h | 33 ++--- src/init.c | 2 +- src/relation_info.c | 275 ++++++++++++++++++------------------ 4 files changed, 153 insertions(+), 161 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a968683..d02ec265 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -822,7 +822,7 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidation event for whole cache */ if (relid == InvalidOid) { - invalidate_pathman_status_info_cache(); + invalidate_pathman_relation_info_cache(); } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ @@ -841,7 +841,7 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_parent_of_partition(relid); /* Invalidate PartStatusInfo entry if needed */ - invalidate_pathman_status_info(relid); + invalidate_pathman_relation_info(relid); } } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 70f2eedc..1a07ba00 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -133,16 +133,14 @@ typedef struct */ typedef struct PartStatusInfo { - Oid relid; /* key */ - int32 refcount; /* reference counter */ - bool is_valid; /* is this entry fresh? */ + Oid relid; /* key */ struct PartRelationInfo *prel; } PartStatusInfo; /* * PartParentInfo * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. + * Allows us to quickly search for parent PartRelationInfo. */ typedef struct PartParentInfo { @@ -177,7 +175,9 @@ typedef struct PartBoundInfo */ typedef struct PartRelationInfo { - PartStatusInfo *psin; /* entry holding this prel */ + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool fresh; /* is this entry fresh? */ bool enable_parent; /* should plan include parent? 
*/ @@ -214,7 +214,7 @@ typedef struct PartRelationInfo * PartRelationInfo field access macros & functions. */ -#define PrelParentRelid(prel) ( (prel)->psin->relid ) +#define PrelParentRelid(prel) ( (prel)->relid ) #define PrelGetChildrenArray(prel) ( (prel)->children ) @@ -222,6 +222,10 @@ typedef struct PartRelationInfo #define PrelChildrenCount(prel) ( (prel)->children_count ) +#define PrelReferenceCount(prel) ( (prel)->refcount ) + +#define PrelIsFresh(prel) ( (prel)->fresh ) + static inline uint32 PrelLastChild(const PartRelationInfo *prel) { @@ -265,14 +269,6 @@ AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); -/* - * PartStatusInfo field access macros & functions. - */ - -#define PsinIsValid(psin) ( (psin)->is_valid ) - -#define PsinReferenceCount(psin) ( (psin)->refcount ) - /* PartType wrappers */ static inline void @@ -312,6 +308,9 @@ PartTypeToCString(PartType parttype) /* Dispatch cache */ void refresh_pathman_relation_info(Oid relid); +void invalidate_pathman_relation_info(Oid relid); +void invalidate_pathman_relation_info_cache(void); +void close_pathman_relation_info(PartRelationInfo *prel); const PartRelationInfo *get_pathman_relation_info(Oid relid); const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found, @@ -321,12 +320,6 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); -/* Status cache */ -PartStatusInfo *open_pathman_status_info(Oid relid); -void close_pathman_status_info(PartStatusInfo *psin); -void invalidate_pathman_status_info(Oid relid); -void invalidate_pathman_status_info_cache(void); - /* Bounds cache */ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); diff --git a/src/init.c b/src/init.c index 545eb670..58479939 100644 --- a/src/init.c +++ b/src/init.c @@ -692,7 +692,7 @@ 
pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); - elog(DEBUG2, "PATHMAN_CONFIG table %s relation %u", + elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? "contains" : "doesn't contain"), relid); return contains_rel; diff --git a/src/relation_info.c b/src/relation_info.c index 77a81fc0..9537f9a9 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -83,6 +83,7 @@ static bool delayed_shutdown = false; /* pathman was dropped */ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) +static void invalidate_pathman_status_info(PartStatusInfo *psin); static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); static void free_pathman_relation_info(PartRelationInfo *prel); @@ -128,20 +129,147 @@ refresh_pathman_relation_info(Oid relid) } +/* TODO: comment */ +void +invalidate_pathman_relation_info(Oid relid) +{ + PartStatusInfo *psin; + + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + + if (psin) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + relid, MyProcPid); +#endif + + invalidate_pathman_status_info(psin); + } +} + +/* TODO: comment */ +void +invalidate_pathman_relation_info_cache(void) +{ + HASH_SEQ_STATUS status; + PartStatusInfo *psin; + + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) + { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif + + invalidate_pathman_status_info(psin); + } +} + +/* TODO: comment */ +static void +invalidate_pathman_status_info(PartStatusInfo *psin) +{ + /* Mark entry as invalid */ + if (psin->prel && PrelReferenceCount(psin->prel) > 0) + { + PrelIsFresh(psin->prel) = false; + } + else + { + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); + } +} + +/* TODO: comment */ +void 
+close_pathman_relation_info(PartRelationInfo *prel) +{ + +} + /* Get PartRelationInfo from local cache */ const PartRelationInfo * get_pathman_relation_info(Oid relid) { - PartStatusInfo *psin = open_pathman_status_info(relid); - PartRelationInfo *prel = psin ? psin->prel : NULL; + PartStatusInfo *psin; + bool refresh; + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't create entries for catalog */ + if (relid < FirstNormalObjectId) + return NULL; + + /* Create a new entry for this table if needed */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + + /* Should we build a new PartRelationInfo? */ + refresh = psin ? + (psin->prel && + !PrelIsFresh(psin->prel) && + PrelReferenceCount(psin->prel) == 0) : + true; + + if (refresh) + { + PartRelationInfo *prel = NULL; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + /* Check if PATHMAN_CONFIG table contains this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + { + bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; + + /* Update pending partitioning expression */ + if (upd_expr) + pathman_config_refresh_parsed_expression(relid, values, + isnull, &iptr); + + /* Build a partitioned table cache entry (might emit ERROR) */ + prel = build_pathman_relation_info(relid, values); + } + + /* Create a new entry for this table if needed */ + if (!psin) + { + bool found; + + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); + } + /* Otherwise, free old entry */ + else if (psin->prel) + { + free_pathman_relation_info(psin->prel); + } + + /* Cache fresh entry */ + psin->prel = prel; + } #ifdef USE_RELINFO_LOGGING elog(DEBUG2, "fetching %s record for parent %u [%u]", - (prel ? "live" : "NULL"), relid, MyProcPid); + (psin->prel ? 
"live" : "NULL"), relid, MyProcPid); #endif - return prel; + if (psin->prel) + PrelReferenceCount(psin->prel) += 1; + + return psin->prel; } /* Acquire lock on a table and try to get PartRelationInfo */ @@ -156,9 +284,6 @@ get_pathman_relation_info_after_lock(Oid relid, /* Restrict concurrent partition creation (it's dangerous) */ acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Invalidate cache entry (see AcceptInvalidationMessages()) */ - refresh_pathman_relation_info(relid); - /* Set 'lock_result' if asked to */ if (lock_result) *lock_result = acquire_result; @@ -170,7 +295,7 @@ get_pathman_relation_info_after_lock(Oid relid, return prel; } -/* Build a new PartRelationInfo for relation (might emit ERROR) */ +/* Build a new PartRelationInfo for partitioned relation */ static PartRelationInfo * build_pathman_relation_info(Oid relid, Datum *values) { @@ -198,7 +323,10 @@ build_pathman_relation_info(Oid relid, Datum *values) /* Create a new PartRelationInfo */ prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); - prel->mcxt = prel_mcxt; + prel->relid = relid; + prel->refcount = 0; + prel->fresh = true; + prel->mcxt = prel_mcxt; /* Memory leak protection */ PG_TRY(); @@ -557,135 +685,6 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, } -/* - * Partitioning status cache routines. - */ - -PartStatusInfo * -open_pathman_status_info(Oid relid) -{ - PartStatusInfo *psin; - bool found; - bool refresh; - - /* Should always be called in transaction */ - Assert(IsTransactionState()); - - /* We don't cache catalog objects */ - if (relid < FirstNormalObjectId) - return NULL; - - /* Create a new entry for this table if needed */ - psin = pathman_cache_search_relid(status_cache, - relid, HASH_ENTER, - &found); - - /* Initialize new entry */ - if (!found) - { - psin->refcount = 0; - psin->is_valid = false; - psin->prel = NULL; - } - - /* Should we refresh this entry? 
*/ - refresh = !psin->is_valid && psin->refcount == 0; - - if (refresh) - { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Set basic fields */ - psin->is_valid = false; - - /* Free old dispatch info */ - if (psin->prel) - { - free_pathman_relation_info(psin->prel); - psin->prel = NULL; - } - - /* Check if PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) - { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, - isnull, &iptr); - - /* Build a partitioned table cache entry (might emit ERROR) */ - psin->prel = build_pathman_relation_info(relid, values); - } - - /* Good, entry is valid */ - psin->is_valid = true; - } - - /* Increase refcount */ - psin->refcount++; - - return psin; -} - -void -close_pathman_status_info(PartStatusInfo *psin) -{ - /* Should always be called in transaction */ - Assert(IsTransactionState()); - - /* Should not be NULL */ - Assert(psin); - - /* Should be referenced elsewhere */ - Assert(psin->refcount > 0); - - /* Decrease recount */ - psin->refcount--; -} - -void -invalidate_pathman_status_info(Oid relid) -{ - PartStatusInfo *psin; - - psin = pathman_cache_search_relid(status_cache, - relid, HASH_FIND, - NULL); - - if (psin) - { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif - - /* Mark entry as invalid */ - psin->is_valid = false; - } -} - -void -invalidate_pathman_status_info_cache(void) -{ - HASH_SEQ_STATUS status; - PartStatusInfo *psin; - - while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) - { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - psin->relid, MyProcPid); -#endif - - /* Mark entry as invalid */ - psin->is_valid = false; - } -} - - /* * Partition bounds cache routines. 
*/ From b5d6405699223e9d931aff63dd621b5393d12c1c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Nov 2017 16:15:11 +0300 Subject: [PATCH 0787/1124] WIP refactoring, introduce has_pathman_relation_info() --- init.sql | 90 ++++++++++++++----------------------- range.sql | 43 +++++++++++++++++- src/hooks.c | 39 ++++++++-------- src/include/relation_info.h | 9 ++-- src/include/xact_handling.h | 2 - src/pathman_workers.c | 2 +- src/pl_funcs.c | 80 ++++++++++++++++----------------- src/relation_info.c | 32 +++++++++---- src/utility_stmt_hooking.c | 26 ++++++----- src/xact_handling.c | 20 --------- 10 files changed, 177 insertions(+), 166 deletions(-) diff --git a/init.sql b/init.sql index f54d48eb..001bb097 100644 --- a/init.sql +++ b/init.sql @@ -513,38 +513,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( - parent_relid REGCLASS) -RETURNS TEXT AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); - - RETURN seq_name; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - -CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - /* * Drop partitions. If delete_data set to TRUE, partitions * will be dropped with all the data. @@ -686,43 +654,51 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* - * Partitioning key. + * Get partitioning key. 
*/ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( - relid REGCLASS) + parent_relid REGCLASS) RETURNS TEXT AS $$ - SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; /* - * Partitioning key type. + * Get partitioning key type. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( - relid REGCLASS) -RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; /* - * Partitioning type. + * Get partitioning type. */ CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( - relid REGCLASS) + parent_relid REGCLASS) RETURNS INT4 AS $$ - SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; - /* * Get number of partitions managed by pg_pathman. */ CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( - parent_relid REGCLASS) -RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' -LANGUAGE C STRICT; + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. @@ -806,9 +782,9 @@ LANGUAGE C STRICT; * Add record to pathman_config (RANGE) and validate partitions. */ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT, - range_interval TEXT) + parent_relid REGCLASS, + expression TEXT, + range_interval TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; @@ -816,8 +792,8 @@ LANGUAGE C; * Add record to pathman_config (HASH) and validate partitions. 
*/ CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT) + parent_relid REGCLASS, + expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; @@ -866,9 +842,9 @@ LANGUAGE C; * Get parent of pg_pathman's partition. */ CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( - parent_relid REGCLASS, - expression TEXT, - value_type OID) + parent_relid REGCLASS, + expression TEXT, + value_type OID) RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' LANGUAGE C STRICT; @@ -877,8 +853,8 @@ LANGUAGE C STRICT; * bound_value is used to determine the type of bound */ CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( - relid REGCLASS, - bound_value ANYELEMENT + relid REGCLASS, + bound_value ANYELEMENT ) RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' LANGUAGE C STRICT; @@ -887,8 +863,8 @@ LANGUAGE C STRICT; * Get upper bound of a partition */ CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( - relid REGCLASS, - bound_value ANYELEMENT + relid REGCLASS, + bound_value ANYELEMENT ) RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index fa72df8d..8e64adb3 100644 --- a/range.sql +++ b/range.sql @@ -1020,6 +1020,44 @@ END $$ LANGUAGE plpgsql; +/* + * Create a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop a naming sequence for partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + /* * Merge multiple partitions. All data will be copied to the first one. * The rest of partitions will be dropped. @@ -1041,7 +1079,6 @@ CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; - CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, @@ -1075,12 +1112,14 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; +/* + * Generate a name for naming sequence. + */ CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; - /* * Returns N-th range (as an array of two elements). 
*/ diff --git a/src/hooks.c b/src/hooks.c index d02ec265..e8a882d9 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -63,13 +63,13 @@ allow_star_schema_join(PlannerInfo *root, } -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; -ProcessUtility_hook_type process_utility_hook_next = NULL; -ExecutorRun_hook_type executor_run_hook_next = NULL; +set_join_pathlist_hook_type set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; +planner_hook_type planner_hook_next = NULL; +post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type shmem_startup_hook_next = NULL; +ProcessUtility_hook_type process_utility_hook_next = NULL; +ExecutorRun_hook_type executor_run_hook_next = NULL; /* Take care of joins */ @@ -101,7 +101,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; - /* We should only consider base relations */ + /* We should only consider base inner relations */ if (innerrel->reloptkind != RELOPT_BASEREL) return; @@ -113,9 +113,13 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) return; - /* Check that innerrel is a BASEREL with PartRelationInfo */ - if (innerrel->reloptkind != RELOPT_BASEREL || - !(inner_prel = get_pathman_relation_info(inner_rte->relid))) + /* Skip if inner table is not allowed to act as parent (e.g. 
FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, + inner_rte)) + return; + + /* Proceed iff relation 'innerrel' is partitioned */ + if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) return; /* @@ -142,7 +146,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, Oid outer_baserel = root->simple_rte_array[rti]->relid; /* Is it partitioned? */ - if (get_pathman_relation_info(outer_baserel)) + if (has_pathman_relation_info(outer_baserel)) count++; } @@ -153,11 +157,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, "of partitioned tables are not supported"))); } - /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, - inner_rte)) - return; - /* * These codes are used internally in the planner, but are not supported * by the executor (nor, indeed, by most of the planner). @@ -223,7 +222,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, Assert(outer); } - /* No way to do this in a parameterized inner path */ + /* No way to do this in a parameterized inner path */ if (saved_jointype == JOIN_UNIQUE_INNER) return; @@ -607,7 +606,7 @@ pathman_enable_assign_hook(bool newval, void *extra) /* * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from - * handling that tables. + * handling those tables. */ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) @@ -679,7 +678,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* * Post parse analysis hook. It makes sure the config is loaded before executing - * any statement, including utility commands + * any statement, including utility commands. 
*/ void pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 1a07ba00..e5503fe7 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -311,10 +311,11 @@ void refresh_pathman_relation_info(Oid relid); void invalidate_pathman_relation_info(Oid relid); void invalidate_pathman_relation_info_cache(void); void close_pathman_relation_info(PartRelationInfo *prel); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); +bool has_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, + bool unlock_if_not_found, + LockAcquireResult *lock_result); void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index a762f197..fe9f976c 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -32,7 +32,5 @@ bool xact_is_set_stmt(Node *stmt, const char *name); bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); -void prevent_data_modification_internal(Oid relid); - #endif /* XACT_HANDLING_H */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index db0e1da7..2db579b3 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -547,7 +547,7 @@ bgw_main_concurrent_part(Datum main_arg) } /* Make sure that relation has partitions */ - if (get_pathman_relation_info(part_slot->relid) == NULL) + if (!has_pathman_relation_info(part_slot->relid)) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 197c2347..35312ff9 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -42,9 +42,9 @@ /* 
Function declarations */ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); +PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); -PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_cache_stats_internal ); @@ -105,32 +105,36 @@ typedef struct */ /* - * Get number of relation's partitions managed by pg_pathman. + * Return parent of a specified partition. */ Datum -get_number_of_partitions_pl(PG_FUNCTION_ARGS) +get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); - const PartRelationInfo *prel; + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* If we couldn't find PartRelationInfo, return 0 */ - if ((prel = get_pathman_relation_info(parent)) == NULL) - PG_RETURN_INT32(0); + if (OidIsValid(parent)) + PG_RETURN_OID(parent); - PG_RETURN_INT32(PrelChildrenCount(prel)); + PG_RETURN_NULL(); } /* - * Get parent of a specified partition. + * Return partition key type. */ Datum -get_parent_of_partition_pl(PG_FUNCTION_ARGS) +get_partition_key_type_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0), - parent = get_parent_of_partition(partition); + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; - if (OidIsValid(parent)) - PG_RETURN_OID(parent); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + Oid result = prel->ev_type; + close_pathman_relation_info(prel); + + PG_RETURN_OID(result); + } PG_RETURN_NULL(); } @@ -151,7 +155,7 @@ is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) } /* - * Get min bound value for parent relation + * Get min bound value for parent relation. */ Datum get_lower_bound_pl(PG_FUNCTION_ARGS) @@ -163,7 +167,7 @@ get_lower_bound_pl(PG_FUNCTION_ARGS) } /* - * Get min bound value for parent relation + * Get min bound value for parent relation. 
*/ Datum get_upper_bound_pl(PG_FUNCTION_ARGS) @@ -183,21 +187,6 @@ get_base_type_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); } -/* - * Return partition key type. - */ -Datum -get_partition_key_type(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - const PartRelationInfo *prel; - - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_ANY); - - PG_RETURN_OID(prel->ev_type); -} - /* * Return tablespace name of a specified relation. */ @@ -650,7 +639,6 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } - Datum is_tuple_convertible(PG_FUNCTION_ARGS) { @@ -685,13 +673,13 @@ is_tuple_convertible(PG_FUNCTION_ARGS) PG_RETURN_BOOL(res); } + /* * ------------------------ * Useful string builders * ------------------------ */ - Datum build_check_constraint_name(PG_FUNCTION_ARGS) { @@ -706,13 +694,13 @@ build_check_constraint_name(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } + /* * ------------------------ * Cache & config updates * ------------------------ */ - /* * Try to add previously partitioned table to PATHMAN_CONFIG. */ @@ -889,7 +877,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } - /* * Invalidate relcache to refresh PartRelationInfo. */ @@ -954,12 +941,12 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) */ /* - * Acquire appropriate lock on a partitioned relation. + * Prevent concurrent modifiction of partitioning schema. */ Datum prevent_part_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0); /* Lock partitioned relation till transaction's end */ LockRelationOid(relid, ShareUpdateExclusiveLock); @@ -973,7 +960,19 @@ prevent_part_modification(PG_FUNCTION_ARGS) Datum prevent_data_modification(PG_FUNCTION_ARGS) { - prevent_data_modification_internal(PG_GETARG_OID(0)); + Oid relid = PG_GETARG_OID(0); + + /* + * Check that isolation level is READ COMMITTED. 
+ * Else we won't be able to see new rows + * which could slip through locks. + */ + if (!xact_is_level_read_committed()) + ereport(ERROR, + (errmsg("Cannot perform blocking partitioning operation"), + errdetail("Expected READ COMMITTED isolation level"))); + + LockRelationOid(relid, AccessExclusiveLock); PG_RETURN_VOID(); } @@ -1126,6 +1125,7 @@ is_operator_supported(PG_FUNCTION_ARGS) PG_RETURN_BOOL(OidIsValid(opid)); } + /* * ------- * DEBUG @@ -1145,7 +1145,7 @@ debug_capture(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* NOTE: just in case */ +/* Return pg_pathman's shared library version */ Datum pathman_version(PG_FUNCTION_ARGS) { diff --git a/src/relation_info.c b/src/relation_info.c index 9537f9a9..030407c5 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -190,11 +190,27 @@ invalidate_pathman_status_info(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { + PrelReferenceCount(prel) -= 1; +} + +/* Check if relation is partitioned by pg_pathman */ +bool +has_pathman_relation_info(Oid relid) +{ + PartRelationInfo *prel; + + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); + return true; + } + + return false; } /* Get PartRelationInfo from local cache */ -const PartRelationInfo * +PartRelationInfo * get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; @@ -273,13 +289,13 @@ get_pathman_relation_info(Oid relid) } /* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * +PartRelationInfo * get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found, LockAcquireResult *lock_result) { - const PartRelationInfo *prel; - LockAcquireResult acquire_result; + PartRelationInfo *prel; + LockAcquireResult acquire_result; /* Restrict concurrent partition creation (it's dangerous) */ acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); @@ -1297,10 +1313,10 @@ char * canonicalize_partitioning_expression(const Oid relid, const 
char *expr_cstr) { - Node *parse_tree; - Expr *expr; - char *query_string; - Query *query; + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; AssertTemporaryContext(); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index b08b53e1..60581ed9 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -105,7 +105,7 @@ is_pathman_related_copy(Node *parsetree) false); /* Check that relation is partitioned */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) { ListCell *lc; @@ -143,10 +143,9 @@ is_pathman_related_table_rename(Node *parsetree, Oid *relation_oid_out, /* ret value #1 */ bool *is_parent_out) /* ret value #2 */ { - RenameStmt *rename_stmt = (RenameStmt *) parsetree; - Oid relation_oid, - parent_relid; - const PartRelationInfo *prel; + RenameStmt *rename_stmt = (RenameStmt *) parsetree; + Oid relation_oid, + parent_relid; Assert(IsPathmanReady()); @@ -166,7 +165,7 @@ is_pathman_related_table_rename(Node *parsetree, false); /* Assume it's a parent */ - if (get_pathman_relation_info(relation_oid)) + if (has_pathman_relation_info(relation_oid)) { if (relation_oid_out) *relation_oid_out = relation_oid; @@ -176,11 +175,12 @@ is_pathman_related_table_rename(Node *parsetree, } /* Assume it's a partition, fetch its parent */ - if (!OidIsValid(parent_relid = get_parent_of_partition(relation_oid))) + parent_relid = get_parent_of_partition(relation_oid); + if (!OidIsValid(parent_relid)) return false; /* Is parent partitioned? 
*/ - if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + if (has_pathman_relation_info(parent_relid)) { if (relation_oid_out) *relation_oid_out = relation_oid; @@ -201,10 +201,10 @@ is_pathman_related_alter_column_type(Node *parsetree, AttrNumber *attr_number_out, PartType *part_type_out) { - AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; - ListCell *lc; - Oid parent_relid; - const PartRelationInfo *prel; + AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; + ListCell *lc; + Oid parent_relid; + PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -226,6 +226,8 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Return 'parent_relid' and 'prel->parttype' */ if (parent_relid_out) *parent_relid_out = parent_relid; if (part_type_out) *part_type_out = prel->parttype; + + close_pathman_relation_info(prel); } else return false; diff --git a/src/xact_handling.c b/src/xact_handling.c index c6696cce..ff22a040 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -212,23 +212,3 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) SET_LOCKTAG_RELATION(*tag, dbid, relid); } - - -/* - * Lock relation exclusively & check for current isolation level. - */ -void -prevent_data_modification_internal(Oid relid) -{ - /* - * Check that isolation level is READ COMMITTED. - * Else we won't be able to see new rows - * which could slip through locks. 
- */ - if (!xact_is_level_read_committed()) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Expected READ COMMITTED isolation level"))); - - LockRelationOid(relid, AccessExclusiveLock); -} From d1a20b69b0f9cadcac3ba1890246676a15e6c994 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 13 Nov 2017 18:19:00 +0300 Subject: [PATCH 0788/1124] WIP huge refactoring in *.sql files, remove checks for subpartitions --- init.sql | 34 +---- range.sql | 256 ++++++------------------------------ src/include/relation_info.h | 6 - src/pl_funcs.c | 44 ------- src/relation_info.c | 86 +----------- 5 files changed, 42 insertions(+), 384 deletions(-) diff --git a/init.sql b/init.sql index 001bb097..12546cca 100644 --- a/init.sql +++ b/init.sql @@ -704,8 +704,7 @@ LANGUAGE sql STRICT; * Get parent of pg_pathman's partition. */ CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( - partition_relid REGCLASS, - raise_error BOOL DEFAULT TRUE) + partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; @@ -838,37 +837,6 @@ CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; -/* - * Get parent of pg_pathman's partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.is_equal_to_partitioning_expression( - parent_relid REGCLASS, - expression TEXT, - value_type OID) -RETURNS BOOL AS 'pg_pathman', 'is_equal_to_partitioning_expression_pl' -LANGUAGE C STRICT; - -/* - * Get lower bound of a partitioned relation - * bound_value is used to determine the type of bound - */ -CREATE OR REPLACE FUNCTION @extschema@.get_lower_bound( - relid REGCLASS, - bound_value ANYELEMENT -) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_lower_bound_pl' -LANGUAGE C STRICT; - -/* - * Get upper bound of a partition - */ -CREATE OR REPLACE FUNCTION @extschema@.get_upper_bound( - relid REGCLASS, - bound_value ANYELEMENT -) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_upper_bound_pl' -LANGUAGE C STRICT; - /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. */ diff --git a/range.sql b/range.sql index 8e64adb3..dad82ff2 100644 --- a/range.sql +++ b/range.sql @@ -46,29 +46,6 @@ BEGIN END $$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION @extschema@.has_parent_partitioned_by_expression( - parent_relid REGCLASS, - expression TEXT, - expr_type REGTYPE) -RETURNS BOOL AS $$ -DECLARE - relid REGCLASS; - part_type INTEGER; -BEGIN - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, expression, expr_type) - THEN - RETURN TRUE; - END IF; - END IF; - - RETURN FALSE; -END -$$ LANGUAGE plpgsql; - /* * Creates RANGE partitions for specified relation based on datetime attribute */ @@ -82,37 +59,17 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE rows_count BIGINT; - value_type REGTYPE; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - lower_bound start_value%TYPE = NULL; - upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; + 
BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * Check that we're trying to make subpartitions. - * If expressions are same then we set and use upper bound. - * We change start_value if it's greater than lower bound. - */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, value_type) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE WARNING '"start_value" was set to %', start_value; - END IF; - END IF; - IF p_count < 0 THEN RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; @@ -128,7 +85,6 @@ BEGIN p_count := 0; WHILE cur_value <= max_value - OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -140,36 +96,18 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP end_value := end_value + p_interval; - IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN - part_count := i; - IF end_value > upper_bound THEN - RAISE WARNING '"p_interval" is not multiple of range (%, %)', - start_value, end_value; - END IF; - IF p_count != part_count THEN - p_count := part_count; - RAISE NOTICE '"p_count" was limited to %', p_count; - END IF; - - /* we got our partitions count */ - EXIT; - END IF; END LOOP; /* Check boundaries */ - EXECUTE - format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', - parent_relid, - start_value, - end_value, - value_type::TEXT) - USING - expression; + PERFORM @extschema@.check_boundaries(parent_relid, + 
expression, + start_value, + end_value); END IF; /* Create sequence for child partitions names */ @@ -213,38 +151,18 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( partition_data BOOLEAN DEFAULT TRUE) RETURNS INTEGER AS $$ DECLARE - value_type REGTYPE; rows_count BIGINT; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; - lower_bound start_value%TYPE = NULL; - upper_bound start_value%TYPE = NULL; part_count INTEGER := 0; i INTEGER; + BEGIN PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - - /* - * Check that we're trying to make subpartitions. - * If expressions are same then we set and use upper bound. - * We change start_value if it's greater than lower bound. - */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, value_type) - THEN - lower_bound := @extschema@.get_lower_bound(parent_relid, start_value); - upper_bound := @extschema@.get_upper_bound(parent_relid, start_value); - IF lower_bound != start_value THEN - start_value := lower_bound; - RAISE WARNING '"start_value" was set to %', start_value; - END IF; - END IF; - IF p_count < 0 THEN RAISE EXCEPTION 'partitions count must not be less than zero'; END IF; @@ -264,7 +182,6 @@ BEGIN p_count := 0; WHILE cur_value <= max_value - OR (upper_bound IS NOT NULL AND cur_value < upper_bound) LOOP cur_value := cur_value + p_interval; p_count := p_count + 1; @@ -276,28 +193,14 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP end_value := end_value + p_interval; - IF upper_bound IS NOT NULL AND end_value >= upper_bound THEN - part_count := i; - IF end_value > upper_bound THEN - RAISE WARNING 
'"p_interval" is not multiple of range (%, %)', - start_value, end_value; - END IF; - IF p_count != part_count THEN - p_count := part_count; - RAISE NOTICE '"p_count" was limited to %', p_count; - END IF; - - /* we got our partitions count */ - EXIT; - END IF; END LOOP; - /* check boundaries */ + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, start_value, @@ -346,7 +249,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; - part_bounds bounds%TYPE; BEGIN IF array_ndims(bounds) > 1 THEN @@ -361,26 +263,6 @@ BEGIN expression, partition_data); - /* - * Subpartitions checks, in array version of create_range_partitions - * we raise exception instead of notice - */ - IF @extschema@.has_parent_partitioned_by_expression(parent_relid, - expression, pg_typeof(bounds[1])) - THEN - part_bounds[1] := @extschema@.get_lower_bound(parent_relid, bounds[1]); - part_bounds[2] := @extschema@.get_upper_bound(parent_relid, bounds[1]); - IF part_bounds[1] != bounds[1] THEN - RAISE EXCEPTION 'Bounds should start from %', part_bounds[1]; - END IF; - END IF; - - IF part_bounds[2] IS NOT NULL AND - bounds[array_length(bounds, 1) - 1] > part_bounds[2] - THEN - RAISE EXCEPTION 'Lower bound of rightmost partition should be less than %', part_bounds[2]; - END IF; - /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, @@ -425,7 +307,6 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( RETURNS ANYARRAY AS $$ DECLARE parent_relid REGCLASS; - inhparent REGCLASS; part_type INTEGER; part_expr TEXT; part_expr_type REGTYPE; @@ -439,23 +320,22 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') - USING partition_relid - INTO inhparent; - - if inhparent IS NOT NULL THEN - RAISE EXCEPTION 'could 
not split partition if it has children'; - END IF; - - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + /* Acquire data modification lock (prevent further modifications) */ PERFORM @extschema@.prevent_data_modification(partition_relid); + /* Check that partition is not partitioned */ + if @extschema@.get_number_of_partitions(partition_relid) > 0 THEN + RAISE EXCEPTION 'cannot split partition that has children'; + END IF; + part_expr_type = @extschema@.get_partition_key_type(parent_relid); part_expr := @extschema@.get_partition_key(parent_relid); - part_type := @extschema@.get_partition_type(parent_relid); /* Check if this is a RANGE partition */ @@ -540,7 +420,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -571,26 +451,6 @@ BEGIN END $$ LANGUAGE plpgsql; - -/* - * NOTE: we need this function just to determine the type - * of "upper_bound" var - */ -CREATE OR REPLACE FUNCTION @extschema@.check_against_upper_bound_internal( - relid REGCLASS, - bound_value ANYELEMENT, - error_message TEXT) -RETURNS VOID AS $$ -DECLARE - upper_bound bound_value%TYPE; -BEGIN - upper_bound := get_upper_bound(relid, bound_value); - IF bound_value >= upper_bound THEN - RAISE EXCEPTION '%', error_message; - END IF; -END -$$ LANGUAGE plpgsql; - /* * Spawn logic for append_partition(). We have to * separate this in order to pass the 'p_range'. 
@@ -606,12 +466,10 @@ CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( tablespace TEXT DEFAULT NULL) RETURNS TEXT AS $$ DECLARE - relid REGCLASS; part_expr_type REGTYPE; part_name TEXT; v_args_format TEXT; - part_expr TEXT; - part_type INTEGER; + BEGIN IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN RAISE EXCEPTION 'cannot append to empty partitions set'; @@ -629,24 +487,6 @@ BEGIN RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; END IF; - /* - * In case a user has used same expression on two levels, we need to check - * that we've not reached upper bound of higher partitioned table - */ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO part_expr; - - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, part_expr, part_expr_type) - THEN - PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, - p_range[2], 'reached upper bound in the current level of subpartitions'); - END IF; - END IF; - IF @extschema@.is_date_type(p_atttype) THEN v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE @@ -684,7 +524,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -788,14 +628,14 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF start_value >= end_value THEN RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; END IF; - /* check range overlap */ + 
/* Check range overlap */ IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN PERFORM @extschema@.check_range_available(parent_relid, start_value, @@ -842,7 +682,7 @@ BEGIN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN @@ -886,16 +726,15 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE part_expr TEXT; - part_expr_type REGTYPE; part_type INTEGER; rel_persistence CHAR; v_init_callback REGPROCEDURE; - relid REGCLASS; + BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); /* Ignore temporary tables */ @@ -907,41 +746,28 @@ BEGIN partition_relid::TEXT; END IF; - /* check range overlap */ + /* Check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; END IF; - /* - * In case a user has used same expression on two levels, we need to check - * that we've not reached upper bound of higher partitioned table - */ - relid := @extschema@.get_parent_of_partition(parent_relid, false); - IF relid IS NOT NULL THEN - part_expr_type := @extschema@.get_partition_key_type(parent_relid); - SELECT expr FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO part_expr; - - part_type := @extschema@.get_partition_type(relid); - IF (part_type = 2) AND @extschema@.is_equal_to_partitioning_expression( - relid, part_expr, part_expr_type) - THEN - PERFORM @extschema@.check_against_upper_bound_internal(parent_relid, - start_value, '"start value" exceeds upper bound of the 
current level of subpartitions'); - END IF; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); IF part_expr IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + /* Set check constraint */ EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, @@ -978,7 +804,6 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; - inhparent REGCLASS; part_type INTEGER; BEGIN @@ -987,13 +812,8 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - EXECUTE format('SELECT inhparent::REGCLASS FROM pg_inherits WHERE inhparent = $1 LIMIT 1') - USING partition_relid - INTO inhparent; - - if inhparent IS NOT NULL THEN - RAISE EXCEPTION 'could not detach partition if it has children'; - END IF; + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); /* Acquire lock on parent */ PERFORM @extschema@.prevent_data_modification(parent_relid); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index e5503fe7..cd262532 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -324,8 +324,6 @@ void shout_if_prel_is_invalid(const Oid parent_oid, /* Bounds cache */ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); -Datum get_lower_bound(Oid partition_relid, Oid value_type); -Datum get_upper_bound(Oid 
partition_relid, Oid value_type); /* Parent cache */ void cache_parent_of_partition(Oid partition, Oid parent); @@ -345,10 +343,6 @@ Datum cook_partitioning_expression(const Oid relid, char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); -bool is_equal_to_partitioning_expression(const Oid relid, - const char *expression, - const Oid value_type); - /* Partitioning expression routines */ Node *parse_partitioning_expression(const Oid relid, const char *expr_cstr, diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 35312ff9..07d0cfb3 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -72,11 +72,6 @@ PG_FUNCTION_INFO_V1( check_security_policy ); PG_FUNCTION_INFO_V1( debug_capture ); PG_FUNCTION_INFO_V1( pathman_version ); -PG_FUNCTION_INFO_V1( get_lower_bound_pl ); -PG_FUNCTION_INFO_V1( get_upper_bound_pl ); -PG_FUNCTION_INFO_V1( is_equal_to_partitioning_expression_pl ); - - /* User context for function show_partition_list_internal() */ typedef struct { @@ -139,45 +134,6 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } -/* - * Get parent of a specified partition. - */ -Datum -is_equal_to_partitioning_expression_pl(PG_FUNCTION_ARGS) -{ - bool result; - Oid parent_relid = PG_GETARG_OID(0); - char *expr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - Oid value_type = PG_GETARG_OID(2); - - result = is_equal_to_partitioning_expression(parent_relid, expr, value_type); - PG_RETURN_BOOL(result); -} - -/* - * Get min bound value for parent relation. - */ -Datum -get_lower_bound_pl(PG_FUNCTION_ARGS) -{ - Oid partition_relid = PG_GETARG_OID(0); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - - PG_RETURN_POINTER(get_lower_bound(partition_relid, value_type)); -} - -/* - * Get min bound value for parent relation. 
- */ -Datum -get_upper_bound_pl(PG_FUNCTION_ARGS) -{ - Oid partition_relid = PG_GETARG_OID(0); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - - PG_RETURN_POINTER(get_upper_bound(partition_relid, value_type)); -} - /* * Extract basic type of a domain. */ diff --git a/src/relation_info.c b/src/relation_info.c index 030407c5..df348914 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -179,6 +179,9 @@ invalidate_pathman_status_info(PartStatusInfo *psin) } else { + if (psin->prel) + free_pathman_relation_info(psin->prel); + (void) pathman_cache_search_relid(status_cache, psin->relid, HASH_REMOVE, @@ -266,11 +269,6 @@ get_pathman_relation_info(Oid relid) &found); Assert(!found); } - /* Otherwise, free old entry */ - else if (psin->prel) - { - free_pathman_relation_info(psin->prel); - } /* Cache fresh entry */ psin->prel = prel; @@ -794,60 +792,6 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) return pbin; } -/* Get lower bound of a partition */ -Datum -get_lower_bound(Oid partition_relid, Oid value_type) -{ - Oid parent_relid; - Datum result; - const PartRelationInfo *prel; - const PartBoundInfo *pbin; - - parent_relid = get_parent_of_partition(partition_relid); - if (!OidIsValid(parent_relid)) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)); - - prel = get_pathman_relation_info(parent_relid); - pbin = get_bounds_of_partition(partition_relid, prel); - - if (IsInfinite(&pbin->range_min)) - return PointerGetDatum(NULL); - - result = BoundGetValue(&pbin->range_min); - if (value_type != prel->ev_type) - result = perform_type_cast(result, prel->ev_type, value_type, NULL); - - return result; -} - -/* Get upper bound of a partition */ -Datum -get_upper_bound(Oid partition_relid, Oid value_type) -{ - Oid parent_relid; - Datum result; - const PartRelationInfo *prel; - const PartBoundInfo *pbin; - - parent_relid = get_parent_of_partition(partition_relid); - if 
(!OidIsValid(parent_relid)) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)); - - prel = get_pathman_relation_info(parent_relid); - pbin = get_bounds_of_partition(partition_relid, prel); - - if (IsInfinite(&pbin->range_max)) - return PointerGetDatum(NULL); - - result = BoundGetValue(&pbin->range_max); - if (value_type != prel->ev_type) - result = perform_type_cast(result, prel->ev_type, value_type, NULL); - - return result; -} - /* * Get constraint expression tree of a partition. * @@ -1333,30 +1277,6 @@ canonicalize_partitioning_expression(const Oid relid, false, false); } -/* Check that expression is equal to expression of some partitioned table */ -bool -is_equal_to_partitioning_expression(const Oid relid, - const char *expression, - const Oid value_type) -{ - const PartRelationInfo *prel; - char *cexpr; - Oid expr_type; - - /* - * Cook and get a canonicalized expression, - * we don't need a result of the cooking - */ - cook_partitioning_expression(relid, expression, &expr_type); - cexpr = canonicalize_partitioning_expression(relid, expression); - - prel = get_pathman_relation_info(relid); - Assert(prel); - - return (getBaseType(expr_type) == value_type) && - (strcmp(cexpr, prel->expr_cstr) == 0); -} - /* Check if query has subqueries */ static bool query_contains_subqueries(Node *node, void *context) From 8a4698ca93190c8ec94333e9e02c9ce1e143fdc5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Nov 2017 14:58:46 +0300 Subject: [PATCH 0789/1124] WIP refactoring, fix functions close_pathman_relation_info() & drop_range_partition_expand_next() --- src/include/partition_creation.h | 10 --- src/include/relation_info.h | 10 +++ src/include/utils.h | 3 +- src/partition_creation.c | 139 ++++++++++++------------------- src/pl_funcs.c | 8 +- src/pl_hash_funcs.c | 2 +- src/pl_range_funcs.c | 136 ++++++++++++++++-------------- src/relation_info.c | 7 +- src/utility_stmt_hooking.c | 1 + src/utils.c | 1 + 10 files 
changed, 148 insertions(+), 169 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index c0dd91e6..63768a95 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -81,16 +81,6 @@ void drop_pathman_check_constraint(Oid relid); void add_pathman_check_constraint(Oid relid, Constraint *constraint); -/* Update triggers */ -void create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns); - -bool has_update_trigger_internal(Oid parent); - -void drop_single_update_trigger_internal(Oid relid, - const char *trigname); - /* Partitioning callback type */ typedef enum { diff --git a/src/include/relation_info.h b/src/include/relation_info.h index cd262532..34d88f0c 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -12,6 +12,8 @@ #define RELATION_INFO_H +#include "utils.h" + #include "postgres.h" #include "access/attnum.h" #include "access/sysattr.h" @@ -89,6 +91,14 @@ FreeBound(Bound *bound, bool byval) pfree(DatumGetPointer(BoundGetValue(bound))); } +static inline char * +BoundToCString(const Bound *bound, Oid value_type) +{ + return IsInfinite(bound) ? + pstrdup("NULL") : + datum_to_cstring(bound->value, value_type); +} + static inline int cmp_bounds(FmgrInfo *cmp_func, const Oid collid, diff --git a/src/include/utils.h b/src/include/utils.h index 8fccded1..b45ed1db 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -12,10 +12,9 @@ #define PATHMAN_UTILS_H -#include "pathman.h" - #include "postgres.h" #include "parser/parse_oper.h" +#include "fmgr.h" /* diff --git a/src/partition_creation.c b/src/partition_creation.c index e1c1f1bb..05a6f508 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -334,16 +334,16 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, PG_TRY(); { - const PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? 
*/ - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + LockAcquireResult lock_result; /* could we lock the parent? */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - Oid base_bound_type; /* base type of prel->ev_type */ - Oid base_value_type; /* base type of value_type */ + PartRelationInfo *prel; + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ /* Fetch PartRelationInfo by 'relid' */ prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); @@ -426,6 +426,9 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, value, base_value_type, prel->ev_collid); } + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } else elog(ERROR, "table \"%s\" is not partitioned", @@ -1356,56 +1359,57 @@ check_range_available(Oid parent_relid, Oid value_type, bool raise_error) { - const PartRelationInfo *prel; - RangeEntry *ranges; - FmgrInfo cmp_func; - uint32 i; + PartRelationInfo *prel; + bool result = true; /* Try fetching the PartRelationInfo structure */ - prel = get_pathman_relation_info(parent_relid); - - /* If there's no prel, return TRUE (overlap is not possible) */ - if (!prel) + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - ereport(WARNING, (errmsg("table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)))); - return true; - } + RangeEntry *ranges; + FmgrInfo cmp_func; + uint32 i; - /* Emit an error if it is not partitioned by RANGE */ - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Fetch comparison function */ - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(value_type), - 
getBaseType(prel->ev_type)); + /* Fetch comparison function */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(value_type), + getBaseType(prel->ev_type)); - ranges = PrelGetRangesArray(prel); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - int c1, c2; + ranges = PrelGetRangesArray(prel); + for (i = 0; i < PrelChildrenCount(prel); i++) + { + int c1, c2; - c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); - c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); + c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); - /* There's something! */ - if (c1 < 0 && c2 > 0) - { - if (raise_error) - elog(ERROR, "specified range [%s, %s) overlaps " - "with existing partitions", - IsInfinite(start) ? - "NULL" : - datum_to_cstring(BoundGetValue(start), value_type), - IsInfinite(end) ? - "NULL" : - datum_to_cstring(BoundGetValue(end), value_type)); - - else return false; + /* There's something! */ + if (c1 < 0 && c2 > 0) + { + if (raise_error) + { + elog(ERROR, "specified range [%s, %s) overlaps " + "with existing partitions", + BoundToCString(start, value_type), + BoundToCString(end, value_type)); + } + /* Too bad, so sad */ + else result = false; + } } + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + } + else + { + ereport(WARNING, (errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); } - return true; + return result; } /* Build HASH check constraint expression tree */ @@ -1669,15 +1673,15 @@ invoke_init_callback_internal(init_callback_params *cb_params) *end_value = NULL; Bound sv_datum = cb_params->params.range_params.start_value, ev_datum = cb_params->params.range_params.end_value; - Oid type = cb_params->params.range_params.value_type; + Oid value_type = cb_params->params.range_params.value_type; /* Convert min to CSTRING */ if (!IsInfinite(&sv_datum)) - start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); + start_value = BoundToCString(&sv_datum, value_type); /* Convert max to CSTRING */ if (!IsInfinite(&ev_datum)) - end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); + end_value = BoundToCString(&ev_datum, value_type); pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); @@ -1861,42 +1865,3 @@ build_partitioning_expression(Oid parent_relid, return expr; } - -/* - * ------------------------- - * Update triggers management - * ------------------------- - */ - -/* Create trigger for partition */ -void -create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns) -{ - CreateTrigStmt *stmt; - List *func; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString(CppAsString(pathman_update_trigger_func))); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(partition_relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = columns; - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, 
- InvalidOid, InvalidOid, false); - - CommandCounterIncrement(); -} diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 07d0cfb3..ef2288f3 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -437,8 +437,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->min)) { Datum rmin = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->min), - prel->ev_type)); + BoundToCString(&re->min, + prel->ev_type)); values[Anum_pathman_pl_range_min - 1] = rmin; } @@ -448,8 +448,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->max)) { Datum rmax = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->max), - prel->ev_type)); + BoundToCString(&re->max, + prel->ev_type)); values[Anum_pathman_pl_range_max - 1] = rmax; } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 4f4238f5..f4a44b71 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -54,7 +54,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) RangeVar **rangevars = NULL; /* Check that there's no partitions yet */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot add new HASH partitions"))); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 93a78241..007a2937 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -72,7 +72,6 @@ static void modify_range_constraint(Oid partition_relid, Oid expression_type, const Bound *lower, const Bound *upper); -static void drop_table_by_oid(Oid relid); static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); @@ -710,11 +709,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) + { if (ranges[j].child_oid == parts[i]) { rentry_list = lappend(rentry_list, &ranges[j]); break; } + } } /* Check that partitions are adjacent */ @@ -765,67 +766,97 @@ 
merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Drop obsolete partitions */ for (i = 1; i < nparts; i++) - drop_table_by_oid(parts[i]); + { + ObjectAddress object; + + ObjectAddressSet(object, RelationRelationId, parts[i]); + performDeletion(&object, DROP_CASCADE, 0); + } } /* * Drops partition and expands the next partition - * so that it could cover the dropped one + * so that it could cover the dropped one. * - * This function was written in order to support Oracle-like ALTER TABLE ... - * DROP PARTITION. In Oracle partitions only have upper bound and when - * partition is dropped the next one automatically covers freed range + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. */ Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { - const PartRelationInfo *prel; - Oid relid = PG_GETARG_OID(0), - parent; - RangeEntry *ranges; - int i; + Oid partition = PG_GETARG_OID(0), + parent; + PartRelationInfo *prel; + + /* Lock the partition we're going to drop */ + LockRelationOid(partition, AccessExclusiveLock); + + /* Check if partition exists */ + if (!SearchSysCacheExists1(RELOID, partition)) + elog(ERROR, "relation %u does not exist", partition); /* Get parent's relid */ - parent = get_parent_of_partition(relid); + parent = get_parent_of_partition(partition); if (!OidIsValid(parent)) elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + get_rel_name(partition)); - /* Fetch PartRelationInfo and perform some checks */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_RANGE); + if ((prel = get_pathman_relation_info(parent)) != NULL) + { + ObjectAddress object; + RangeEntry *ranges; + int i; - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); + /* Emit an error if it is not partitioned 
by RANGE */ + shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == relid) - break; + /* Fetch ranges array */ + ranges = PrelGetRangesArray(prel); - /* - * It must be in ranges array because we already - * know that this table is a partition - */ - Assert(i < PrelChildrenCount(prel)); + /* Looking for partition in child relations */ + for (i = 0; i < PrelChildrenCount(prel); i++) + if (ranges[i].child_oid == partition) + break; - /* Expand next partition if it exists */ - if (i < PrelChildrenCount(prel) - 1) - { - RangeEntry *cur = &ranges[i], - *next = &ranges[i + 1]; - - /* Drop old constraint and create a new one */ - modify_range_constraint(next->child_oid, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); - } + /* Should have found it */ + Assert(i < PrelChildrenCount(prel)); + + /* Expand next partition if it exists */ + if (i < PrelChildrenCount(prel) - 1) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + LOCKMODE lockmode = AccessExclusiveLock; + + /* Lock next partition */ + LockRelationOid(next_partition, lockmode); + + /* Does next partition exist? */ + if (SearchSysCacheExists1(RELOID, next_partition)) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); + } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); + } - /* Finally drop this partition */ - drop_table_by_oid(relid); + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + } PG_RETURN_VOID(); } @@ -1226,24 +1257,3 @@ check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) last = cur; } } - -/* - * Drop table using it's Oid - */ -static void -drop_table_by_oid(Oid relid) -{ - DropStmt *n = makeNode(DropStmt); - const char *relname = get_qualified_rel_name(relid); - - n->removeType = OBJECT_TABLE; - n->missing_ok = false; - n->objects = list_make1(stringToQualifiedNameList(relname)); -#if PG_VERSION_NUM < 100000 - n->arguments = NIL; -#endif - n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; - - RemoveRelations(n); -} diff --git a/src/relation_info.c b/src/relation_info.c index df348914..44635ebf 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -193,6 +193,9 @@ invalidate_pathman_status_info(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + PrelReferenceCount(prel) -= 1; } @@ -977,7 +980,7 @@ get_parent_of_partition(Oid partition) /* Nice, we have a cached entry */ if (ppar) { - return ppar->child_relid; + return ppar->parent_relid; } /* Bad luck, let's search in catalog */ else @@ -1115,7 +1118,7 @@ cook_partitioning_expression(const Oid relid, */ parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, CppAsString(cook_partitioning_expression), - ALLOCSET_DEFAULT_SIZES); + ALLOCSET_SMALL_SIZES); /* Switch to mcxt for cooking :) */ old_mcxt = MemoryContextSwitchTo(parse_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 60581ed9..1f376c20 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -26,6 +26,7 @@ #include "commands/tablecmds.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" diff --git a/src/utils.c b/src/utils.c index ba4a754f..da0d314b 100644 --- a/src/utils.c 
+++ b/src/utils.c @@ -10,6 +10,7 @@ * ------------------------------------------------------------------------ */ +#include "pathman.h" #include "utils.h" #include "access/htup_details.h" From f81fcf7211dddcf80928a01ea56a827f4548ab28 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 14 Nov 2017 19:48:19 +0300 Subject: [PATCH 0790/1124] WIP simplified find_deepest_partition(), make use of has_pathman_relation_info() --- src/planner_tree_modification.c | 184 ++++++++++++++------------------ 1 file changed, 78 insertions(+), 106 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9e6d64e1..7b465dee 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -98,15 +98,6 @@ typedef struct } transform_query_cxt; - -typedef enum -{ - FP_FOUND, /* Found partition */ - FP_PLAIN_TABLE, /* Table isn't partitioned by pg_pathman */ - FP_NON_SINGULAR_RESULT /* Multiple or no partitions */ -} FindPartitionResult; - - static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); @@ -117,8 +108,7 @@ static void partition_router_visitor(Plan *plan, void *context); static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); -static FindPartitionResult find_deepest_partition(Oid relid, Index idx, - Expr *quals, Oid *partition); +static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); @@ -283,8 +273,8 @@ pathman_transform_query_walker(Node *node, void *context) default: break; } - next_context.parent_sublink = NULL; - next_context.parent_cte = NULL; + next_context.parent_sublink = NULL; + next_context.parent_cte = NULL; /* Assign Query a 'queryId' */ assign_query_id(query); @@ -350,7 +340,7 @@ disable_standard_inheritance(Query *parse, 
transform_query_cxt *context) /* Table may be partitioned */ if (rte->inh) { - const PartRelationInfo *prel; + PartRelationInfo *prel; #ifdef LEGACY_ROWMARKS_95 /* Don't process queries with RowMarks on 9.5 */ @@ -361,15 +351,15 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - /* - * HACK: unset the 'inh' flag to disable standard - * planning. We'll set it again later. - */ + /* HACK: unset the 'inh' flag to disable standard planning */ rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ assign_rel_parenthood_status(parse->queryId, rte, PARENTHOOD_ALLOWED); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ @@ -382,15 +372,11 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) static void handle_modification_query(Query *parse, transform_query_cxt *context) { - RangeTblEntry *rte; - Expr *quals; - Index result_rel; - Oid child; - FindPartitionResult fp_result; - ParamListInfo params; - - /* Fetch index of result relation */ - result_rel = parse->resultRelation; + RangeTblEntry *rte; + Expr *quals; + Oid child; + Index result_rel = parse->resultRelation; + ParamListInfo params = context->query_params; /* Exit if it's not a DELETE or UPDATE query */ if (result_rel == 0 || (parse->commandType != CMD_UPDATE && @@ -400,24 +386,20 @@ handle_modification_query(Query *parse, transform_query_cxt *context) rte = rt_fetch(result_rel, parse->rtable); /* Exit if it's DELETE FROM ONLY table */ - if (!rte->inh) return; + if (!rte->inh) + return; quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - params = context->query_params; - /* Check if we can replace PARAMs with CONSTs */ if (params && clause_contains_params((Node *) quals)) quals = (Expr *) eval_extern_params_mutator((Node *) quals, params); - 
/* Parse syntax tree and extract deepest partition */ - fp_result = find_deepest_partition(rte->relid, result_rel, quals, &child); + /* Parse syntax tree and extract deepest partition if possible */ + child = find_deepest_partition(rte->relid, result_rel, quals); - /* - * If only one partition is affected, - * substitute parent table with partition. - */ - if (fp_result == FP_FOUND) + /* Substitute parent table with partition */ + if (OidIsValid(child)) { Relation child_rel, parent_rel; @@ -450,7 +432,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) } /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); + child_rel = heap_open(child, NoLock); parent_rel = heap_open(parent, NoLock); /* Build a conversion map (may be trivial, i.e. NULL) */ @@ -459,15 +441,15 @@ handle_modification_query(Query *parse, transform_query_cxt *context) free_conversion_map((TupleConversionMap *) tuple_map); /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); + heap_close(child_rel, NoLock); heap_close(parent_rel, NoLock); - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! 
*/ + /* Exit if tuple map WAS NOT trivial */ + if (tuple_map) return; /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; + rte->relid = child; rte->relkind = child_relkind; /* HACK: unset the 'inh' flag (no children) */ @@ -521,12 +503,11 @@ partition_filter_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { - Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const PartRelationInfo *prel = get_pathman_relation_info(relid); + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); /* Check that table is partitioned */ - if (prel) + if (has_pathman_relation_info(relid)) { List *returning_list = NIL; @@ -578,19 +559,16 @@ partition_router_visitor(Plan *plan, void *context) lc3 = list_head(modify_table->returningLists); forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) { - Index rindex = lfirst_int(lc2); - Oid tmp_relid, - relid = getrelid(rindex, rtable); - const PartRelationInfo *prel; + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable), + tmp_relid; /* Find topmost parent */ while (OidIsValid(tmp_relid = get_parent_of_partition(relid))) relid = tmp_relid; /* Check that table is partitioned */ - prel = get_pathman_relation_info(relid); - - if (prel) + if (has_pathman_relation_info(relid)) { List *returning_list = NIL; @@ -694,75 +672,69 @@ modifytable_contains_fdw(List *rtable, ModifyTable *node) } /* - * Find a single deepest subpartition. - * Return InvalidOid if that's impossible. + * Find a single deepest subpartition using quals. + * Return InvalidOid if it's not possible. 
*/ -static FindPartitionResult -find_deepest_partition(Oid relid, Index idx, Expr *quals, Oid *partition) +static Oid +find_deepest_partition(Oid relid, Index rti, Expr *quals) { - const PartRelationInfo *prel; - Node *prel_expr; - WalkerContext context; - List *ranges; - WrapperNode *wrap; - - prel = get_pathman_relation_info(relid); - - /* Exit if it's not partitioned */ - if (!prel) - return FP_PLAIN_TABLE; - - /* Exit if we must include parent */ - if (prel->enable_parent) - return FP_NON_SINGULAR_RESULT; + PartRelationInfo *prel; + Oid result = InvalidOid; /* Exit if there's no quals (no use) */ if (!quals) - return FP_NON_SINGULAR_RESULT; + return result; - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, idx); + /* Try pruning if table is partitioned */ + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; - ranges = list_make1_irange_full(prel, IR_COMPLETE); + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, rti); - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL); - wrap = walk_expr_tree(quals, &context); - ranges = irange_list_intersection(ranges, wrap->rangeset); + /* First we select all available partitions... 
*/ + ranges = list_make1_irange_full(prel, IR_COMPLETE); - if (irange_list_length(ranges) == 1) - { - IndexRange irange = linitial_irange(ranges); + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); - if (irange_lower(irange) == irange_upper(irange)) + switch (irange_list_length(ranges)) { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - subpartition; - FindPartitionResult result; - - /* Try to go deeper and see if there is subpartition */ - result = find_deepest_partition(child, - idx, - quals, - &subpartition); - switch(result) - { - case FP_FOUND: - *partition = subpartition; - return FP_FOUND; + /* Scan only parent (don't do constraint elimination) */ + case 0: + result = relid; + break; - case FP_PLAIN_TABLE: - *partition = child; - return FP_FOUND; + /* Handle the remaining partition */ + case 1: + if (!prel->enable_parent) + { + IndexRange irange = linitial_irange(ranges); + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)]; - case FP_NON_SINGULAR_RESULT: - return FP_NON_SINGULAR_RESULT; - } + /* Try to go deeper and see if there are subpartitions */ + result = find_deepest_partition(child, rti, quals); + } + break; + + default: + break; } + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); } + /* Otherwise, return this table */ + else result = relid; - return FP_NON_SINGULAR_RESULT; + return result; } /* Replace extern param nodes with consts */ From d383d71dbb551b892084f095351daf37098841a6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 17 Nov 2017 18:07:40 +0300 Subject: [PATCH 0791/1124] WIP place some more close_pathman_relation_info() --- src/hooks.c | 398 ++++++++++++++++---------------- src/include/relation_info.h | 3 - src/partition_creation.c | 16 +- src/pathman_workers.c | 16 +- src/pl_funcs.c | 33 +-- src/pl_range_funcs.c | 203 ++++++++-------- src/planner_tree_modification.c | 7 +- src/relation_info.c | 23 -- src/utility_stmt_hooking.c | 24 +- 9 files changed, 363 insertions(+), 360 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index e8a882d9..e9b894c7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -84,7 +84,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, JoinCostWorkspace workspace; JoinType saved_jointype = jointype; RangeTblEntry *inner_rte = root->simple_rte_array[innerrel->relid]; - const PartRelationInfo *inner_prel; + PartRelationInfo *inner_prel; List *joinclauses, *otherclauses; WalkerContext context; @@ -109,8 +109,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (inner_rte->inh) return; - /* We can't handle full or right outer joins */ - if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) + /* We don't support these join types (since inner will be parameterized) */ + if (jointype == JOIN_FULL || + jointype == JOIN_RIGHT || + jointype == JOIN_UNIQUE_INNER) return; /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ @@ -157,12 +159,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, "of partitioned tables are not supported"))); } - /* - * These codes are used internally in the planner, but are not supported - * by the executor (nor, indeed, by most of the planner). 
- */ + /* Replace virtual join types with a real one */ if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER) - jointype = JOIN_INNER; /* replace with a proper value */ + jointype = JOIN_INNER; /* Extract join clauses which will separate partitions */ if (IS_OUTER_JOIN(extra->sjinfo->jointype)) @@ -222,11 +221,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, Assert(outer); } - /* No way to do this in a parameterized inner path */ - if (saved_jointype == JOIN_UNIQUE_INNER) - return; - - /* Make inner path depend on outerrel's columns */ required_inner = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), outerrel->relids); @@ -245,10 +239,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, innerrel->relid))) continue; + /* Try building RuntimeAppend path, skip if it's not possible */ inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); if (!inner) - return; /* could not build it, retreat! */ - + continue; required_nestloop = calc_nestloop_required_outer_compat(outer, inner); @@ -263,7 +257,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, ((!bms_overlap(required_nestloop, extra->param_source_rels) && !allow_star_schema_join(root, outer, inner)) || have_dangerous_phv(root, outer->parent->relids, required_inner))) - return; + continue; initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, extra); @@ -299,6 +293,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Finally we can add the new NestLoop path */ add_path(joinrel, (Path *) nest_path); } + + /* Don't forget to close 'inner_prel'! 
*/ + close_pathman_relation_info(inner_prel); } /* Cope with simple relations */ @@ -308,8 +305,21 @@ pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte) { - const PartRelationInfo *prel; - int irange_len; + PartRelationInfo *prel; + Relation parent_rel; /* parent's relation (heap) */ + PlanRowMark *parent_rowmark; /* parent's rowmark */ + Oid *children; /* selected children oids */ + List *ranges, /* a list of IndexRanges */ + *wrappers; /* a list of WrapperNodes */ + PathKey *pathkeyAsc = NULL, + *pathkeyDesc = NULL; + double paramsel = 1.0; /* default part selectivity */ + WalkerContext context; + Node *part_expr; + List *part_clauses; + ListCell *lc; + int irange_len, + i; /* Invoke original hook if needed */ if (set_rel_pathlist_hook_next != NULL) @@ -344,231 +354,221 @@ pathman_rel_pathlist_hook(PlannerInfo *root, return; /* Proceed iff relation 'rel' is partitioned */ - if ((prel = get_pathman_relation_info(rte->relid)) != NULL) - { - Relation parent_rel; /* parent's relation (heap) */ - PlanRowMark *parent_rowmark; /* parent's rowmark */ - Oid *children; /* selected children oids */ - List *ranges, /* a list of IndexRanges */ - *wrappers; /* a list of WrapperNodes */ - PathKey *pathkeyAsc = NULL, - *pathkeyDesc = NULL; - double paramsel = 1.0; /* default part selectivity */ - WalkerContext context; - Node *part_expr; - List *part_clauses; - ListCell *lc; - int i; + if ((prel = get_pathman_relation_info(rte->relid)) == NULL) + return; - /* - * Check that this child is not the parent table itself. - * This is exactly how standard inheritance works. - * - * Helps with queries like this one: - * - * UPDATE test.tmp t SET value = 2 - * WHERE t.id IN (SELECT id - * FROM test.tmp2 t2 - * WHERE id = t.id); - * - * Since we disable optimizations on 9.5, we - * have to skip parent table that has already - * been expanded by standard inheritance. 
- */ - if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) + /* + * Check that this child is not the parent table itself. + * This is exactly how standard inheritance works. + * + * Helps with queries like this one: + * + * UPDATE test.tmp t SET value = 2 + * WHERE t.id IN (SELECT id + * FROM test.tmp2 t2 + * WHERE id = t.id); + * + * Since we disable optimizations on 9.5, we + * have to skip parent table that has already + * been expanded by standard inheritance. + */ + if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) + { + foreach (lc, root->append_rel_list) { - foreach (lc, root->append_rel_list) - { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); - RangeTblEntry *cur_parent_rte, - *cur_child_rte; + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + RangeTblEntry *cur_parent_rte, + *cur_child_rte; - /* This 'appinfo' is not for this child */ - if (appinfo->child_relid != rti) - continue; + /* This 'appinfo' is not for this child */ + if (appinfo->child_relid != rti) + continue; - cur_parent_rte = root->simple_rte_array[appinfo->parent_relid]; - cur_child_rte = rte; /* we already have it, saves time */ + cur_parent_rte = root->simple_rte_array[appinfo->parent_relid]; + cur_child_rte = rte; /* we already have it, saves time */ - /* This child == its own parent table! */ - if (cur_parent_rte->relid == cur_child_rte->relid) - return; - } + /* This child == its own parent table! 
*/ + if (cur_parent_rte->relid == cur_child_rte->relid) + goto cleanup; } + } - /* Make copy of partitioning expression and fix Var's varno attributes */ - part_expr = PrelExpressionForRelid(prel, rti); + /* Make copy of partitioning expression and fix Var's varno attributes */ + part_expr = PrelExpressionForRelid(prel, rti); - /* Get partitioning-related clauses (do this before append_child_relation()) */ - part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); + /* Get partitioning-related clauses (do this before append_child_relation()) */ + part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); - if (prel->parttype == PT_RANGE) - { - /* - * Get pathkeys for ascending and descending sort by partitioned column. - */ - List *pathkeys; - TypeCacheEntry *tce; - - /* Determine operator type */ - tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - - /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->lt_opr, NULL, false); - if (pathkeys) - pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->gt_opr, NULL, false); - if (pathkeys) - pathkeyDesc = (PathKey *) linitial(pathkeys); - } + if (prel->parttype == PT_RANGE) + { + /* + * Get pathkeys for ascending and descending sort by partitioned column. 
+ */ + List *pathkeys; + TypeCacheEntry *tce; + + /* Determine operator type */ + tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + + /* Make pathkeys */ + pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, + tce->lt_opr, NULL, false); + if (pathkeys) + pathkeyAsc = (PathKey *) linitial(pathkeys); + pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, + tce->gt_opr, NULL, false); + if (pathkeys) + pathkeyDesc = (PathKey *) linitial(pathkeys); + } - /* mark as partitioned table */ - MarkPartitionedRTE(rti); + /* mark as partitioned table */ + MarkPartitionedRTE(rti); - children = PrelGetChildrenArray(prel); - ranges = list_make1_irange_full(prel, IR_COMPLETE); + children = PrelGetChildrenArray(prel); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, part_expr, prel, NULL); - wrappers = NIL; - foreach(lc, rel->baserestrictinfo) - { - WrapperNode *wrap; - RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + /* Make wrappers over restrictions and collect final rangeset */ + InitWalkerContext(&context, part_expr, prel, NULL); + wrappers = NIL; + foreach(lc, rel->baserestrictinfo) + { + WrapperNode *wrap; + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - wrap = walk_expr_tree(rinfo->clause, &context); + wrap = walk_expr_tree(rinfo->clause, &context); - paramsel *= wrap->paramsel; - wrappers = lappend(wrappers, wrap); - ranges = irange_list_intersection(ranges, wrap->rangeset); - } + paramsel *= wrap->paramsel; + wrappers = lappend(wrappers, wrap); + ranges = irange_list_intersection(ranges, wrap->rangeset); + } - /* Get number of selected partitions */ - irange_len = irange_list_length(ranges); - if (prel->enable_parent) - irange_len++; /* also add parent */ + /* Get number of selected partitions */ + irange_len = irange_list_length(ranges); + if (prel->enable_parent) + irange_len++; /* also add parent */ - 
/* Expand simple_rte_array and simple_rel_array */ - if (irange_len > 0) - { - int current_len = root->simple_rel_array_size, - new_len = current_len + irange_len; + /* Expand simple_rte_array and simple_rel_array */ + if (irange_len > 0) + { + int current_len = root->simple_rel_array_size, + new_len = current_len + irange_len; - /* Expand simple_rel_array */ - root->simple_rel_array = (RelOptInfo **) - repalloc(root->simple_rel_array, - new_len * sizeof(RelOptInfo *)); + /* Expand simple_rel_array */ + root->simple_rel_array = (RelOptInfo **) + repalloc(root->simple_rel_array, + new_len * sizeof(RelOptInfo *)); - memset((void *) &root->simple_rel_array[current_len], 0, - irange_len * sizeof(RelOptInfo *)); + memset((void *) &root->simple_rel_array[current_len], 0, + irange_len * sizeof(RelOptInfo *)); - /* Expand simple_rte_array */ - root->simple_rte_array = (RangeTblEntry **) - repalloc(root->simple_rte_array, - new_len * sizeof(RangeTblEntry *)); + /* Expand simple_rte_array */ + root->simple_rte_array = (RangeTblEntry **) + repalloc(root->simple_rte_array, + new_len * sizeof(RangeTblEntry *)); - memset((void *) &root->simple_rte_array[current_len], 0, - irange_len * sizeof(RangeTblEntry *)); + memset((void *) &root->simple_rte_array[current_len], 0, + irange_len * sizeof(RangeTblEntry *)); - /* Don't forget to update array size! */ - root->simple_rel_array_size = new_len; - } + /* Don't forget to update array size! */ + root->simple_rel_array_size = new_len; + } - /* Parent has already been locked by rewriter */ - parent_rel = heap_open(rte->relid, NoLock); + /* Parent has already been locked by rewriter */ + parent_rel = heap_open(rte->relid, NoLock); - parent_rowmark = get_plan_rowmark(root->rowMarks, rti); + parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - /* - * WARNING: 'prel' might become invalid after append_child_relation(). - */ + /* + * WARNING: 'prel' might become invalid after append_child_relation(). 
+ */ - /* Add parent if asked to */ - if (prel->enable_parent) - append_child_relation(root, parent_rel, parent_rowmark, - rti, 0, rte->relid, NULL); + /* Add parent if asked to */ + if (prel->enable_parent) + append_child_relation(root, parent_rel, parent_rowmark, + rti, 0, rte->relid, NULL); - /* Iterate all indexes in rangeset and append child relations */ - foreach(lc, ranges) - { - IndexRange irange = lfirst_irange(lc); + /* Iterate all indexes in rangeset and append child relations */ + foreach(lc, ranges) + { + IndexRange irange = lfirst_irange(lc); - for (i = irange_lower(irange); i <= irange_upper(irange); i++) - append_child_relation(root, parent_rel, parent_rowmark, - rti, i, children[i], wrappers); - } + for (i = irange_lower(irange); i <= irange_upper(irange); i++) + append_child_relation(root, parent_rel, parent_rowmark, + rti, i, children[i], wrappers); + } - /* Now close parent relation */ - heap_close(parent_rel, NoLock); + /* Now close parent relation */ + heap_close(parent_rel, NoLock); - /* Clear path list and make it point to NIL */ - list_free_deep(rel->pathlist); - rel->pathlist = NIL; + /* Clear path list and make it point to NIL */ + list_free_deep(rel->pathlist); + rel->pathlist = NIL; #if PG_VERSION_NUM >= 90600 - /* Clear old partial path list */ - list_free(rel->partial_pathlist); - rel->partial_pathlist = NIL; + /* Clear old partial path list */ + list_free(rel->partial_pathlist); + rel->partial_pathlist = NIL; #endif - /* Generate new paths using the rels we've just added */ - set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); - set_append_rel_size_compat(root, rel, rti); + /* Generate new paths using the rels we've just added */ + set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); + set_append_rel_size_compat(root, rel, rti); #if PG_VERSION_NUM >= 90600 - /* consider gathering partial paths for the parent appendrel */ - generate_gather_paths(root, rel); + /* consider gathering partial paths for the 
parent appendrel */ + generate_gather_paths(root, rel); #endif - /* No need to go further (both nodes are disabled), return */ - if (!(pg_pathman_enable_runtimeappend || - pg_pathman_enable_runtime_merge_append)) - return; + /* Skip if both custom nodes are disabled */ + if (!(pg_pathman_enable_runtimeappend || + pg_pathman_enable_runtime_merge_append)) + goto cleanup; - /* Skip if there's no PARAMs in partitioning-related clauses */ - if (!clause_contains_params((Node *) part_clauses)) - return; + /* Skip if there's no PARAMs in partitioning-related clauses */ + if (!clause_contains_params((Node *) part_clauses)) + goto cleanup; - /* Generate Runtime[Merge]Append paths if needed */ - foreach (lc, rel->pathlist) + /* Generate Runtime[Merge]Append paths if needed */ + foreach (lc, rel->pathlist) + { + AppendPath *cur_path = (AppendPath *) lfirst(lc); + Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); + Path *inner_path = NULL; + ParamPathInfo *ppi; + + /* Skip if rel contains some join-related stuff or path type mismatched */ + if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || + rel->has_eclass_joins || rel->joininfo) { - AppendPath *cur_path = (AppendPath *) lfirst(lc); - Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); - Path *inner_path = NULL; - ParamPathInfo *ppi; - - /* Skip if rel contains some join-related stuff or path type mismatched */ - if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || - rel->has_eclass_joins || rel->joininfo) - { - continue; - } - - /* Get existing parameterization */ - ppi = get_appendrel_parampathinfo(rel, inner_required); + continue; + } - if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) - inner_path = create_runtimeappend_path(root, cur_path, - ppi, paramsel); - else if (IsA(cur_path, MergeAppendPath) && - pg_pathman_enable_runtime_merge_append) - { - /* Check struct layout compatibility */ - if (offsetof(AppendPath, subpaths) != - offsetof(MergeAppendPath, 
subpaths)) - elog(FATAL, "Struct layouts of AppendPath and " - "MergeAppendPath differ"); - - inner_path = create_runtimemergeappend_path(root, cur_path, - ppi, paramsel); - } + /* Get existing parameterization */ + ppi = get_appendrel_parampathinfo(rel, inner_required); - if (inner_path) - add_path(rel, inner_path); + if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) + inner_path = create_runtimeappend_path(root, cur_path, + ppi, paramsel); + else if (IsA(cur_path, MergeAppendPath) && + pg_pathman_enable_runtime_merge_append) + { + /* Check struct layout compatibility */ + if (offsetof(AppendPath, subpaths) != + offsetof(MergeAppendPath, subpaths)) + elog(FATAL, "Struct layouts of AppendPath and " + "MergeAppendPath differ"); + + inner_path = create_runtimemergeappend_path(root, cur_path, + ppi, paramsel); } + + if (inner_path) + add_path(rel, inner_path); } + +cleanup: + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } /* diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 34d88f0c..f3796d28 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -323,9 +323,6 @@ void invalidate_pathman_relation_info_cache(void); void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); -PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, diff --git a/src/partition_creation.c b/src/partition_creation.c index 05a6f508..32ad269b 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -334,7 +334,6 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, PG_TRY(); { - LockAcquireResult lock_result; /* could we lock the parent? 
*/ Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; @@ -342,18 +341,29 @@ create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? */ Oid base_bound_type; /* base type of prel->ev_type */ Oid base_value_type; /* base type of value_type */ + /* Prevent modifications of partitioning scheme */ + lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); + /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); + prel = get_pathman_relation_info(relid); shout_if_prel_is_invalid(relid, prel, PT_RANGE); /* Fetch base types of prel->ev_type & value_type */ base_bound_type = getBaseType(prel->ev_type); base_value_type = getBaseType(value_type); - /* Search for a suitable partition if we didn't hold it */ + /* + * Search for a suitable partition if we didn't hold it, + * since somebody might have just created it for us. + * + * If the table is locked, it means that we've + * already failed to find a suitable partition + * and called this function to do the job. + */ Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); if (lock_result == LOCKACQUIRE_OK) { diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 2db579b3..c48451d9 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -691,6 +691,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) int empty_slot_idx = -1, /* do we have a slot for BGWorker? 
*/ i; TransactionId rel_xmin; + LOCKMODE lockmode = ShareUpdateExclusiveLock; /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) @@ -703,12 +704,12 @@ partition_table_concurrently(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'sleep_time' should not be less than 0.5"))); + /* Prevent concurrent function calls */ + LockRelationOid(relid, lockmode); + /* Check if relation is a partitioned table */ - shout_if_prel_is_invalid(relid, - /* We also lock the parent relation */ - get_pathman_relation_info_after_lock(relid, true, NULL), - /* Partitioning type does not matter here */ - PT_ANY); + if (!has_pathman_relation_info(relid)) + shout_if_prel_is_invalid(relid, NULL, PT_ANY); /* Check that partitioning operation result is visible */ if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL)) @@ -723,7 +724,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* * Look for an empty slot and also check that a concurrent - * partitioning operation for this table hasn't been started yet + * partitioning operation for this table hasn't started yet. 
*/ for (i = 0; i < PART_WORKER_SLOTS; i++) { @@ -797,6 +798,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS) CppAsString(stop_concurrent_part_task), get_rel_name(relid)); + /* We don't need this lock anymore */ + UnlockRelationOid(relid, lockmode); + PG_RETURN_VOID(); } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef2288f3..ef68c11e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -75,22 +75,22 @@ PG_FUNCTION_INFO_V1( pathman_version ); /* User context for function show_partition_list_internal() */ typedef struct { - Relation pathman_config; - HeapScanDesc pathman_config_scan; - Snapshot snapshot; + Relation pathman_config; + HeapScanDesc pathman_config_scan; + Snapshot snapshot; - const PartRelationInfo *current_prel; /* selected PartRelationInfo */ + PartRelationInfo *current_prel; /* selected PartRelationInfo */ - Size child_number; /* child we're looking at */ - SPITupleTable *tuptable; /* buffer for tuples */ + Size child_number; /* child we're looking at */ + SPITupleTable *tuptable; /* buffer for tuples */ } show_partition_list_cxt; /* User context for function show_pathman_cache_stats_internal() */ typedef struct { - MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; - HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; - int current_item; + MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; + HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; + int current_item; } show_cache_stats_cxt; /* @@ -362,10 +362,10 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Iterate through pathman cache */ for (;;) { - const PartRelationInfo *prel; - HeapTuple htup; - Datum values[Natts_pathman_partition_list]; - bool isnull[Natts_pathman_partition_list] = { 0 }; + HeapTuple htup; + Datum values[Natts_pathman_partition_list]; + bool isnull[Natts_pathman_partition_list] = { 0 }; + PartRelationInfo *prel; /* Fetch next PartRelationInfo if needed */ if (usercxt->current_prel == NULL) @@ -401,6 +401,9 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* If we've run out of 
partitions, switch to the next 'prel' */ if (usercxt->child_number >= PrelChildrenCount(prel)) { + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + usercxt->current_prel = NULL; usercxt->child_number = 0; @@ -787,13 +790,13 @@ add_to_pathman_config(PG_FUNCTION_ARGS) { pfree(children); - /* Now try to create a PartRelationInfo */ PG_TRY(); { /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - get_pathman_relation_info(relid); + /* Now try to create a PartRelationInfo */ + has_pathman_relation_info(relid); } PG_CATCH(); { diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 007a2937..0e40dcb8 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -56,22 +56,29 @@ PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( validate_interval_value ); -static char *deparse_constraint(Oid relid, Node *expr); -static ArrayType *construct_infinitable_array(Bound *elems, - int nelems, - Oid elmtype, - int elmlen, - bool elmbyval, - char elmalign); -static void check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges); +static ArrayType *construct_bounds_array(Bound *elems, + int nelems, + Oid elmtype, + int elmlen, + bool elmbyval, + char elmalign); + +static void check_range_adjacence(Oid cmp_proc, + Oid collid, + List *ranges); + static void merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts); + +static char *deparse_constraint(Oid relid, Node *expr); + static void modify_range_constraint(Oid partition_relid, const char *expression, Oid expression_type, const Bound *lower, const Bound *upper); + static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); @@ -400,11 +407,12 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) Datum get_part_range_by_oid(PG_FUNCTION_ARGS) { - Oid partition_relid, - parent_relid; - RangeEntry *ranges; - const PartRelationInfo *prel; - uint32 i; + Oid partition_relid, + parent_relid; + Oid arg_type; + 
RangeEntry *ranges; + PartRelationInfo *prel; + uint32 i; if (!PG_ARGISNULL(0)) { @@ -419,11 +427,13 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) errmsg("relation \"%s\" is not a partition", get_rel_name_or_relid(partition_relid)))); + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->ev_type)) + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", format_type_be(getBaseType(prel->ev_type))))); @@ -432,6 +442,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) /* Look for the specified partition */ for (i = 0; i < PrelChildrenCount(prel); i++) + { if (ranges[i].child_oid == partition_relid) { ArrayType *arr; @@ -440,12 +451,15 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) elems[0] = ranges[i].min; elems[1] = ranges[i].max; - arr = construct_infinitable_array(elems, 2, - prel->ev_type, prel->ev_len, - prel->ev_byval, prel->ev_align); + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); PG_RETURN_ARRAYTYPE_P(arr); } + } /* No partition found, report error */ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -466,11 +480,13 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) Datum get_part_range_by_idx(PG_FUNCTION_ARGS) { - Oid parent_relid; - int partition_idx = 0; - Bound elems[2]; - RangeEntry *ranges; - const PartRelationInfo *prel; + Oid parent_relid; + int partition_idx = 0; + Oid arg_type; + Bound elems[2]; + RangeEntry *ranges; + PartRelationInfo *prel; + ArrayType *arr; if (!PG_ARGISNULL(0)) { @@ -486,11 +502,13 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), 
errmsg("'partition_idx' should not be NULL"))); + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent_relid); shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->ev_type)) + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", format_type_be(getBaseType(prel->ev_type))))); @@ -520,11 +538,13 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) elems[0] = ranges[partition_idx].min; elems[1] = ranges[partition_idx].max; - PG_RETURN_ARRAYTYPE_P(construct_infinitable_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align)); + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + PG_RETURN_ARRAYTYPE_P(arr); } @@ -688,8 +708,10 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) *first, *last; FmgrInfo cmp_proc; + ObjectAddresses *objects = new_object_addresses(); int i; + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -702,7 +724,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Process partitions */ for (i = 0; i < nparts; i++) { - int j; + ObjectAddress object; + int j; /* Prevent modification of partitions */ LockRelationOid(parts[0], AccessExclusiveLock); @@ -716,6 +739,9 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) break; } } + + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); } /* Check that partitions are adjacent */ @@ -765,13 +791,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) SPI_finish(); /* Drop 
obsolete partitions */ - for (i = 1; i < nparts; i++) - { - ObjectAddress object; - - ObjectAddressSet(object, RelationRelationId, parts[i]); - performDeletion(&object, DROP_CASCADE, 0); - } + performMultipleDeletions(objects, DROP_CASCADE, 0); + free_object_addresses(objects); } @@ -791,72 +812,74 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) Oid partition = PG_GETARG_OID(0), parent; PartRelationInfo *prel; + ObjectAddress object; + RangeEntry *ranges; + int i; /* Lock the partition we're going to drop */ LockRelationOid(partition, AccessExclusiveLock); /* Check if partition exists */ - if (!SearchSysCacheExists1(RELOID, partition)) + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) elog(ERROR, "relation %u does not exist", partition); /* Get parent's relid */ parent = get_parent_of_partition(partition); - if (!OidIsValid(parent)) + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) elog(ERROR, "relation \"%s\" is not a partition", get_rel_name(partition)); - if ((prel = get_pathman_relation_info(parent)) != NULL) - { - ObjectAddress object; - RangeEntry *ranges; - int i; - - /* Emit an error if it is not partitioned by RANGE */ - shout_if_prel_is_invalid(parent, prel, PT_RANGE); + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); + /* Fetch ranges array */ + ranges = PrelGetRangesArray(prel); - /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == partition) - break; + /* Looking for partition in child relations */ + for (i = 0; i < PrelChildrenCount(prel); i++) + if (ranges[i].child_oid == partition) + break; - /* Should have found it */ - Assert(i < 
PrelChildrenCount(prel)); + /* Should have found it */ + Assert(i < PrelChildrenCount(prel)); - /* Expand next partition if it exists */ - if (i < PrelChildrenCount(prel) - 1) - { - RangeEntry *cur = &ranges[i], - *next = &ranges[i + 1]; - Oid next_partition = next->child_oid; - LOCKMODE lockmode = AccessExclusiveLock; + /* Expand next partition if it exists */ + if (i < PrelLastChild(prel)) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + LOCKMODE lockmode = AccessExclusiveLock; - /* Lock next partition */ - LockRelationOid(next_partition, lockmode); + /* Lock next partition */ + LockRelationOid(next_partition, lockmode); - /* Does next partition exist? */ - if (SearchSysCacheExists1(RELOID, next_partition)) - { - /* Stretch next partition to cover range */ - modify_range_constraint(next_partition, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); - } - /* Bad luck, unlock missing partition */ - else UnlockRelationOid(next_partition, lockmode); + /* Does next partition exist? */ + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); + } - /* Drop partition */ - ObjectAddressSet(object, RelationRelationId, partition); - performDeletion(&object, DROP_CASCADE, 0); + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - } + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); PG_RETURN_VOID(); } @@ -1182,18 +1205,18 @@ deparse_constraint(Oid relid, Node *expr) } /* - * Build an 1d array of Bound elements + * Build an 1d array of Bound elements. 
* - * The main difference from construct_array() is that - * it will substitute infinite values with NULLs + * The main difference from construct_array() is that + * it will substitute infinite values with NULLs. */ static ArrayType * -construct_infinitable_array(Bound *elems, - int nelems, - Oid elemtype, - int elemlen, - bool elembyval, - char elemalign) +construct_bounds_array(Bound *elems, + int nelems, + Oid elemtype, + int elemlen, + bool elembyval, + char elemalign) { ArrayType *arr; Datum *datums; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 7b465dee..cd811cdd 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -340,8 +340,6 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Table may be partitioned */ if (rte->inh) { - PartRelationInfo *prel; - #ifdef LEGACY_ROWMARKS_95 /* Don't process queries with RowMarks on 9.5 */ if (get_parse_rowmark(parse, current_rti)) @@ -349,7 +347,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) #endif /* Proceed if table is partitioned by pg_pathman */ - if ((prel = get_pathman_relation_info(rte->relid)) != NULL) + if (has_pathman_relation_info(rte->relid)) { /* HACK: unset the 'inh' flag to disable standard planning */ rte->inh = false; @@ -357,9 +355,6 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Try marking it using PARENTHOOD_ALLOWED */ assign_rel_parenthood_status(parse->queryId, rte, PARENTHOOD_ALLOWED); - - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ diff --git a/src/relation_info.c b/src/relation_info.c index 44635ebf..c2563d4e 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -289,29 +289,6 @@ get_pathman_relation_info(Oid relid) return psin->prel; } -/* Acquire lock on a table and try to get PartRelationInfo */ -PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result) -{ - PartRelationInfo *prel; - LockAcquireResult acquire_result; - - /* Restrict concurrent partition creation (it's dangerous) */ - acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - - /* Set 'lock_result' if asked to */ - if (lock_result) - *lock_result = acquire_result; - - prel = get_pathman_relation_info(relid); - if (!prel && unlock_if_not_found) - UnlockRelationOid(relid, ShareUpdateExclusiveLock); - - return prel; -} - /* Build a new PartRelationInfo for partitioned relation */ static PartRelationInfo * build_pathman_relation_info(Oid relid, Datum *values) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1f376c20..f90cca36 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -476,7 +476,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultRelInfo *parent_rri; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ - ExprContext *econtext; TupleTableSlot *myslot; MemoryContext oldcontext = CurrentMemoryContext; @@ -529,15 +528,14 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); - econtext = GetPerTupleExprContext(estate); - for (;;) { TupleTableSlot *slot; bool skip_tuple; Oid tuple_oid = InvalidOid; + ExprContext *econtext = GetPerTupleExprContext(estate); - const PartRelationInfo *prel; + PartRelationInfo *prel; ResultRelInfoHolder 
*rri_holder; ResultRelInfo *child_result_rel; @@ -551,7 +549,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Initialize expression and expression state */ if (expr == NULL) { - expr = copyObject(prel->expr); + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); expr_state = ExecInitExpr((Expr *) expr, NULL); } @@ -575,10 +573,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; - /* - * Search for a matching partition. - * WARNING: 'prel' might change after this call! - */ + /* Search for a matching partition */ rri_holder = select_partition_for_insert(expr_state, econtext, estate, prel, &parts_storage); @@ -598,13 +593,12 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { HeapTuple tuple_old; - /* TODO: use 'tuple_map' directly instead of do_convert_tuple() */ tuple_old = tuple; tuple = do_convert_tuple(tuple, rri_holder->tuple_map); heap_freetuple(tuple_old); } - /* now we can set proper tuple descriptor according to child relation */ + /* Now we can set proper tuple descriptor according to child relation */ ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); ExecStoreTuple(tuple, slot, InvalidBuffer, false); @@ -656,12 +650,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } } + /* Switch back to query context */ MemoryContextSwitchTo(oldcontext); - /* - * In the old protocol, tell pqcomm that we can process normal protocol - * messages again. 
- */ + /* Required for old protocol */ if (old_protocol) pq_endmsgread(); @@ -674,6 +666,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, pfree(values); pfree(nulls); + /* Release resources for tuple table */ ExecResetTupleTable(estate->es_tupleTable, false); /* Close partitions and destroy hash table */ @@ -682,6 +675,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Close parent's indices */ ExecCloseIndices(parent_rri); + /* Release an EState along with all remaining working storage */ FreeExecutorState(estate); return processed; From c96393352dc7d6e110adf3dd299ddd7dfce791ed Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 20 Nov 2017 14:21:59 +0300 Subject: [PATCH 0792/1124] fix late updates (e.g. 1.1 => 1.4) --- src/pl_range_funcs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 1b8b2ade..5e3a7696 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -870,8 +870,11 @@ validate_interval_value(PG_FUNCTION_ARGS) /* * Fetch partitioning expression's type using * either user's expression or parsed expression. + * + * NOTE: we check number of function's arguments + * in case of late updates (e.g. 1.1 => 1.4). 
*/ - if (PG_ARGISNULL(ARG_EXPRESSION_P)) + if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) { Datum expr_datum; From 81be0d8c6d5cc216e4d28ae5f851366de98610b4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Nov 2017 15:11:46 +0300 Subject: [PATCH 0793/1124] WIP fix more places that use cache --- src/hooks.c | 4 +- src/include/relation_info.h | 21 ++++++-- src/nodes_common.c | 36 ++++++++----- src/pl_range_funcs.c | 54 ++++++++++--------- src/relation_info.c | 105 +++++++++++++++++++++++++----------- 5 files changed, 148 insertions(+), 72 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index e9b894c7..83f040d8 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -821,7 +821,7 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidation event for whole cache */ if (relid == InvalidOid) { - invalidate_pathman_relation_info_cache(); + invalidate_pathman_status_info_cache(); } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ @@ -840,7 +840,7 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_parent_of_partition(relid); /* Invalidate PartStatusInfo entry if needed */ - invalidate_pathman_relation_info(relid); + invalidate_pathman_status_info(relid); } } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index f3796d28..14286546 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -236,6 +236,19 @@ typedef struct PartRelationInfo #define PrelIsFresh(prel) ( (prel)->fresh ) +static inline uint32 +PrelHasPartition(const PartRelationInfo *prel, Oid partition_relid) +{ + Oid *children = PrelGetChildrenArray(prel); + uint32 i; + + for (i = 0; i < PrelChildrenCount(prel); i++) + if (children[i] == partition_relid) + return i + 1; + + return 0; +} + static inline uint32 PrelLastChild(const PartRelationInfo *prel) { @@ -316,10 +329,12 @@ PartTypeToCString(PartType parttype) } +/* Status chache */ +void invalidate_pathman_status_info(Oid relid); +void 
invalidate_pathman_status_info_cache(void); + /* Dispatch cache */ void refresh_pathman_relation_info(Oid relid); -void invalidate_pathman_relation_info(Oid relid); -void invalidate_pathman_relation_info_cache(void); void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); @@ -332,7 +347,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid, void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); -/* Parent cache */ +/* Parents cache */ void cache_parent_of_partition(Oid partition, Oid parent); void forget_parent_of_partition(Oid partition); Oid get_parent_of_partition(Oid partition); diff --git a/src/nodes_common.c b/src/nodes_common.c index 7a4b71fe..66f2df12 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -558,9 +558,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, List *clauses, List *custom_plans, CustomScanMethods *scan_methods) { - RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; - const PartRelationInfo *prel; - CustomScan *cscan; + RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; + PartRelationInfo *prel; + CustomScan *cscan; prel = get_pathman_relation_info(rpath->relid); Assert(prel); @@ -630,6 +630,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Cache 'prel->enable_parent' as well */ pack_runtimeappend_private(cscan, rpath, prel->enable_parent); + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + return &cscan->scan.plan; } @@ -659,14 +662,15 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - const PartRelationInfo *prel; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + PartRelationInfo *prel; #if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; #endif prel = get_pathman_relation_info(scan_state->relid); + Assert(prel); /* Prepare expression according to set_set_customscan_references() */ scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); @@ -674,6 +678,9 @@ begin_append_common(CustomScanState *node, EState *estate, int eflags) /* Prepare custom expression according to set_set_customscan_references() */ scan_state->canon_custom_exprs = canonicalize_custom_exprs(scan_state->custom_exprs); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } TupleTableSlot * @@ -754,14 +761,14 @@ end_append_common(CustomScanState *node) void rescan_append_common(CustomScanState *node) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - ExprContext *econtext = node->ss.ps.ps_ExprContext; - const PartRelationInfo *prel; - List *ranges; - ListCell *lc; - WalkerContext wcxt; - Oid *parts; - int nparts; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + ExprContext *econtext = node->ss.ps.ps_ExprContext; + PartRelationInfo *prel; + List *ranges; + ListCell *lc; + WalkerContext wcxt; + Oid *parts; + int nparts; prel = get_pathman_relation_info(scan_state->relid); Assert(prel); @@ -797,6 +804,9 @@ rescan_append_common(CustomScanState *node) scan_state->ncur_plans, scan_state->css.ss.ps.state); + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + scan_state->running_idx = 0; } diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0e40dcb8..997547f2 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -412,7 +412,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) Oid arg_type; RangeEntry *ranges; PartRelationInfo *prel; - uint32 i; + uint32 idx; if (!PG_ARGISNULL(0)) { @@ -441,24 +441,24 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); /* Look for the specified partition */ - for (i = 0; i < PrelChildrenCount(prel); i++) + if ((idx = PrelHasPartition(prel, partition_relid)) > 0) { - if (ranges[i].child_oid == partition_relid) - { - ArrayType *arr; - Bound elems[2]; + ArrayType *arr; + Bound elems[2]; - elems[0] = ranges[i].min; - elems[1] = ranges[i].max; + elems[0] = ranges[idx - 1].min; + elems[1] = ranges[idx - 1].max; - arr = construct_bounds_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align); + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); - PG_RETURN_ARRAYTYPE_P(arr); - } + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); } /* No partition found, report error */ @@ -544,6 +544,9 @@ get_part_range_by_idx(PG_FUNCTION_ARGS) prel->ev_byval, prel->ev_align); + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + PG_RETURN_ARRAYTYPE_P(arr); } @@ -702,14 +705,14 @@ merge_range_partitions(PG_FUNCTION_ARGS) static void merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) { - const PartRelationInfo *prel; - List *rentry_list = NIL; - RangeEntry *ranges, - *first, - *last; - FmgrInfo cmp_proc; - ObjectAddresses *objects = new_object_addresses(); - int i; + PartRelationInfo *prel; + List *rentry_list = NIL; + RangeEntry *ranges, + *first, + *last; + FmgrInfo cmp_proc; + ObjectAddresses *objects = new_object_addresses(); + int i; /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); @@ -749,7 +752,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* First determine the bounds of a new constraint */ first = (RangeEntry *) linitial(rentry_list); - last = (RangeEntry *) llast(rentry_list); + last = (RangeEntry *) llast(rentry_list); /* Swap ranges if 'last' < 'first' */ fmgr_info(prel->cmp_proc, &cmp_proc); @@ -793,6 +796,9 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Drop obsolete partitions */ performMultipleDeletions(objects, DROP_CASCADE, 0); free_object_addresses(objects); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); } diff --git a/src/relation_info.c b/src/relation_info.c index c2563d4e..b4f75f2a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -83,9 +83,10 @@ static bool delayed_shutdown = false; /* pathman was dropped */ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) -static void invalidate_pathman_status_info(PartStatusInfo *psin); static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); static void free_pathman_relation_info(PartRelationInfo *prel); +static void invalidate_psin_entries_using_relid(Oid relid); +static void invalidate_psin_entry(PartStatusInfo *psin); static Expr *get_partition_constraint_expr(Oid partition); @@ -119,59 +120,91 @@ init_relation_info_static_data(void) /* - * Partition dispatch routines. + * Status cache routines. */ -/* TODO: comment */ +/* Invalidate PartStatusInfo for 'relid' */ void -refresh_pathman_relation_info(Oid relid) -{ - -} - -/* TODO: comment */ -void -invalidate_pathman_relation_info(Oid relid) +invalidate_pathman_status_info(Oid relid) { PartStatusInfo *psin; + PartParentInfo *ppar; + /* Find status cache entry for this relation */ psin = pathman_cache_search_relid(status_cache, relid, HASH_FIND, NULL); - if (psin) - { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif + invalidate_psin_entry(psin); - invalidate_pathman_status_info(psin); + /* + * Find parent of this relation. + * + * We don't want to use get_parent_of_partition() + * since it relies upon the syscache. 
+ */ + ppar = pathman_cache_search_relid(parents_cache, + relid, HASH_FIND, + NULL); + + /* Invalidate parent directly */ + if (ppar) + { + /* Find status cache entry for parent */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); } + /* Otherwise, look through all entries */ + else invalidate_psin_entries_using_relid(relid); } -/* TODO: comment */ +/* Invalidate all PartStatusInfo entries */ void -invalidate_pathman_relation_info_cache(void) +invalidate_pathman_status_info_cache(void) +{ + invalidate_psin_entries_using_relid(InvalidOid); +} + +/* Invalidate PartStatusInfo entry referencing 'relid' */ +static void +invalidate_psin_entries_using_relid(Oid relid) { HASH_SEQ_STATUS status; PartStatusInfo *psin; + hash_seq_init(&status, status_cache); + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) { -#ifdef USE_RELINFO_LOGGING - elog(DEBUG2, "invalidation message for relation %u [%u]", - psin->relid, MyProcPid); -#endif + if (relid == InvalidOid || + psin->relid == relid || + (psin->prel && PrelHasPartition(psin->prel, relid))) + { + /* Perform invalidation */ + invalidate_psin_entry(psin); - invalidate_pathman_status_info(psin); + /* Exit if found */ + if (OidIsValid(relid)) + { + hash_seq_term(&status); + break; + } + } } } -/* TODO: comment */ +/* Invalidate single PartStatusInfo entry */ static void -invalidate_pathman_status_info(PartStatusInfo *psin) +invalidate_psin_entry(PartStatusInfo *psin) { +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif + /* Mark entry as invalid */ if (psin->prel && PrelReferenceCount(psin->prel) > 0) { @@ -189,7 +222,19 @@ invalidate_pathman_status_info(PartStatusInfo *psin) } } -/* TODO: comment */ + +/* + * Dispatch cache routines. 
+ */ + +/* Make changes to PartRelationInfo visible */ +void +refresh_pathman_relation_info(Oid relid) +{ + +} + +/* Close PartRelationInfo entry */ void close_pathman_relation_info(PartRelationInfo *prel) { @@ -680,7 +725,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, /* - * Partition bounds cache routines. + * Bounds cache routines. */ /* Remove partition's constraint from cache */ @@ -904,7 +949,7 @@ fill_pbin_with_bounds(PartBoundInfo *pbin, /* - * Partition parents cache routines. + * Parents cache routines. */ /* Add parent of partition to cache */ From c8690fc1dab69830caa2834762a448aa8977ed42 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Nov 2017 15:36:54 +0300 Subject: [PATCH 0794/1124] WIP refactoring & fixes in PathmanCopyFrom() --- src/utility_stmt_hooking.c | 69 ++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f90cca36..d52dd330 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -474,13 +474,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorage parts_storage; ResultRelInfo *parent_rri; + ExprState *expr_state = NULL; + MemoryContext query_mcxt = CurrentMemoryContext; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ TupleTableSlot *myslot; - MemoryContext oldcontext = CurrentMemoryContext; - - Node *expr = NULL; - ExprState *expr_state = NULL; uint64 processed = 0; @@ -531,28 +529,18 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, for (;;) { TupleTableSlot *slot; - bool skip_tuple; + bool skip_tuple = false; Oid tuple_oid = InvalidOid; ExprContext *econtext = GetPerTupleExprContext(estate); PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - ResultRelInfo *child_result_rel; + ResultRelInfo *child_rri; CHECK_FOR_INTERRUPTS(); ResetPerTupleExprContext(estate); - /* Fetch PartRelationInfo for parent relation */ - prel = 
get_pathman_relation_info(RelationGetRelid(parent_rel)); - - /* Initialize expression and expression state */ - if (expr == NULL) - { - expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); - expr_state = ExecInitExpr((Expr *) expr, NULL); - } - /* Switch into per tuple memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -573,20 +561,39 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Store slot for expression evaluation */ econtext->ecxt_scantuple = slot; + /* Fetch PartRelationInfo for parent relation */ + prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); + + /* Initialize expression state */ + if (expr_state == NULL) + { + MemoryContext old_mcxt; + Node *expr; + + old_mcxt = MemoryContextSwitchTo(query_mcxt); + + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); + expr_state = ExecInitExpr((Expr *) expr, NULL); + + MemoryContextSwitchTo(old_mcxt); + } + /* Search for a matching partition */ rri_holder = select_partition_for_insert(expr_state, econtext, estate, prel, &parts_storage); + child_rri = rri_holder->result_rel_info; - child_result_rel = rri_holder->result_rel_info; + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = child_result_rel; + estate->es_result_relation_info = child_rri; /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. 
*/ - tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); + tuple->t_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) @@ -599,19 +606,17 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } /* Now we can set proper tuple descriptor according to child relation */ - ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); + ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); ExecStoreTuple(tuple, slot, InvalidBuffer, false); /* Triggers and stuff need to be invoked in query context. */ - MemoryContextSwitchTo(oldcontext); - - skip_tuple = false; + MemoryContextSwitchTo(query_mcxt); /* BEFORE ROW INSERT Triggers */ - if (child_result_rel->ri_TrigDesc && - child_result_rel->ri_TrigDesc->trig_insert_before_row) + if (child_rri->ri_TrigDesc && + child_rri->ri_TrigDesc->trig_insert_before_row) { - slot = ExecBRInsertTriggers(estate, child_result_rel, slot); + slot = ExecBRInsertTriggers(estate, child_rri, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; @@ -625,18 +630,18 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, List *recheckIndexes = NIL; /* Check the constraints of the tuple */ - if (child_result_rel->ri_RelationDesc->rd_att->constr) - ExecConstraints(child_result_rel, slot, estate); + if (child_rri->ri_RelationDesc->rd_att->constr) + ExecConstraints(child_rri, slot, estate); /* OK, store the tuple and create index entries for it */ - simple_heap_insert(child_result_rel->ri_RelationDesc, tuple); + simple_heap_insert(child_rri->ri_RelationDesc, tuple); - if (child_result_rel->ri_NumIndices > 0) + if (child_rri->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ - ExecARInsertTriggersCompat(estate, child_result_rel, tuple, + ExecARInsertTriggersCompat(estate, child_rri, 
tuple, recheckIndexes, NULL); list_free(recheckIndexes); @@ -651,7 +656,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } /* Switch back to query context */ - MemoryContextSwitchTo(oldcontext); + MemoryContextSwitchTo(query_mcxt); /* Required for old protocol */ if (old_protocol) From 1b380ff81cb995cfbc032a47f48be38818e850f2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Nov 2017 20:38:58 +0300 Subject: [PATCH 0795/1124] small refactoring for ResultPartsStorage --- src/include/partition_filter.h | 20 ++++-- src/partition_filter.c | 123 ++++++++++++++++----------------- src/utility_stmt_hooking.c | 12 ++-- 3 files changed, 78 insertions(+), 77 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 841cd0cb..d298bb34 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -49,8 +49,9 @@ typedef struct } ResultRelInfoHolder; -/* Standard size of ResultPartsStorage entry */ -#define ResultPartsStorageStandard 0 +/* Default settings for ResultPartsStorage */ +#define RPS_DEFAULT_ENTRY_SIZE sizeof(ResultPartsStorage) +#define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ /* Forward declaration (for on_new_rri_holder()) */ struct ResultPartsStorage; @@ -137,18 +138,25 @@ extern CustomExecMethods partition_filter_exec_methods; void init_partition_filter_static_data(void); -/* ResultPartsStorage init\fini\scan function */ +/* + * ResultPartsStorage API (select partition for INSERT & UPDATE). 
+ */ + +/* Initialize storage for some parent table */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + ResultRelInfo *parent_rri, EState *estate, - bool speculative_inserts, + CmdType cmd_type, Size table_entry_size, + bool speculative_inserts, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg, - CmdType cmd_type); + void *on_new_rri_holder_cb_arg); +/* Free storage and opened relations */ void fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels); +/* Find ResultRelInfo holder in storage */ ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); diff --git a/src/partition_filter.c b/src/partition_filter.c index 33424e06..035db748 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -144,30 +144,30 @@ init_partition_filter_static_data(void) /* Initialize ResultPartsStorage (hash table etc) */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + ResultRelInfo *parent_rri, EState *estate, - bool speculative_inserts, + CmdType cmd_type, Size table_entry_size, + bool speculative_inserts, on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg, - CmdType cmd_type) + void *on_new_rri_holder_cb_arg) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; memset(result_rels_table_config, 0, sizeof(HASHCTL)); result_rels_table_config->keysize = sizeof(Oid); - - /* Use sizeof(ResultRelInfoHolder) if table_entry_size is 0 */ - if (table_entry_size == ResultPartsStorageStandard) - result_rels_table_config->entrysize = sizeof(ResultRelInfoHolder); - else - result_rels_table_config->entrysize = table_entry_size; + result_rels_table_config->entrysize = table_entry_size; parts_storage->result_rels_table = hash_create("ResultRelInfo storage", 10, result_rels_table_config, HASH_ELEM | HASH_BLOBS); + Assert(parent_rri); + parts_storage->base_rri = parent_rri; + + Assert(estate); parts_storage->estate = estate; - 
parts_storage->base_rri = NULL; + /* Callback might be NULL */ parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; @@ -225,7 +225,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) rri_holder = hash_search(parts_storage->result_rels_table, (const void *) &partid, - HASH_ENTER, &found); + HASH_FIND, &found); /* If not found, create & cache new ResultRelInfo */ if (!found) @@ -237,6 +237,7 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) Index child_rte_idx; ResultRelInfo *child_result_rel_info; List *translated_vars; + MemoryContext old_mcxt; /* Check that 'base_rri' is set */ if (!parts_storage->base_rri) @@ -246,15 +247,18 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) { - /* Don't forget to drop invalid hash table entry */ - hash_search(parts_storage->result_rels_table, - (const void *) &partid, - HASH_REMOVE, NULL); - UnlockRelationOid(partid, parts_storage->head_open_lock_mode); return NULL; } + /* Switch to query-level mcxt for allocations */ + old_mcxt = MemoryContextSwitchTo(parts_storage->estate->es_query_cxt); + + /* Create a new cache entry for this partition */ + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_ENTER, NULL); + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, parts_storage->estate->es_range_table); @@ -300,15 +304,15 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); + CopyToResultRelInfo(ri_projectReturning); + CopyToResultRelInfo(ri_onConflictSetProj); + CopyToResultRelInfo(ri_onConflictSetWhere); + if (parts_storage->command_type != 
CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); else child_result_rel_info->ri_junkFilter = NULL; - CopyToResultRelInfo(ri_projectReturning); - CopyToResultRelInfo(ri_onConflictSetProj); - CopyToResultRelInfo(ri_onConflictSetWhere); - /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; @@ -334,8 +338,11 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) parts_storage, parts_storage->callback_arg); - /* Finally append ResultRelInfo to storage->es_alloc_result_rels */ + /* Append ResultRelInfo to storage->es_alloc_result_rels */ append_rri_to_estate(parts_storage->estate, child_result_rel_info); + + /* Don't forget to switch back! */ + MemoryContextSwitchTo(old_mcxt); } return rri_holder; @@ -426,7 +433,6 @@ select_partition_for_insert(ExprState *expr_state, const PartRelationInfo *prel, ResultPartsStorage *parts_storage) { - MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; @@ -448,27 +454,22 @@ select_partition_for_insert(ExprState *expr_state, parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); if (nparts > 1) + { elog(ERROR, ERR_PART_ATTR_MULTIPLE); + } else if (nparts == 0) { partition_relid = create_partitions_for_value(parent_relid, value, prel->ev_type); - - /* get_pathman_relation_info() will refresh this entry */ - refresh_pathman_relation_info(parent_relid); } else partition_relid = parts[0]; - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + /* Get ResultRelationInfo holder for the selected partition */ rri_holder = scan_result_parts_storage(partition_relid, parts_storage); - MemoryContextSwitchTo(old_mcxt); /* This partition has been dropped, repeat with a new 'prel' */ if (rri_holder == NULL) { - /* get_pathman_relation_info() will refresh this entry */ - refresh_pathman_relation_info(parent_relid); - /* Get a fresh PartRelationInfo */ prel = 
get_pathman_relation_info(parent_relid); @@ -520,9 +521,9 @@ prepare_expr_state(const PartRelationInfo *prel, EState *estate, bool try_map) { - ExprState *expr_state; - MemoryContext old_mcxt; - Node *expr; + ExprState *expr_state; + MemoryContext old_mcxt; + Node *expr; /* Make sure we use query memory context */ old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); @@ -650,12 +651,12 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionFilterState *state = (PartitionFilterState *) node; - PlanState *child_state; - ResultRelInfo *current_rri; - Relation current_rel; - const PartRelationInfo *prel; - bool try_map; + PartitionFilterState *state = (PartitionFilterState *) node; + PlanState *child_state; + ResultRelInfo *current_rri; + Relation current_rel; + PartRelationInfo *prel; + bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); @@ -665,9 +666,6 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) current_rri = estate->es_result_relation_info; current_rel = current_rri->ri_RelationDesc; - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - /* * In UPDATE queries we have to work with child relation tlist, * but expression contains varattnos of base relation, so we @@ -678,19 +676,21 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) try_map = state->command_type == CMD_UPDATE && RelationGetRelid(current_rel) != state->partitioned_table; + /* Fetch PartRelationInfo for this partitioned relation */ + prel = get_pathman_relation_info(state->partitioned_table); + /* Build a partitioning expression state */ state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + /* Init ResultRelInfo cache */ - init_result_parts_storage(&state->result_parts, estate, + init_result_parts_storage(&state->result_parts, current_rri, + estate, state->command_type, + RPS_DEFAULT_ENTRY_SIZE, state->on_conflict_action != ONCONFLICT_NONE, - ResultPartsStorageStandard, - prepare_rri_for_insert, - (void *) state, - state->command_type); - - /* Don't forget to initialize 'base_rri'! */ - state->result_parts.base_rri = current_rri; + prepare_rri_for_insert, (void *) state); /* No warnings yet */ state->warning_triggered = false; @@ -714,22 +714,14 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { - MemoryContext old_mcxt; - const PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder; - ResultRelInfo *resultRelInfo; + MemoryContext old_mcxt; + PartRelationInfo *prel; + ResultRelInfoHolder *rri_holder; + ResultRelInfo *resultRelInfo; /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - if (!prel) - { - if (!state->warning_triggered) - elog(WARNING, "table \"%s\" is not partitioned, " - INSERT_NODE_NAME " will behave as a normal INSERT", - get_rel_name_or_relid(state->partitioned_table)); - - return slot; - } + if ((prel = get_pathman_relation_info(state->partitioned_table)) == NULL) + return slot; /* table is not partitioned anymore */ /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -775,6 +767,9 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + return slot; } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index d52dd330..96a35989 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -498,13 +498,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ - init_result_parts_storage(&parts_storage, estate, false, - ResultPartsStorageStandard, - prepare_rri_for_copy, NULL, - CMD_INSERT); - - /* Don't forget to initialize 'base_rri'! */ - parts_storage.base_rri = parent_rri; + init_result_parts_storage(&parts_storage, parent_rri, + estate, CMD_INSERT, + RPS_DEFAULT_ENTRY_SIZE, + RPS_DEFAULT_SPECULATIVE, + prepare_rri_for_copy, NULL); /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate); From b605edd3687835908eedd770c44cfd41d85b97ba Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 31 Oct 2017 14:14:18 +0300 Subject: [PATCH 0796/1124] Adapted for https://github.com/arssher/postgresql/tree/foreign_copy_from. 
--- src/utility_stmt_hooking.c | 52 ++++++++++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 103f194e..93908d38 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -499,7 +499,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, estate, false, ResultPartsStorageStandard, - prepare_rri_for_copy, NULL); + prepare_rri_for_copy, cstate); parts_storage.saved_rel_info = parent_result_rel; /* Set up a tuple slot too */ @@ -634,13 +634,20 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Check the constraints of the tuple */ if (child_result_rel->ri_RelationDesc->rd_att->constr) ExecConstraints(child_result_rel, slot, estate); + if (!child_result_rel->ri_FdwRoutine) + { + /* OK, store the tuple and create index entries for it */ + simple_heap_insert(child_result_rel->ri_RelationDesc, tuple); - /* OK, store the tuple and create index entries for it */ - simple_heap_insert(child_result_rel->ri_RelationDesc, tuple); - - if (child_result_rel->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false, NULL, NIL); + if (child_result_rel->ri_NumIndices > 0) + recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), + estate, false, NULL, NIL); + } + else /* FDW table */ + { + child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom( + estate, child_result_rel, cstate); + } /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ ExecARInsertTriggersCompat(estate, child_result_rel, tuple, @@ -677,6 +684,24 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecResetTupleTable(estate->es_tupleTable, false); + { + /* Shut down FDWs. TODO: make hook in fini_result_parts_storage? 
*/ + HASH_SEQ_STATUS stat; + ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ + + hash_seq_init(&stat, parts_storage.result_rels_table); + while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + { + ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; + + if (resultRelInfo->ri_FdwRoutine) + { + resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom( + estate, resultRelInfo); + } + } + } + /* Close partitions and destroy hash table */ fini_result_parts_storage(&parts_storage, true); @@ -689,7 +714,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } /* - * COPY FROM does not support FDWs, emit ERROR. + * Init COPY FROM, if supported. */ static void prepare_rri_for_copy(EState *estate, @@ -699,10 +724,17 @@ prepare_rri_for_copy(EState *estate, { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; + CopyState cstate = (CopyState) arg; if (fdw_routine != NULL) - elog(ERROR, "cannot copy to foreign partition \"%s\"", - get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); + { + if (!FdwCopyFromIsSupported(fdw_routine)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", + RelationGetRelationName(rri->ri_RelationDesc)))); + rri->ri_FdwRoutine->BeginForeignCopyFrom(estate, rri, cstate); + } } /* From 74f40cc87b4a547b6261e7b0c80277477f59bb13 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 2 Nov 2017 18:49:35 +0300 Subject: [PATCH 0797/1124] COPYing FROM to parent table instead of foreign, assuming we using shardman. Also, callback args simplified. 
--- src/include/partition_filter.h | 16 +++---- src/partition_filter.c | 82 +++++++++++++++----------------- src/utility_stmt_hooking.c | 85 ++++++++++++++++------------------ 3 files changed, 84 insertions(+), 99 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 85ddcf91..0cd08c36 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -43,17 +43,15 @@ typedef struct } ResultRelInfoHolder; -/* Forward declaration (for on_new_rri_holder()) */ +/* Forward declaration (for on_rri_holder()) */ struct ResultPartsStorage; typedef struct ResultPartsStorage ResultPartsStorage; /* - * Callback to be fired at rri_holder creation. + * Callback to be fired at rri_holder creation/destruction. */ -typedef void (*on_new_rri_holder)(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +typedef void (*on_rri_holder)(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* * Cached ResultRelInfos of partitions. 
@@ -66,7 +64,7 @@ struct ResultPartsStorage bool speculative_inserts; /* for ExecOpenIndices() */ - on_new_rri_holder on_new_rri_holder_callback; + on_rri_holder on_new_rri_holder_callback; void *callback_arg; EState *estate; /* pointer to executor's state */ @@ -116,11 +114,11 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, EState *estate, bool speculative_inserts, Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, + on_rri_holder on_new_rri_holder_cb, void *on_new_rri_holder_cb_arg); void fini_result_parts_storage(ResultPartsStorage *parts_storage, - bool close_rels); + bool close_rels, on_rri_holder hook); ResultRelInfoHolder * scan_result_parts_storage(Oid partid, ResultPartsStorage *storage); diff --git a/src/partition_filter.c b/src/partition_filter.c index 214b926a..a1886c4d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -68,18 +68,12 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; -static void prepare_rri_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_returning_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); @@ -143,7 +137,7 @@ init_result_parts_storage(ResultPartsStorage 
*parts_storage, EState *estate, bool speculative_inserts, Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, + on_rri_holder on_new_rri_holder_cb, void *on_new_rri_holder_cb_arg) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; @@ -177,16 +171,21 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, /* Free ResultPartsStorage (close relations etc) */ void -fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) +fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels, + on_rri_holder hook) { HASH_SEQ_STATUS stat; ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - /* Close partitions and free free conversion-related stuff */ - if (close_rels) + hash_seq_init(&stat, parts_storage->result_rels_table); + while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Call destruction hook, if needed */ + if (hook != NULL) + hook(rri_holder, parts_storage); + + /* Close partitions and free free conversion-related stuff */ + if (close_rels) { ExecCloseIndices(rri_holder->result_rel_info); @@ -202,13 +201,8 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) free_conversion_map(rri_holder->tuple_map); } - } - - /* Else just free conversion-related stuff */ - else - { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Else just free conversion-related stuff */ + else { /* Skip if there's no map */ if (!rri_holder->tuple_map) @@ -329,10 +323,8 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Call on_new_rri_holder_callback() if needed */ if (parts_storage->on_new_rri_holder_callback) - parts_storage->on_new_rri_holder_callback(parts_storage->estate, - rri_holder, 
- parts_storage, - parts_storage->callback_arg); + parts_storage->on_new_rri_holder_callback(rri_holder, + parts_storage); /* Finally append ResultRelInfo to storage->es_alloc_result_rels */ append_rri_to_estate(parts_storage->estate, child_result_rel_info); @@ -702,7 +694,7 @@ partition_filter_end(CustomScanState *node) PartitionFilterState *state = (PartitionFilterState *) node; /* Executor will close rels via estate->es_result_relations */ - fini_result_parts_storage(&state->result_parts, false); + fini_result_parts_storage(&state->result_parts, false, NULL); Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); @@ -793,21 +785,17 @@ pfilter_build_tlist(Relation parent_rel, List *tlist) /* Main trigger */ static void -prepare_rri_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { - prepare_rri_returning_for_insert(estate, rri_holder, rps_storage, arg); - prepare_rri_fdw_for_insert(estate, rri_holder, rps_storage, arg); + prepare_rri_returning_for_insert(rri_holder, rps_storage); + prepare_rri_fdw_for_insert(rri_holder, rps_storage); } /* Prepare 'RETURNING *' tlist & projection */ static void -prepare_rri_returning_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { PartitionFilterState *pfstate; List *returning_list; @@ -815,12 +803,15 @@ prepare_rri_returning_for_insert(EState *estate, *parent_rri; Index parent_rt_idx; TupleTableSlot *result_slot; + EState *estate; + + estate = rps_storage->estate; /* We don't need to do anything ff there's no map */ if (!rri_holder->tuple_map) return; - pfstate = (PartitionFilterState *) arg; + pfstate = (PartitionFilterState *) 
rps_storage->callback_arg; returning_list = pfstate->returning_list; /* Exit if there's no RETURNING list */ @@ -857,14 +848,15 @@ prepare_rri_returning_for_insert(EState *estate, /* Prepare FDW access structs */ static void -prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; Oid partid; + EState *estate; + + estate = rps_storage->estate; /* Nothing to do if not FDW */ if (fdw_routine == NULL) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 93908d38..6ad88bf6 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -64,10 +64,10 @@ static uint64 PathmanCopyFrom(CopyState cstate, List *range_table, bool old_protocol); -static void prepare_rri_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static void prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); +static void finish_rri_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* @@ -105,20 +105,6 @@ is_pathman_related_copy(Node *parsetree) /* Check that relation is partitioned */ if (get_pathman_relation_info(parent_relid)) { - ListCell *lc; - - /* Analyze options list */ - foreach (lc, copy_stmt->options) - { - DefElem *defel = (DefElem *) lfirst(lc); - - Assert(IsA(defel, DefElem)); - - /* We do not support freeze */ - if (strcmp(defel->defname, "freeze") == 0) - elog(ERROR, "freeze is not supported for partitioned tables"); - } - /* Emit ERROR if we can't see the necessary symbols */ #ifdef DISABLE_PATHMAN_COPY elog(ERROR, "COPY is not supported for partitioned tables on Windows"); @@ -481,6 +467,10 @@ PathmanCopyFrom(CopyState cstate, Relation 
parent_rel, uint64 processed = 0; + /* We do not support freeze */ + if (cstate->freeze) + elog(ERROR, "freeze is not supported for partitioned tables"); + tupDesc = RelationGetDescr(parent_rel); @@ -684,26 +674,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecResetTupleTable(estate->es_tupleTable, false); - { - /* Shut down FDWs. TODO: make hook in fini_result_parts_storage? */ - HASH_SEQ_STATUS stat; - ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - - hash_seq_init(&stat, parts_storage.result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) - { - ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; - - if (resultRelInfo->ri_FdwRoutine) - { - resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom( - estate, resultRelInfo); - } - } - } - /* Close partitions and destroy hash table */ - fini_result_parts_storage(&parts_storage, true); + fini_result_parts_storage(&parts_storage, true, finish_rri_copy); /* Close parent's indices */ ExecCloseIndices(parent_result_rel); @@ -717,23 +689,46 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * Init COPY FROM, if supported. 
*/ static void -prepare_rri_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { - ResultRelInfo *rri = rri_holder->result_rel_info; - FdwRoutine *fdw_routine = rri->ri_FdwRoutine; - CopyState cstate = (CopyState) arg; + ResultRelInfo *rri = rri_holder->result_rel_info; + FdwRoutine *fdw_routine = rri->ri_FdwRoutine; + CopyState cstate = (CopyState) rps_storage->callback_arg; + ResultRelInfo *parent_rri; + const char *parent_relname; + EState *estate; + + estate = rps_storage->estate; if (fdw_routine != NULL) { + parent_rri = rps_storage->saved_rel_info; + parent_relname = psprintf( + "%s.%s", "public", + quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); if (!FdwCopyFromIsSupported(fdw_routine)) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", RelationGetRelationName(rri->ri_RelationDesc)))); - rri->ri_FdwRoutine->BeginForeignCopyFrom(estate, rri, cstate); + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + } +} + +/* + * Shut down FDWs. + */ +static void +finish_rri_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ + ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; + + if (resultRelInfo->ri_FdwRoutine) + { + resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom( + rps_storage->estate, resultRelInfo); } } From 551f4f23dfbfe67dd04b77b823c2ba74dda72b66 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 1 Dec 2017 12:26:59 +0300 Subject: [PATCH 0798/1124] Do COPY FROM to foreign parts only when needed. That is, when 1) pg_pathman was compiled against postgres with shardman patches. 2) Shardman's COPY FROM was explicitly asked by setting renderzvous var. Also, check for 'freeze' option early, as before, to keep regression tests as they are. 
--- src/utility_stmt_hooking.c | 88 +++++++++++++++++++++++++++++--------- 1 file changed, 67 insertions(+), 21 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 6ad88bf6..e8ddd3de 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -22,6 +22,7 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "commands/copy.h" +#include "commands/defrem.h" #include "commands/trigger.h" #include "commands/tablecmds.h" #include "foreign/fdwapi.h" @@ -105,6 +106,26 @@ is_pathman_related_copy(Node *parsetree) /* Check that relation is partitioned */ if (get_pathman_relation_info(parent_relid)) { + ListCell *lc; + + /* Analyze options list */ + foreach (lc, copy_stmt->options) + { + DefElem *defel = lfirst_node(DefElem, lc); + + /* We do not support freeze */ + /* + * It would be great to allow copy.c extract option value and + * check it ready. However, there is no possibility (hooks) to do + * that before messaging 'ok, begin streaming data' to the client, + * which is ugly and confusing: e.g. it would require us to + * actually send something in regression tests before we notice + * the error. 
+ */ + if (strcmp(defel->defname, "freeze") == 0 && defGetBoolean(defel)) + elog(ERROR, "freeze is not supported for partitioned tables"); + } + /* Emit ERROR if we can't see the necessary symbols */ #ifdef DISABLE_PATHMAN_COPY elog(ERROR, "COPY is not supported for partitioned tables on Windows"); @@ -467,11 +488,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, uint64 processed = 0; - /* We do not support freeze */ - if (cstate->freeze) - elog(ERROR, "freeze is not supported for partitioned tables"); - - tupDesc = RelationGetDescr(parent_rel); parent_result_rel = makeNode(ResultRelInfo); @@ -633,11 +649,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); } +#ifdef PG_SHARDMAN else /* FDW table */ { child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom( estate, child_result_rel, cstate); } +#endif /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ ExecARInsertTriggersCompat(estate, child_result_rel, tuple, @@ -694,25 +712,51 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; - CopyState cstate = (CopyState) rps_storage->callback_arg; - ResultRelInfo *parent_rri; - const char *parent_relname; - EState *estate; - - estate = rps_storage->estate; if (fdw_routine != NULL) { - parent_rri = rps_storage->saved_rel_info; - parent_relname = psprintf( - "%s.%s", "public", - quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); - if (!FdwCopyFromIsSupported(fdw_routine)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", - RelationGetRelationName(rri->ri_RelationDesc)))); - fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + /* + * If this Postgres has no idea about shardman, behave as usual: + * vanilla Postgres doesn't support COPY FROM to 
foreign partitions. + * However, shardman patches to core extend FDW API to allow it, + * though currently postgres_fdw does so in a bit perverted way: we + * redirect COPY FROM to parent table on foreign server, assuming it + * exists, and let it direct tuple to proper partition. This is + * because otherwise we have to modify logic of managing connections + * in postgres_fdw and keep many connections open to one server from + * one backend. + */ +#ifndef PG_SHARDMAN + goto bail_out; /* to avoid 'unused label' warning */ +#else + { /* separate block to avoid 'unused var' warnings */ + CopyState cstate = (CopyState) rps_storage->callback_arg; + ResultRelInfo *parent_rri; + const char *parent_relname; + EState *estate; + + /* shardman COPY FROM requested? */ + if (*find_rendezvous_variable( + "shardman_pathman_copy_from_rendezvous") == NULL) + goto bail_out; + + estate = rps_storage->estate; + parent_rri = rps_storage->saved_rel_info; + parent_relname = psprintf( + "%s.%s", "public", + quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); + if (!FdwCopyFromIsSupported(fdw_routine)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", + RelationGetRelationName(rri->ri_RelationDesc)))); + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + return; + } +#endif +bail_out: + elog(ERROR, "cannot copy to foreign partition \"%s\"", + get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); } } @@ -723,6 +767,7 @@ static void finish_rri_copy(ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage) { +#ifdef PG_SHARDMAN ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; if (resultRelInfo->ri_FdwRoutine) @@ -730,6 +775,7 @@ finish_rri_copy(ResultRelInfoHolder *rri_holder, resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom( rps_storage->estate, resultRelInfo); } +#endif } /* From d7520bbf4d50e514a88b5925daf3989219ded480 Mon Sep 17 00:00:00 2001 
From: Arseny Sher Date: Fri, 1 Dec 2017 16:21:36 +0300 Subject: [PATCH 0799/1124] Code simpified and improved a bit. --- src/utility_stmt_hooking.c | 44 +++++++++++--------------------------- 1 file changed, 12 insertions(+), 32 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index e8ddd3de..e64c1542 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -718,43 +718,23 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, /* * If this Postgres has no idea about shardman, behave as usual: * vanilla Postgres doesn't support COPY FROM to foreign partitions. - * However, shardman patches to core extend FDW API to allow it, - * though currently postgres_fdw does so in a bit perverted way: we - * redirect COPY FROM to parent table on foreign server, assuming it - * exists, and let it direct tuple to proper partition. This is - * because otherwise we have to modify logic of managing connections - * in postgres_fdw and keep many connections open to one server from - * one backend. + * However, shardman patches to core extend FDW API to allow it. */ -#ifndef PG_SHARDMAN - goto bail_out; /* to avoid 'unused label' warning */ -#else - { /* separate block to avoid 'unused var' warnings */ +#ifdef PG_SHARDMAN + /* shardman COPY FROM requested? */ + if (*find_rendezvous_variable( + "shardman_pathman_copy_from_rendezvous") != NULL && + FdwCopyFromIsSupported(fdw_routine)) + { CopyState cstate = (CopyState) rps_storage->callback_arg; - ResultRelInfo *parent_rri; - const char *parent_relname; - EState *estate; - - /* shardman COPY FROM requested? 
*/ - if (*find_rendezvous_variable( - "shardman_pathman_copy_from_rendezvous") == NULL) - goto bail_out; - - estate = rps_storage->estate; - parent_rri = rps_storage->saved_rel_info; - parent_relname = psprintf( - "%s.%s", "public", - quote_identifier(RelationGetRelationName(parent_rri->ri_RelationDesc))); - if (!FdwCopyFromIsSupported(fdw_routine)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("FDW adapter for relation \"%s\" doesn't support COPY FROM", - RelationGetRelationName(rri->ri_RelationDesc)))); - fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_relname); + ResultRelInfo *parent_rri = rps_storage->saved_rel_info; + EState *estate = rps_storage->estate; + + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri); return; } #endif -bail_out: + elog(ERROR, "cannot copy to foreign partition \"%s\"", get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); } From 6a1ad9a48a4e5dbfc7ddf5d99d8236c3316182c9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 15:40:13 +0300 Subject: [PATCH 0800/1124] improve support for ONLY in select, insert, delete etc --- Makefile | 2 +- expected/pathman_only.out | 29 ++- expected/pathman_only_1.out | 247 -------------------- src/compat/relation_tags.c | 251 --------------------- src/hooks.c | 20 +- src/include/compat/debug_compat_features.h | 1 - src/include/compat/relation_tags.h | 78 ------- src/include/planner_tree_modification.h | 12 +- src/planner_tree_modification.c | 93 ++++---- 9 files changed, 93 insertions(+), 640 deletions(-) delete mode 100644 expected/pathman_only_1.out delete mode 100644 src/compat/relation_tags.c delete mode 100644 src/include/compat/relation_tags.h diff --git a/Makefile b/Makefile index 9e036208..79f674ec 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o 
src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o src/compat/relation_tags.o src/compat/rowmarks_fix.o \ + src/compat/pg_compat.o src/compat/rowmarks_fix.o \ $(WIN32RES) ifdef USE_PGXS diff --git a/expected/pathman_only.out b/expected/pathman_only.out index f90dc56e..28471cf3 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -137,7 +137,34 @@ UNION SELECT * FROM test_only.from_only_test; EXPLAIN (COSTS OFF) SELECT * FROM test_only.from_only_test a JOIN ONLY test_only.from_only_test b USING(val); -ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + /* should be OK */ EXPLAIN (COSTS OFF) WITH q1 AS (SELECT * FROM test_only.from_only_test), diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out deleted file mode 100644 index 77fc0dc5..00000000 --- a/expected/pathman_only_1.out +++ /dev/null @@ -1,247 +0,0 @@ -/* - * --------------------------------------------- - * NOTE: This test behaves differenly on PgPro - * --------------------------------------------- - */ -\set VERBOSITY terse -SET 
search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_only; -/* Test special case: ONLY statement with not-ONLY for partitioned table */ -CREATE TABLE test_only.from_only_test(val INT NOT NULL); -INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); -SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); -NOTICE: sequence "from_only_test_seq" does not exist, skipping - create_range_partitions -------------------------- - 10 -(1 row) - -VACUUM ANALYZE; -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -UNION SELECT * FROM ONLY test_only.from_only_test; - QUERY PLAN -------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Seq Scan on from_only_test -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test -UNION SELECT * FROM ONLY test_only.from_only_test; 
- QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test_1.val - -> Append - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 - -> Seq Scan on from_only_test -(26 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM ONLY test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test -UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: from_only_test.val - -> Append - -> Seq Scan on from_only_test - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on 
from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 -(26 rows) - -/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test a -JOIN ONLY test_only.from_only_test b USING(val); - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Seq Scan on from_only_test b - -> Custom Scan (RuntimeAppend) - -> Seq Scan on from_only_test_1 a - -> Seq Scan on from_only_test_2 a - -> Seq Scan on from_only_test_3 a - -> Seq Scan on from_only_test_4 a - -> Seq Scan on from_only_test_5 a - -> Seq Scan on from_only_test_6 a - -> Seq Scan on from_only_test_7 a - -> Seq Scan on from_only_test_8 a - -> Seq Scan on from_only_test_9 a - -> Seq Scan on from_only_test_10 a -(13 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM test_only.from_only_test), - q2 AS (SELECT * FROM ONLY test_only.from_only_test) -SELECT * FROM q1 JOIN q2 USING(val); - QUERY PLAN ---------------------------------------------- - Hash Join - Hash Cond: (q1.val = q2.val) - CTE q1 - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - CTE q2 - -> Seq Scan on from_only_test - -> CTE Scan on q1 - -> Hash - -> CTE Scan on q2 -(19 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) -SELECT * FROM test_only.from_only_test JOIN q1 USING(val); - QUERY PLAN 
----------------------------------------------------------- - Nested Loop - CTE q1 - -> Seq Scan on from_only_test from_only_test_1 - -> CTE Scan on q1 - -> Custom Scan (RuntimeAppend) - -> Seq Scan on from_only_test_1 from_only_test - -> Seq Scan on from_only_test_2 from_only_test - -> Seq Scan on from_only_test_3 from_only_test - -> Seq Scan on from_only_test_4 from_only_test - -> Seq Scan on from_only_test_5 from_only_test - -> Seq Scan on from_only_test_6 from_only_test - -> Seq Scan on from_only_test_7 from_only_test - -> Seq Scan on from_only_test_8 from_only_test - -> Seq Scan on from_only_test_9 from_only_test - -> Seq Scan on from_only_test_10 from_only_test -(15 rows) - -/* should be OK */ -EXPLAIN (COSTS OFF) -SELECT * FROM test_only.from_only_test -WHERE val = (SELECT val FROM ONLY test_only.from_only_test - ORDER BY val ASC - LIMIT 1); - QUERY PLAN ------------------------------------------------------------------ - Custom Scan (RuntimeAppend) - InitPlan 1 (returns $0) - -> Limit - -> Sort - Sort Key: from_only_test_1.val - -> Seq Scan on from_only_test from_only_test_1 - -> Seq Scan on from_only_test_1 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_2 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_3 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_4 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_5 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_6 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_7 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_8 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_9 from_only_test - Filter: (val = $0) - -> Seq Scan on from_only_test_10 from_only_test - Filter: (val = $0) -(26 rows) - -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects -DROP EXTENSION pg_pathman; diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c deleted file 
mode 100644 index 383dd1f5..00000000 --- a/src/compat/relation_tags.c +++ /dev/null @@ -1,251 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.c - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * NOTE: implementations for vanilla and PostgresPro differ - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "compat/relation_tags.h" -#include "planner_tree_modification.h" - -#include "nodes/nodes.h" - - -#ifndef NATIVE_RELATION_TAGS - -/* - * This table is used to ensure that partitioned relation - * cant't be referenced as ONLY and non-ONLY at the same time. - */ -static HTAB *per_table_relation_tags = NULL; - -/* - * Single row of 'per_table_relation_tags'. - * NOTE: do not reorder these fields. - */ -typedef struct -{ - Oid relid; /* key (part #1) */ - uint32 queryId; /* key (part #2) */ - List *relation_tags; -} relation_tags_entry; - -#endif - -/* Also used in get_refcount_relation_tags() etc... */ -static int per_table_relation_tags_refcount = 0; - - - -/* Look through RTE's relation tags */ -List * -rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key) -{ -#ifdef NATIVE_RELATION_TAGS - - return relation_tags_search(rte->custom_tags, key); - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - - /* Skip if table is not initialized */ - if (per_table_relation_tags) - { - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_FIND, NULL); - - if (htab_entry) - return relation_tags_search(htab_entry->relation_tags, key); - } - - /* Not found, return stub value */ - return NIL; - -#endif -} - -/* Attach new relation tag to RTE. Returns KVP with duplicate key. 
*/ -List * -rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair) -{ - /* Common variables */ - MemoryContext old_mcxt; - const char *current_key; - List *existing_kvp, - *temp_tags; /* rte->custom_tags OR - htab_entry->relation_tags */ - -#ifdef NATIVE_RELATION_TAGS - - /* Load relation tags to 'temp_tags' */ - temp_tags = rte->custom_tags; - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - bool found; - - /* We prefer to initialize this table lazily */ - if (!per_table_relation_tags) - { - const long start_elems = 50; - HASHCTL hashctl; - - memset(&hashctl, 0, sizeof(HASHCTL)); - hashctl.entrysize = sizeof(relation_tags_entry); - hashctl.keysize = offsetof(relation_tags_entry, relation_tags); - hashctl.hcxt = RELATION_TAG_MCXT; - - per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", - start_elems, &hashctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } - - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_ENTER, &found); - - /* Don't forget to initialize list! 
*/ - if (!found) - htab_entry->relation_tags = NIL; - - /* Load relation tags to 'temp_tags' */ - temp_tags = htab_entry->relation_tags; - -#endif - - /* Check that 'key_value_pair' is valid */ - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - /* Extract key of this KVP */ - rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); - - /* Check if KVP with such key already exists */ - existing_kvp = relation_tags_search(temp_tags, current_key); - if (existing_kvp) - return existing_kvp; /* return KVP with duplicate key */ - - /* Add this KVP to relation tags list */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - temp_tags = lappend(temp_tags, key_value_pair); - MemoryContextSwitchTo(old_mcxt); - -/* Finally store 'temp_tags' to relation tags list */ -#ifdef NATIVE_RELATION_TAGS - rte->custom_tags = temp_tags; -#else - htab_entry->relation_tags = temp_tags; -#endif - - /* Success! */ - return NIL; -} - - - -/* Extract key & value from 'key_value_pair' */ -void -rte_deconstruct_tag(const List *key_value_pair, - const char **key, /* ret value #1 */ - const Value **value) /* ret value #2 */ -{ - const char *r_key; - const Value *r_value; - - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - r_key = (const char *) strVal(linitial(key_value_pair)); - r_value = (const Value *) lsecond(key_value_pair); - - /* Check that 'key' is valid */ - Assert(IsA(linitial(key_value_pair), String)); - - /* Check that 'value' is valid or NULL */ - Assert(r_value == NULL || - IsA(r_value, Integer) || - IsA(r_value, Float) || - IsA(r_value, String)); - - /* Finally return key & value */ - if (key) *key = r_key; - if (value) *value = r_value; -} - -/* Search through list of 'relation_tags' */ -List * -relation_tags_search(List *relation_tags, const char *key) -{ - ListCell *lc; - - AssertArg(key); - - /* Scan KVP list */ - foreach (lc, relation_tags) - { - List *current_kvp = (List *) lfirst(lc); - const char *current_key; - - /* Extract key of this 
KVP */ - rte_deconstruct_tag(current_kvp, ¤t_key, NULL); - - /* Check if this is the KVP we're looking for */ - if (strcmp(key, current_key) == 0) - return current_kvp; - } - - /* Nothing! */ - return NIL; -} - - - -/* Increate usage counter by 1 */ -void -incr_refcount_relation_tags(void) -{ - /* Increment reference counter */ - if (++per_table_relation_tags_refcount <= 0) - elog(WARNING, "imbalanced %s", - CppAsString(incr_refcount_relation_tags)); -} - -/* Return current value of usage counter */ -uint32 -get_refcount_relation_tags(void) -{ - /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ - return per_table_relation_tags_refcount; -} - -/* Reset all cached statuses if needed (query end) */ -void -decr_refcount_relation_tags(void) -{ - /* Decrement reference counter */ - if (--per_table_relation_tags_refcount < 0) - elog(WARNING, "imbalanced %s", - CppAsString(decr_refcount_relation_tags)); - - /* Free resources if no one is using them */ - if (per_table_relation_tags_refcount == 0) - { - reset_query_id_generator(); - -#ifndef NATIVE_RELATION_TAGS - hash_destroy(per_table_relation_tags); - per_table_relation_tags = NULL; -#endif - } -} diff --git a/src/hooks.c b/src/hooks.c index 3503f857..ebd35b61 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -11,7 +11,6 @@ */ #include "compat/pg_compat.h" -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" #include "hooks.h" @@ -153,8 +152,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, } /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, - inner_rte)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) return; /* @@ -340,7 +338,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, #endif /* Skip if this table is not allowed to act as parent (e.g. 
FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(rte)) return; /* Proceed iff relation 'rel' is partitioned */ @@ -626,8 +624,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Increment relation tags refcount */ - incr_refcount_relation_tags(); + /* Increase planner() calls count */ + incr_planner_calls_count(); /* Modify query tree if needed */ pathman_transform_query(parse, boundParams); @@ -644,8 +642,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Add PartitionFilter node for INSERT queries */ ExecuteForPlanTree(result, add_partition_filters); - /* Decrement relation tags refcount */ - decr_refcount_relation_tags(); + /* Decrement planner() calls count */ + decr_planner_calls_count(); /* HACK: restore queryId set by pg_stat_statements */ result->queryId = query_id; @@ -656,8 +654,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Caught an ERROR, decrease refcount */ - decr_refcount_relation_tags(); + /* Caught an ERROR, decrease count */ + decr_planner_calls_count(); } /* Rethrow ERROR further */ @@ -735,7 +733,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) } /* Process inlined SQL functions (we've already entered planning stage) */ - if (IsPathmanReady() && get_refcount_relation_tags() > 0) + if (IsPathmanReady() && get_planner_calls_count() > 0) { /* Check that pg_pathman is the last extension loaded */ if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index 8caa6d44..09f12849 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,5 +12,4 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive 
features */ -//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h deleted file mode 100644 index d5183d32..00000000 --- a/src/include/compat/relation_tags.h +++ /dev/null @@ -1,78 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.h - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * - * NOTE: implementations for vanilla and PostgresPro differ, - * which means that subquery pull-up might break the bond - * between a RangeTblEntry and the corresponding KVPs. - * - * This subsystem was meant to replace the broken 'inh' flag - * (see get_rel_parenthood_status() for more details). - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef RELATION_TAGS_H -#define RELATION_TAGS_H - -#include "compat/debug_compat_features.h" - -#include "postgres.h" -#include "nodes/relation.h" -#include "nodes/value.h" -#include "utils/memutils.h" - - -/* Does RTE contain 'custom_tags' list? */ -/* TODO: fix this definition once PgPro contains 'relation_tags' patch */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_RELATION_TAGS) /* && ... 
*/ -#define NATIVE_RELATION_TAGS -#endif - -/* Memory context we're going to use for tags */ -#define RELATION_TAG_MCXT TopTransactionContext - - -/* Safe TAG constructor (Integer) */ -static inline List * -make_rte_tag_int(char *key, int value) -{ - List *kvp; - MemoryContext old_mcxt; - - /* Allocate TAG in a persistent memory context */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - kvp = list_make2(makeString(key), makeInteger(value)); - MemoryContextSwitchTo(old_mcxt); - - return kvp; -} - - -List *rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key); - -List *rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair); - - -List *relation_tags_search(List *custom_tags, - const char *key); - -void rte_deconstruct_tag(const List *key_value_pair, - const char **key, - const Value **value); - - -void incr_refcount_relation_tags(void); -uint32 get_refcount_relation_tags(void); -void decr_refcount_relation_tags(void); - - -#endif /* RELATION_TAGS_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index eee1ea76..b56b6734 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -44,12 +44,16 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled (default) */ } rel_parenthood_status; -void assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status); -rel_parenthood_status get_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte); +rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte); + + +/* used to determine nested planner() calls */ +void incr_planner_calls_count(void); +void decr_planner_calls_count(void); +int32 get_planner_calls_count(void); #endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 77540d95..9c449e00 
100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -10,7 +10,6 @@ * ------------------------------------------------------------------------ */ -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" #include "partition_filter.h" @@ -103,8 +102,6 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); -static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); - static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); @@ -353,13 +350,11 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ - assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_ALLOWED); + assign_rel_parenthood_status(rte, PARENTHOOD_ALLOWED); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ - else assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_DISALLOWED); + else assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); } } @@ -567,59 +562,34 @@ partition_filter_visitor(Plan *plan, void *context) * ----------------------------------------------- */ +#define RPS_STATUS_ASSIGNED ( (uint32) (1 << 31) ) +#define RPS_ENABLE_PARENT ( (uint32) (1 << 30) ) + /* Set parenthood status (per query level) */ void -assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) { - - List *old_relation_tag; - - old_relation_tag = rte_attach_tag(query_id, rte, - make_rte_tag_int(PARENTHOOD_TAG, - new_status)); - - /* We already have a PARENTHOOD_TAG, examine it's value */ - if (old_relation_tag && - tag_extract_parenthood_status(old_relation_tag) != new_status) - { - elog(ERROR, - "it is prohibited to apply ONLY modifier to partitioned " - "tables which have already been mentioned without ONLY"); - } + /* HACK: set relevant bits in 
RTE */ + rte->requiredPerms |= RPS_STATUS_ASSIGNED; + if (new_status == PARENTHOOD_ALLOWED) + rte->requiredPerms |= RPS_ENABLE_PARENT; } /* Get parenthood status (per query level) */ rel_parenthood_status -get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte) +get_rel_parenthood_status(RangeTblEntry *rte) { - List *relation_tag; - - relation_tag = rte_fetch_tag(query_id, rte, PARENTHOOD_TAG); - if (relation_tag) - return tag_extract_parenthood_status(relation_tag); + /* HACK: check relevant bits in RTE */ + if (rte->requiredPerms & RPS_STATUS_ASSIGNED) + return (rte->requiredPerms & RPS_ENABLE_PARENT) ? + PARENTHOOD_ALLOWED : + PARENTHOOD_DISALLOWED; /* Not found, return stub value */ return PARENTHOOD_NOT_SET; } -static rel_parenthood_status -tag_extract_parenthood_status(List *relation_tag) -{ - const Value *value; - rel_parenthood_status status; - - rte_deconstruct_tag(relation_tag, NULL, &value); - Assert(value && IsA(value, Integer)); - - status = (rel_parenthood_status) intVal(value); - Assert(status >= PARENTHOOD_NOT_SET && - status <= PARENTHOOD_ALLOWED); - - return status; -} - /* Replace extern param nodes with consts */ static Node * @@ -678,3 +648,34 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) return expression_tree_mutator(node, eval_extern_params_mutator, (void *) params); } + + +/* + * ----------------------------------------------- + * Count number of times we've visited planner() + * ----------------------------------------------- + */ + +static int32 planner_calls = 0; + +void +incr_planner_calls_count(void) +{ + Assert(planner_calls < INT32_MAX); + + planner_calls++; +} + +void +decr_planner_calls_count(void) +{ + Assert(planner_calls > 0); + + planner_calls--; +} + +int32 +get_planner_calls_count(void) +{ + return planner_calls; +} From feb446317cfb29b028325e75faf221f40c1c7a50 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 16:41:03 +0300 Subject: [PATCH 0801/1124] attempt to fix issue #134 --- 
src/planner_tree_modification.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9c449e00..3f504217 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -562,27 +562,31 @@ partition_filter_visitor(Plan *plan, void *context) * ----------------------------------------------- */ -#define RPS_STATUS_ASSIGNED ( (uint32) (1 << 31) ) -#define RPS_ENABLE_PARENT ( (uint32) (1 << 30) ) +#define RPS_STATUS_ASSIGNED ( (Index) 0x2 ) +#define RPS_ENABLE_PARENT ( (Index) 0x1 ) /* Set parenthood status (per query level) */ void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) -{ +{ + Assert(rte->rtekind != RTE_CTE); + /* HACK: set relevant bits in RTE */ - rte->requiredPerms |= RPS_STATUS_ASSIGNED; + rte->ctelevelsup |= RPS_STATUS_ASSIGNED; if (new_status == PARENTHOOD_ALLOWED) - rte->requiredPerms |= RPS_ENABLE_PARENT; + rte->ctelevelsup |= RPS_ENABLE_PARENT; } /* Get parenthood status (per query level) */ rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte) { + Assert(rte->rtekind != RTE_CTE); + /* HACK: check relevant bits in RTE */ - if (rte->requiredPerms & RPS_STATUS_ASSIGNED) - return (rte->requiredPerms & RPS_ENABLE_PARENT) ? + if (rte->ctelevelsup & RPS_STATUS_ASSIGNED) + return (rte->ctelevelsup & RPS_ENABLE_PARENT) ? 
PARENTHOOD_ALLOWED : PARENTHOOD_DISALLOWED; From 12c86afdc0a26cfcf4668583ef6fb38944bc1a81 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 6 Dec 2017 18:45:56 +0300 Subject: [PATCH 0802/1124] add a few more tests regarding #134 --- expected/pathman_inserts.out | 30 ++++++++++++++++++++++++++++++ expected/pathman_inserts_1.out | 30 ++++++++++++++++++++++++++++++ sql/pathman_inserts.sql | 19 +++++++++++++++++++ 3 files changed, 79 insertions(+) diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index c3a8566f..d1dbf005 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1032,6 +1032,36 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA test_inserts CASCADE; NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index 9f8633ab..8029a0a7 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1032,6 +1032,36 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects DROP SCHEMA test_inserts CASCADE; NOTICE: drop cascades to 19 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 7653a3e6..0f4859c4 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -199,5 +199,24 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ DROP TABLE test_inserts.test_gap CASCADE; +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; +DROP TABLE test_inserts.special_1; + +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; +DROP TABLE test_inserts.special_2; + +DROP TABLE test_inserts.test_special_only CASCADE; + + DROP SCHEMA test_inserts CASCADE; DROP EXTENSION pg_pathman CASCADE; From 13dd68e8071279d71e05a9f882bd0467b6cd7d00 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 7 Dec 2017 12:44:42 +0300 Subject: [PATCH 0803/1124] bump lib version to 1.4.9 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 2718a8da..4922b21a 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.8", + "version": "1.4.9", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.8", + "version": "1.4.9", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7c090761..121880ea 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10408 + 10409 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index a2f7ec77..4ce40e4f 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010408 +#define 
CURRENT_LIB_VERSION 0x010409 void *pathman_cache_search_relid(HTAB *cache_table, From b7f4ac672668bfff63b2f9219d7c183ff7d989bb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 19 Dec 2017 16:58:04 +0300 Subject: [PATCH 0804/1124] replace obsolete info in README.md --- README.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3f3a80ba..71a3895b 100644 --- a/README.md +++ b/README.md @@ -165,13 +165,9 @@ Stops a background worker performing a concurrent partitioning task. Note: worke ### Triggers ```plpgsql -create_hash_update_trigger(parent REGCLASS) +create_update_triggers(parent REGCLASS) ``` -Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. It's useful in cases when the partitioning expression's value might change. -```plpgsql -create_range_update_trigger(parent REGCLASS) -``` -Same as above, but for a RANGE-partitioned table. +Creates a for-each-row trigger to enable cross-partition UPDATE on a table partitioned by HASH/RANGE. The trigger is not created automatically because of the overhead caused by its function. You don't have to use this feature unless partitioning key might change during an UPDATE. 
### Post-creation partition management ```plpgsql From f7fe78c3f420bef04bc6aff321f0b3589429b3b1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 29 Jan 2018 18:20:08 +0300 Subject: [PATCH 0805/1124] change INT32_MAX to PG_INT32_MAX for Windows --- src/planner_tree_modification.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3f504217..0df4fc22 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -665,7 +665,7 @@ static int32 planner_calls = 0; void incr_planner_calls_count(void) { - Assert(planner_calls < INT32_MAX); + Assert(planner_calls < PG_INT32_MAX); planner_calls++; } From bc504d43828d471876ec4b3731be66df487cfef2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 30 Jan 2018 17:54:41 +0300 Subject: [PATCH 0806/1124] replace lfirst_node() with lfirst() for the sake of 9.5.5 (issue #142) --- src/utility_stmt_hooking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index e64c1542..30301cb2 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -111,7 +111,7 @@ is_pathman_related_copy(Node *parsetree) /* Analyze options list */ foreach (lc, copy_stmt->options) { - DefElem *defel = lfirst_node(DefElem, lc); + DefElem *defel = (DefElem *) lfirst(lc); /* We do not support freeze */ /* From 945f224c4ac97f084bb8a8d51d39d8c301eb2967 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 20 Feb 2018 14:17:59 +0300 Subject: [PATCH 0807/1124] update README.md --- README.md | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 71a3895b..99d2cc9f 100644 --- a/README.md +++ b/README.md @@ -12,10 +12,10 @@ The extension is compatible with: * Postgres Pro Standard 9.5, 9.6; * Postgres Pro Enterprise; -By the way, we have a growing Wiki [out 
there](https://github.com/postgrespro/pg_pathman/wiki). +Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). ## Overview -**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT. For example: +**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL <= 10 supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT: ```plpgsql CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT); @@ -23,6 +23,16 @@ CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test); CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test); ``` +PostgreSQL 10 provides native partitioning: + +```plpgsql +CREATE TABLE test(id int4, value text) PARTITION BY RANGE(id); +CREATE TABLE test_1 PARTITION OF test FOR VALUES FROM (1) TO (10); +CREATE TABLE test_2 PARTITION OF test FOR VALUES FROM (10) TO (20); +``` + +It's not so different from the classic approach; there are implicit check constraints, and most of its limitations are still relevant. + Despite the flexibility, this approach forces the planner to perform an exhaustive search and to check constraints on each partition to determine whether it should be present in the plan or not. Large amount of partitions may result in significant planning overhead. The `pg_pathman` module features partition managing functions and optimized planning mechanism which utilizes knowledge of the partitions' structure. 
It stores partitioning configuration in the `pathman_config` table; each row contains a single entry for a partitioned table (relation name, partitioning column and its type). During the initialization stage the `pg_pathman` module caches some information about child partitions in the shared memory, which is used later for plan construction. Before a SELECT query is executed, `pg_pathman` traverses the condition tree in search of expressions like: @@ -60,13 +70,6 @@ More interesting features are yet to come. Stay tuned! * FDW support (foreign partitions); * Various GUC toggles and configurable settings. -## Roadmap - - * Multi-level partitioning (ver 1.5); - * Improved referential integrity + foreign keys on partitioned tables (ver 1.5); - -Take a look at [this page](https://github.com/postgrespro/pg_pathman/wiki/Roadmap); - ## Installation guide To install `pg_pathman`, execute this in the module's directory: ```shell From 62d04775377225805c110bf2428eeef66335c1c7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 16:15:56 +0300 Subject: [PATCH 0808/1124] fix incorrect usage of memcpy() in start_bgworker() --- src/pathman_workers.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index d6d9a953..e393d313 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -185,17 +185,16 @@ start_bgworker(const char bgworker_name[BGW_MAXLEN], pid_t pid; /* Initialize worker struct */ - memcpy(worker.bgw_name, bgworker_name, BGW_MAXLEN); - memcpy(worker.bgw_function_name, bgworker_proc, BGW_MAXLEN); - memcpy(worker.bgw_library_name, "pg_pathman", BGW_MAXLEN); + memset(&worker, 0, sizeof(worker)); + + snprintf(worker.bgw_name, BGW_MAXLEN, "%s", bgworker_name); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "%s", bgworker_proc); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | 
BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; -#if PG_VERSION_NUM < 100000 - worker.bgw_main = NULL; -#endif worker.bgw_main_arg = bgw_arg; worker.bgw_notify_pid = MyProcPid; From d527f7032eaa9e86ca6141705341025539282ba0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 18:00:22 +0300 Subject: [PATCH 0809/1124] backport tests from rel_future_beta --- tests/python/.gitignore | 1 + tests/python/partitioning_test.py | 2046 ++++++++++++++--------------- travis/pg-travis-test.sh | 2 +- 3 files changed, 978 insertions(+), 1071 deletions(-) create mode 100644 tests/python/.gitignore diff --git a/tests/python/.gitignore b/tests/python/.gitignore new file mode 100644 index 00000000..750ecf9f --- /dev/null +++ b/tests/python/.gitignore @@ -0,0 +1 @@ +tests.log diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0d05c458..2c290f8d 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,1087 +1,993 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2017, Postgres Professional """ -import unittest +import json import math -import time import os import re import subprocess import threading +import time +import unittest +import functools + +from distutils.version import LooseVersion +from testgres import get_new_node, get_pg_version +from testgres.utils import pg_version_ge + +# set setup base logging config, it can be turned on by `use_logging` +# parameter on node setup + +import logging +import logging.config + +logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version': 1, + 'handlers': { + 
'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) +version = LooseVersion(get_pg_version()) -from testgres import get_new_node, stop_all, get_config - -version = get_config().get("VERSION_NUM") # Helper function for json equality -def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj - - -def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper - - -class PartitioningTests(unittest.TestCase): - - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - if 
version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - - def test_concurrent(self): - """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - try: - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - 
self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = threading.Lock() - - # Define 
thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. 
Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - 
master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by 
id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - 
node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": 
"range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove 
all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - 
self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check 
number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select 
set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - 
[node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar 
to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - 
con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +def ordered(obj, skip_keys=None): + if isinstance(obj, dict): + return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items() + if skip_keys is None or (skip_keys and k not in skip_keys)) + if isinstance(obj, list): + return sorted(ordered(x, skip_keys=skip_keys) for x in obj) + else: + return obj + + +# Check if postgres_fdw is available +@functools.lru_cache(maxsize=1) +def is_postgres_fdw_ready(): + with get_new_node().init().start() as node: + result = node.execute(""" + select count(*) from pg_available_extensions where name = 'postgres_fdw' + """) + + if result[0][0] > 0: + return True + + return False + + +class Tests(unittest.TestCase): + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = subprocess.Popen([command], 
stdin=subprocess.PIPE) + p.communicate(str(pid).encode()) + + def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): + node = get_new_node() + node.init(allow_streaming=allow_streaming) + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.psql('create extension pg_pathman') + + if test_data: + node.safe_psql(""" + create table abc(id serial, t text); + insert into abc select generate_series(1, 300000); + select create_hash_partitions('abc', 'id', 3, partition_data := false); + """) + + node.safe_psql('vacuum analyze') + + return node + + def test_concurrent(self): + """ Test concurrent partitioning """ + + with self.start_new_pathman_cluster(test_data=True) as node: + node.psql("select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql(""" + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute(""" + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('select count(*) from abc') + self.assertEqual(data[0][0], 300000) + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: + with node.replicate() as replica: + replica.start() + replica.catchup() + + # check that results are equal + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql("select enable_parent('abc')") + + # wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) 
select * from abc'), + replica.psql('explain (costs off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('update pathman_config_params set enable_parent = false') + + # wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql(query) + with lock: + flag.set(True) + + # Initialize master server + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + + node.safe_psql(""" + create extension pg_pathman; + create table abc(id serial, t text); + insert into abc select generate_series(1, 100000); + select create_range_partitions('abc', 'id', 1, 50000); + """) + + # Start transaction that will create partition + with node.connect() as con: + con.begin() + con.execute("select append_range_partition('abc')") + + # Start threads that suppose to add 
new partitions and wait some + # time + query = ( + "select prepend_range_partition('abc')", + "select append_range_partition('abc')", + "select add_range_partition('abc', 500000, 550000)", + ) + + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # These threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + "select count(*) from pg_inherits where inhparent='abc'::regclass"), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute("select get_tablespace('{}')".format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + node.psql('create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql("create tablespace test_space location '{}'".format(path)) + + # create table in this tablespace + node.psql('create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql("select create_range_partitions('abc', 'a', 1, 10, 3)") + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql("select append_range_partition('abc', 'abc_appended')") + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select prepend_range_partition('abc', 'abc_prepended')") + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select add_range_partition('abc', 41, 51, 'abc_added')") + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')") + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" + ) + node.psql( + "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" + ) + node.psql( + "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" + ) + node.psql( + "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + with get_new_node() as master, get_new_node() as fserv: + master.init() + master.append_conf(""" + shared_preload_libraries='pg_pathman, 
postgres_fdw'\n + """) + master.start() + master.psql('create extension pg_pathman') + master.psql('create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql(""" + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('select current_user')[0][0] + + fserv.init().start() + fserv.safe_psql("create table ftable(id serial, name text)") + fserv.safe_psql("insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql(""" + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql(""" + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql(""" + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql("insert into abc values (26, 'part')") + self.assertEqual( + master.safe_psql('select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql("select drop_partitions('abc')") + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - 
insert data + # - drop partitions + master.psql(""" + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('create table f_hash_test(id serial, name text)') + + master.safe_psql(""" + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql(""" + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql("select drop_partitions('hash_test')") + + @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + # Init and start postgres instance with preload pg_pathman module + with get_new_node() as node: + node.init() + node.append_conf( + "shared_preload_libraries='pg_pathman, postgres_fdw'") + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < LooseVersion('9.6.0'): + return + + # Prepare test database + node.psql('create extension pg_pathman') + node.psql(""" + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('vacuum analyze') + + node.psql(""" + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + 
end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= LooseVersion('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + 
"Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan), ordered(expected)) + + # Remove all objects for testing + node.psql('drop table range_partitioned cascade') + node.psql('drop table hash_partitioned cascade') + node.psql('drop extension pg_pathman cascade') + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table 
drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con1.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(1, 40, 34)) + """) # query selects from drop_test_1 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_1 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_1') >= 0: + has_drop_test_1 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_1') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP 
TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_1) + self.assertTrue(has_drop_test_4) + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + 
con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # 
Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + def test_pg_dump(self): + with self.start_new_pathman_cluster() as node: + node.safe_psql('create database copy') + + node.safe_psql(""" + create table test_hash(val int not null); + select create_hash_partitions('test_hash', 'val', 10); + insert into test_hash select generate_series(1, 90); + + create table test_range(val int not null); + select create_range_partitions('test_range', 'val', 1, 10, 10); + insert into test_range select generate_series(1, 95); + """) + + dump = node.dump() + node.restore(dbname='copy', filename=dump) + os.remove(dump) + + # HASH + a = node.execute('postgres', 'select * from test_hash order by val') + b = node.execute('copy', 'select * from test_hash order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_hash order by val') + d = node.execute('copy', 'select * from only test_hash order by val') + self.assertEqual(c, d) + + # RANGE + a = node.execute('postgres', 'select * from test_range order by val') + b = node.execute('copy', 'select * from test_range order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_range order by val') + d = node.execute('copy', 'select * from only test_range order by val') + self.assertEqual(c, d) + + # check partition sets + p1 = node.execute('postgres', 'select * from pathman_partition_list') + p2 = node.execute('copy', 'select * from pathman_partition_list') + self.assertEqual(sorted(p1), sorted(p2)) + + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple 
inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + with open(os.devnull, 'w') as fnull: + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=fnull, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + "%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=fnull, + stderr=fnull, + options=[ + "-D", + "timeout=%f" % 
detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) if __name__ == "__main__": - unittest.main() - + unittest.main() diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh index be0e645e..97fa5ea9 100755 --- a/travis/pg-travis-test.sh +++ b/travis/pg-travis-test.sh @@ -7,7 +7,7 @@ sudo apt-get update # required packages apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres==0.4.0" +pip_packages="testgres" # exit code status=0 From 49c6f70e8463b0d7d086c47cd7f286879a1d2378 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 15 Mar 2018 18:55:05 +0300 Subject: [PATCH 0810/1124] bump lib version to 1.4.10 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 4922b21a..9bcf29d5 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.9", + "version": "1.4.10", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.9", + "version": "1.4.10", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 121880ea..0e0cbed7 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -12,7 +12,7 @@ SELECT debug_capture(); SELECT 
get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10409 + 10410 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 4ce40e4f..5de01b32 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010409 +#define CURRENT_LIB_VERSION 0x010410 void *pathman_cache_search_relid(HTAB *cache_table, From 001166ae918bae169def31e66b8c2ed21e76ac4e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 26 Mar 2018 15:04:10 +0300 Subject: [PATCH 0811/1124] some notes regarding partition creation callback (thanks to @thamerlan) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 99d2cc9f..b4a8be50 100644 --- a/README.md +++ b/README.md @@ -272,7 +272,7 @@ Enable/disable auto partition propagation (only for RANGE partitioning). It is e ```plpgsql set_init_callback(relation REGCLASS, callback REGPROC DEFAULT 0) ``` -Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. Parameter `arg` consists of several fields whose presence depends on partitioning type: +Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). If callback is marked with SECURITY INVOKER, it's executed with the privileges of the user that produced a statement which has led to creation of a new partition (e.g. `INSERT INTO partitioned_table VALUES (-5)`). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. 
Parameter `arg` consists of several fields whose presence depends on partitioning type: ```json /* RANGE-partitioned table abc (child abc_4) */ { From 6d154fd28938efda2cec68743b18e23b86d764c4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Apr 2018 15:57:16 +0300 Subject: [PATCH 0812/1124] fix partition creation for tables with dropped columns --- Makefile | 1 + expected/pathman_dropped_cols.out | 84 +++++++++++++++++++++++++++++++ sql/pathman_dropped_cols.sql | 43 ++++++++++++++++ src/include/utils.h | 2 +- src/init.c | 8 --- src/relation_info.c | 2 +- src/utils.c | 25 ++++++--- 7 files changed, 148 insertions(+), 17 deletions(-) create mode 100644 expected/pathman_dropped_cols.out create mode 100644 sql/pathman_dropped_cols.sql diff --git a/Makefile b/Makefile index 79f674ec..392f8e5d 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,7 @@ REGRESS = pathman_array_qual \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_dropped_cols \ pathman_expressions \ pathman_foreign_keys \ pathman_gaps \ diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out new file mode 100644 index 00000000..89585b52 --- /dev/null +++ b/expected/pathman_dropped_cols.out @@ -0,0 +1,84 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; +/* + * we should be able to manage tables with dropped columns + */ +create table test_range(a int, b int, key int not null); +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + prepend_range_partition +------------------------- + test_range_3 +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max 
+------------+--------------+----------+------+-----------+----------- + test_range | test_range_1 | 2 | key | 1 | 11 + test_range | test_range_2 | 2 | key | 11 | 21 + test_range | test_range_3 | 2 | key | -9 | 1 +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; + pg_get_constraintdef +------------------------------- + CHECK (key >= 1 AND key < 11) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + pg_get_constraintdef +------------------------------------------ + CHECK (key >= '-9'::integer AND key < 1) +(1 row) + +drop table test_range cascade; +NOTICE: drop cascades to 4 other objects +create table test_hash(a int, b int, key int not null); +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + replace_hash_partition +------------------------ + test_dummy +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +-----------+-------------+----------+------+-----------+----------- + test_hash | test_hash_0 | 1 | key | | + test_hash | test_hash_1 | 1 | key | | + test_hash | test_dummy | 1 | key | | +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 1) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 2) +(1 row) + +drop table test_hash 
cascade; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA dropped_cols CASCADE; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql new file mode 100644 index 00000000..32589c8c --- /dev/null +++ b/sql/pathman_dropped_cols.sql @@ -0,0 +1,43 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; + + +/* + * we should be able to manage tables with dropped columns + */ + +create table test_range(a int, b int, key int not null); + +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + +drop table test_range cascade; + + +create table test_hash(a int, b int, key int not null); + +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; +drop table test_hash cascade; + + +DROP SCHEMA dropped_cols CASCADE; +DROP EXTENSION pg_pathman; diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..42a1b814 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -24,7 +24,7 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool 
check_security_policy_internal(Oid relid, Oid role); -bool match_expr_to_operand(Node *expr, Node *operand); +bool match_expr_to_operand(const Node *expr, const Node *operand); /* * Misc. diff --git a/src/init.c b/src/init.c index 80ba4f0a..93b95839 100644 --- a/src/init.c +++ b/src/init.c @@ -1165,7 +1165,6 @@ validate_hash_constraint(const Expr *expr, Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(EXPRESSION) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ Const *cur_partition_idx; /* hash value for this partition */ - Node *hash_arg; if (!IsA(first, FuncExpr) || !IsA(second, Const)) return false; @@ -1180,13 +1179,6 @@ validate_hash_constraint(const Expr *expr, if (list_length(type_hash_proc_expr->args) != 1) return false; - /* Extract arg of TYPE_HASH_PROC() */ - hash_arg = (Node *) linitial(type_hash_proc_expr->args); - - /* Check arg of TYPE_HASH_PROC() */ - if (!match_expr_to_operand(prel->expr, hash_arg)) - return false; - /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) return false; diff --git a/src/relation_info.c b/src/relation_info.c index b46c62ee..cb9c8bab 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -976,7 +976,7 @@ delay_invalidation_vague_rel(Oid vague_rel) /* Finish all pending invalidation jobs if possible */ void finish_delayed_invalidation(void) -{ +{ /* Exit early if there's nothing to do */ if (delayed_invalidation_whole_cache == false && delayed_invalidation_parent_rels == NIL && diff --git a/src/utils.c b/src/utils.c index 6f9e53cd..bd60d57d 100644 --- a/src/utils.c +++ b/src/utils.c @@ -37,6 +37,21 @@ #include "utils/regproc.h" #endif +static const Node * +drop_irrelevant_expr_wrappers(const Node *expr) +{ + switch (nodeTag(expr)) + { + /* Strip relabeling */ + case T_RelabelType: + return (const Node *) ((const RelabelType *) expr)->arg; + + /* no special actions 
required */ + default: + return expr; + } +} + static bool clause_contains_params_walker(Node *node, void *context) { @@ -110,14 +125,10 @@ check_security_policy_internal(Oid relid, Oid role) /* Compare clause operand with expression */ bool -match_expr_to_operand(Node *expr, Node *operand) +match_expr_to_operand(const Node *expr, const Node *operand) { - /* Strip relabeling for both operand and expr */ - if (operand && IsA(operand, RelabelType)) - operand = (Node *) ((RelabelType *) operand)->arg; - - if (expr && IsA(expr, RelabelType)) - expr = (Node *) ((RelabelType *) expr)->arg; + expr = drop_irrelevant_expr_wrappers(expr); + operand = drop_irrelevant_expr_wrappers(operand); /* compare expressions and return result right away */ return equal(expr, operand); From 2ea90b3a3cdc4c36757fb6254735a35940943813 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Apr 2018 16:52:35 +0300 Subject: [PATCH 0813/1124] prohibit add_to_pathman_config() on partitions --- expected/pathman_basic.out | 12 ++++++++++++ sql/pathman_basic.sql | 6 ++++++ src/pl_funcs.c | 10 ++++++++++ 3 files changed, 28 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fa946d72..15cd31b7 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1851,6 +1851,18 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* Check that multilivel is prohibited */ +CREATE TABLE test.multi(key int NOT NULL); +SELECT create_hash_partitions('test.multi', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('test.multi_1', 'key', 3); +ERROR: multilevel partitioning is not supported +DROP TABLE test.multi CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f24716c0..a9d37f18 
100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -551,6 +551,12 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +/* Check that multilivel is prohibited */ +CREATE TABLE test.multi(key int NOT NULL); +SELECT create_hash_partitions('test.multi', 'key', 3); +SELECT create_hash_partitions('test.multi_1', 'key', 3); +DROP TABLE test.multi CASCADE; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 175d36de..53d4259c 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -765,6 +765,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Datum expr_datum; PathmanInitState init_state; + PartParentSearch parent_search; if (!PG_ARGISNULL(0)) { @@ -798,6 +799,15 @@ add_to_pathman_config(PG_FUNCTION_ARGS) get_rel_name_or_relid(relid)))); } + /* Check if it's a partition */ + if (get_parent_of_partition(relid, &parent_search) && + parent_search == PPS_ENTRY_PART_PARENT) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("multilevel partitioning is not supported"))); + } + /* Select partitioning type */ switch (PG_NARGS()) { From 88491153b8718e096c80b21cdaea277b3c4809c8 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Apr 2018 17:01:42 +0300 Subject: [PATCH 0814/1124] attempt to fix issue #153 (table is being partitioned now) --- src/init.c | 13 +------------ src/xact_handling.c | 4 ++-- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/src/init.c b/src/init.c index 93b95839..d8fb4c57 100644 --- a/src/init.c +++ b/src/init.c @@ -674,18 +674,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set xmin if necessary */ if (xmin) - { - Datum value; - bool isnull; - - value = heap_getsysattr(htup, - MinTransactionIdAttributeNumber, - RelationGetDescr(rel), - &isnull); - - Assert(!isnull); - *xmin = DatumGetTransactionId(value); - } + *xmin = HeapTupleHeaderGetXmin(htup->t_data); /* Set ItemPointer if necessary */ if (iptr) 
diff --git a/src/xact_handling.c b/src/xact_handling.c index c6696cce..a63decce 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -162,8 +162,8 @@ xact_is_alter_pathman_stmt(Node *stmt) bool xact_object_is_visible(TransactionId obj_xmin) { - return TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(obj_xmin, FrozenTransactionId); + return TransactionIdEquals(obj_xmin, FrozenTransactionId) || + TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()); } /* From e83044ebc82215d633ee457bdd9ab873a7c9ec07 Mon Sep 17 00:00:00 2001 From: "i.kartyshov" Date: Thu, 12 Apr 2018 20:23:27 +0300 Subject: [PATCH 0815/1124] Fix test pathman_join_clause set search_path=public --- expected/pathman_join_clause.out | 1 + sql/pathman_join_clause.sql | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 7d9acdea..25d5cba9 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 90287201..c578d361 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; From df9219c00ad4534ddd4b3d068ee38e83bdf33815 Mon Sep 17 00:00:00 2001 From: "i.kartyshov" Date: Thu, 12 Apr 2018 20:39:05 +0300 Subject: [PATCH 0816/1124] Fix tests set search_path=public --- expected/pathman_calamity.out | 1 + expected/pathman_callbacks.out | 1 + expected/pathman_domains.out | 1 + expected/pathman_foreign_keys.out | 1 + expected/pathman_interval.out | 1 + expected/pathman_permissions.out | 1 + expected/pathman_rowmarks.out | 1 + expected/pathman_rowmarks_1.out | 1 
+ expected/pathman_runtime_nodes.out | 1 + expected/pathman_utility_stmt.out | 1 + sql/pathman_calamity.sql | 2 +- sql/pathman_callbacks.sql | 2 +- sql/pathman_domains.sql | 1 + sql/pathman_foreign_keys.sql | 1 + sql/pathman_interval.sql | 1 + sql/pathman_permissions.sql | 1 + sql/pathman_rowmarks.sql | 2 +- sql/pathman_runtime_nodes.sql | 2 +- sql/pathman_utility_stmt.sql | 2 +- 19 files changed, 19 insertions(+), 5 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0e0cbed7..3e87884c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; /* call for coverage test */ diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index aaa9f82b..3eea2049 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; /* callback #1 */ diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index f78a73dc..e5e882c0 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 00462c3d..2ff12279 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; /* Check primary keys generation */ diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 1bcd8216..72dc4e01 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -1,4 +1,5 @@ \set VERBOSITY 
terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; /* Range partitions for INT2 type */ diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 4700f8bf..e329a9ec 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; CREATE ROLE user1 LOGIN; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 4c399e85..0bf1078a 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -3,6 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE rowmarks.first(id int NOT NULL); diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index 28d3f27d..d072cde9 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -3,6 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE rowmarks.first(id int NOT NULL); diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index d49343b9..f364cfb4 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 37149f1e..4cc4d493 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; /* * Test COPY diff --git 
a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index ed1b7b82..ad29a705 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index f435e1c7..65b729d9 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index f6ee7076..4793c6f8 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index 392b3a7a..1ec1b766 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index 59393ca4..f2933ab0 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 43bf6ca6..2dd22fc0 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 9864b8b9..aa365544 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -3,7 +3,7 @@ * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- */ - +SET search_path = 'public'; CREATE 
EXTENSION pg_pathman; CREATE SCHEMA rowmarks; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index b54c7571..e0b50e9b 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index c7d25051..31232ce1 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; From 7a561261b631a391156d1a074a6007d9e51ea128 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Apr 2018 22:49:12 +0300 Subject: [PATCH 0817/1124] fix typos using codespell --- src/hooks.c | 2 +- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 2 +- src/partition_creation.c | 4 ++-- src/partition_filter.c | 4 ++-- src/pathman_workers.c | 2 +- src/planner_tree_modification.c | 2 +- src/relation_info.c | 2 +- src/utils.c | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index ebd35b61..adcb805b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -600,7 +600,7 @@ pathman_enable_assign_hook(bool newval, void *extra) /* * Planner hook. It disables inheritance for tables that have been partitioned - * by pathman to prevent standart PostgreSQL partitioning mechanism from + * by pathman to prevent standard PostgreSQL partitioning mechanism from * handling that tables. 
*/ PlannedStmt * diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0cd08c36..fdd14045 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -69,7 +69,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ - CmdType command_type; /* currenly we only allow INSERT */ + CmdType command_type; /* currently we only allow INSERT */ LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; }; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index c4bc3a05..99eddc22 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -161,7 +161,7 @@ typedef struct int ev_align; /* alignment of the expression val's type */ Oid ev_collid; /* collation of the expression val */ - Oid cmp_proc, /* comparison fuction for 'ev_type' */ + Oid cmp_proc, /* comparison function for 'ev_type' */ hash_proc; /* hash function for 'ev_type' */ MemoryContext mcxt; /* memory context holding this struct */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 20094a4f..b5acfb28 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -647,7 +647,7 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ /* - * If we found a unique name or attemps number exceeds some reasonable + * If we found a unique name or attempts number exceeds some reasonable * value then we quit * * XXX Should we throw an exception if max attempts number is reached? 
@@ -1231,7 +1231,7 @@ build_raw_range_check_tree(Node *raw_expression, and_oper->args = lappend(and_oper->args, left_arg); } - /* Right comparision (VAR < end_value) */ + /* Right comparison (VAR < end_value) */ if (!IsInfinite(end_value)) { /* Build right boundary */ diff --git a/src/partition_filter.c b/src/partition_filter.c index a1886c4d..66d19d34 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -160,7 +160,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; parts_storage->callback_arg = on_new_rri_holder_cb_arg; - /* Currenly ResultPartsStorage is used only for INSERTs */ + /* Currently ResultPartsStorage is used only for INSERTs */ parts_storage->command_type = CMD_INSERT; parts_storage->speculative_inserts = speculative_inserts; @@ -484,7 +484,7 @@ make_partition_filter(Plan *subplan, CustomScan *cscan = makeNode(CustomScan); Relation parent_rel; - /* Currenly we don't support ON CONFLICT clauses */ + /* Currently we don't support ON CONFLICT clauses */ if (conflict_action != ONCONFLICT_NONE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), diff --git a/src/pathman_workers.c b/src/pathman_workers.c index e393d313..a3114ec7 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -599,7 +599,7 @@ bgw_main_concurrent_part(Datum main_arg) error = CopyErrorData(); FlushErrorState(); - /* Print messsage for this BGWorker to server log */ + /* Print message for this BGWorker to server log */ ereport(LOG, (errmsg("%s: %s", concurrent_part_bgw, error->message), errdetail("attempt: %d/%d, sleep time: %.2f", diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 0df4fc22..d4c2ee25 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -89,7 +89,7 @@ typedef struct /* SubLink that might contain an examined query */ SubLink *parent_sublink; - /* CommonTableExpr that might containt an examined 
query */ + /* CommonTableExpr that might contain an examined query */ CommonTableExpr *parent_cte; } transform_query_cxt; diff --git a/src/relation_info.c b/src/relation_info.c index cb9c8bab..1d191f1a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -218,7 +218,7 @@ refresh_pathman_relation_info(Oid relid, prel->ev_typmod = exprTypmod(prel->expr); prel->ev_collid = exprCollation(prel->expr); - /* Fetch HASH & CMP fuctions and other stuff from type cache */ + /* Fetch HASH & CMP functions and other stuff from type cache */ typcache = lookup_type_cache(prel->ev_type, TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); diff --git a/src/utils.c b/src/utils.c index bd60d57d..6f18b770 100644 --- a/src/utils.c +++ b/src/utils.c @@ -377,7 +377,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) if (IsBinaryCoercible(in_type, out_type)) return value; - /* If not, try to perfrom a type cast */ + /* If not, try to perform a type cast */ ret = find_coercion_pathway(out_type, in_type, COERCION_EXPLICIT, &castfunc); @@ -422,7 +422,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) } /* - * Convert interval from TEXT to binary form using partitioninig expresssion type. + * Convert interval from TEXT to binary form using partitioninig expression type. */ Datum extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ From 0c8cd7f5078b062240ebd2a6dfba082ddb2de7d6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 14:50:42 +0300 Subject: [PATCH 0818/1124] attempt to fix duplicate rows (issue #155) --- src/hooks.c | 21 +++++++++------------ src/planner_tree_modification.c | 7 ++----- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index adcb805b..2a045a43 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -369,6 +369,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * FROM test.tmp2 t2 * WHERE id = t.id); * + * or unions, multilevel partitioning, etc. 
+ * * Since we disable optimizations on 9.5, we * have to skip parent table that has already * been expanded by standard inheritance. @@ -378,23 +380,18 @@ pathman_rel_pathlist_hook(PlannerInfo *root, foreach (lc, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); - RangeTblEntry *cur_parent_rte, - *cur_child_rte; - - /* This 'appinfo' is not for this child */ - if (appinfo->child_relid != rti) - continue; - - cur_parent_rte = root->simple_rte_array[appinfo->parent_relid]; - cur_child_rte = rte; /* we already have it, saves time */ - /* This child == its own parent table! */ - if (cur_parent_rte->relid == cur_child_rte->relid) + /* + * If there's an 'appinfo', it means that somebody + * (PG?) has already processed this partitioned table + * and added its children to the plan. + */ + if (appinfo->child_relid == rti) return; } } - /* Make copy of partitioning expression and fix Var's varno attributes */ + /* Make copy of partitioning expression and fix Var's varno attributes */ part_expr = PrelExpressionForRelid(prel, rti); /* Get partitioning-related clauses (do this before append_child_relation()) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d4c2ee25..3225e59e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -343,10 +343,7 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) /* Proceed if table is partitioned by pg_pathman */ if ((prel = get_pathman_relation_info(rte->relid)) != NULL) { - /* - * HACK: unset the 'inh' flag to disable standard - * planning. We'll set it again later. 
- */ + /* HACK: unset the 'inh' flag to disable standard planning */ rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ @@ -569,7 +566,7 @@ partition_filter_visitor(Plan *plan, void *context) void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) -{ +{ Assert(rte->rtekind != RTE_CTE); /* HACK: set relevant bits in RTE */ From f0bd3675ee1546b9ff1193f9962e1ef6d11af423 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:16:25 +0300 Subject: [PATCH 0819/1124] small refactorings in test suite --- Makefile | 1 + expected/pathman_basic.out | 12 --------- expected/pathman_multilevel.out | 44 +++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 6 ----- sql/pathman_multilevel.sql | 30 ++++++++++++++++++++++ 5 files changed, 75 insertions(+), 18 deletions(-) create mode 100644 expected/pathman_multilevel.out create mode 100644 sql/pathman_multilevel.sql diff --git a/Makefile b/Makefile index 392f8e5d..d810185c 100644 --- a/Makefile +++ b/Makefile @@ -46,6 +46,7 @@ REGRESS = pathman_array_qual \ pathman_join_clause \ pathman_lateral \ pathman_mergejoin \ + pathman_multilevel \ pathman_only \ pathman_param_upd_del \ pathman_permissions \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 15cd31b7..fa946d72 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1851,18 +1851,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects -/* Check that multilivel is prohibited */ -CREATE TABLE test.multi(key int NOT NULL); -SELECT create_hash_partitions('test.multi', 'key', 3); - create_hash_partitions ------------------------- - 3 -(1 row) - -SELECT create_hash_partitions('test.multi_1', 'key', 3); -ERROR: multilevel partitioning is not supported -DROP TABLE test.multi CASCADE; -NOTICE: drop cascades to 3 other objects DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman 
CASCADE; diff --git a/expected/pathman_multilevel.out b/expected/pathman_multilevel.out new file mode 100644 index 00000000..062f60a5 --- /dev/null +++ b/expected/pathman_multilevel.out @@ -0,0 +1,44 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA multi; +/* Check that multilevel is prohibited */ +CREATE TABLE multi.test(key int NOT NULL); +SELECT create_hash_partitions('multi.test', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('multi.test_1', 'key', 3); +ERROR: multilevel partitioning is not supported +DROP TABLE multi.test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Attach partitioned subtree to 'abc' */ +CREATE TABLE multi.abc (val int NOT NULL); +CREATE TABLE multi.def (LIKE multi.abc); +SELECT create_hash_partitions('multi.def', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +ALTER TABLE multi.def INHERIT multi.abc; +/* + * Although multilevel partitioning is not supported, + * we must make sure that pg_pathman won't add + * duplicate relations to the final plan. 
+ */ +EXPLAIN (COSTS OFF) TABLE multi.abc; + QUERY PLAN +------------------------- + Append + -> Seq Scan on abc + -> Seq Scan on def + -> Seq Scan on def_0 + -> Seq Scan on def_1 +(5 rows) + +DROP SCHEMA multi CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a9d37f18..f24716c0 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -551,12 +551,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; -/* Check that multilivel is prohibited */ -CREATE TABLE test.multi(key int NOT NULL); -SELECT create_hash_partitions('test.multi', 'key', 3); -SELECT create_hash_partitions('test.multi_1', 'key', 3); -DROP TABLE test.multi CASCADE; - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_multilevel.sql b/sql/pathman_multilevel.sql new file mode 100644 index 00000000..1e211647 --- /dev/null +++ b/sql/pathman_multilevel.sql @@ -0,0 +1,30 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA multi; + + +/* Check that multilevel is prohibited */ +CREATE TABLE multi.test(key int NOT NULL); +SELECT create_hash_partitions('multi.test', 'key', 3); +SELECT create_hash_partitions('multi.test_1', 'key', 3); +DROP TABLE multi.test CASCADE; + + +/* Attach partitioned subtree to 'abc' */ +CREATE TABLE multi.abc (val int NOT NULL); +CREATE TABLE multi.def (LIKE multi.abc); +SELECT create_hash_partitions('multi.def', 'val', 2); +ALTER TABLE multi.def INHERIT multi.abc; + +/* + * Although multilevel partitioning is not supported, + * we must make sure that pg_pathman won't add + * duplicate relations to the final plan. 
+ */ +EXPLAIN (COSTS OFF) TABLE multi.abc; + + +DROP SCHEMA multi CASCADE; +DROP EXTENSION pg_pathman; From 1c1dfffd2875855ccf4869bae098943269798064 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:36:57 +0300 Subject: [PATCH 0820/1124] make output of function get_pathman_lib_version() more user-friendly --- expected/pathman_calamity.out | 2 +- src/pl_funcs.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 3e87884c..7ac7da61 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 10410 + 1.4.10 (1 row) set client_min_messages = NOTICE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 53d4259c..00b26b44 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -1606,5 +1606,9 @@ debug_capture(PG_FUNCTION_ARGS) Datum get_pathman_lib_version(PG_FUNCTION_ARGS) { - PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); + uint8 ver_major = (CURRENT_LIB_VERSION & 0xFF0000) >> 16, + ver_minor = (CURRENT_LIB_VERSION & 0xFF00) >> 8, + ver_patch = (CURRENT_LIB_VERSION & 0xFF); + + PG_RETURN_CSTRING(psprintf("%x.%x.%x", ver_major, ver_minor, ver_patch)); } From 601f53ae4d57216cc0650b3fbcb450e38bec6f01 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 16:50:47 +0300 Subject: [PATCH 0821/1124] bump lib version to 1.4.11 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 9bcf29d5..8d62708b 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.10", + "version": "1.4.11", "maintainer": [ 
"Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.10", + "version": "1.4.11", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7ac7da61..f9c63043 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.10 + 1.4.11 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 5de01b32..2fb82a91 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010410 +#define CURRENT_LIB_VERSION 0x010411 void *pathman_cache_search_relid(HTAB *cache_table, From cde3475de6df7201da6dfa2f4c976e5cbc8354b7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 27 Apr 2018 22:09:54 +0300 Subject: [PATCH 0822/1124] small changes to issue template --- .github/ISSUE_TEMPLATE.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 5ad2562c..b1e98a96 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -22,7 +22,5 @@ Explain your problem here (it's always better to provide reproduction steps) ... 
- - - + From 60e13be3238036c6f4cce8822df499afc64e0e27 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 28 Apr 2018 15:48:59 +0300 Subject: [PATCH 0823/1124] compatibility fixes for 10.4, 9.6.9 and 9.5.13 --- src/hooks.c | 6 ++++-- src/include/compat/pg_compat.h | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a045a43..fb299371 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -165,8 +165,10 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Extract join clauses which will separate partitions */ if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { - extract_actual_join_clauses(extra->restrictlist, - &joinclauses, &otherclauses); + extract_actual_join_clauses_compat(extra->restrictlist, + joinrel->relids, + &joinclauses, + &otherclauses); } else { diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 22a3d5ff..61d1ab1f 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -318,6 +318,31 @@ static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RE #endif +/* + * extract_actual_join_clauses() + */ +#if (PG_VERSION_NUM >= 100004) || \ + (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ + (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinrelids), \ + (joinquals), \ + (otherquals)) +#else +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinquals), \ + (otherquals)) +#endif + + /* * get_all_actual_clauses() */ From 370f41f08d15216d294abcacc28ab31281e5a450 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 14 May 2018 15:09:43 +0300 Subject: [PATCH 0824/1124] bump lib version to 1.4.12 --- META.json | 4 ++-- 
expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 8d62708b..0cfa8dc2 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Partitioning tool", "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.11", + "version": "1.4.12", "maintainer": [ "Ildar Musin ", "Dmitry Ivanov ", @@ -24,7 +24,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.11", + "version": "1.4.12", "abstract": "Partitioning tool" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index f9c63043..0b2434d4 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.11 + 1.4.12 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 2fb82a91..8069f192 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010411 +#define CURRENT_LIB_VERSION 0x010412 void *pathman_cache_search_relid(HTAB *cache_table, From 6ca926c81b68112e34c86b0607ae1313c4e64ab0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 16 May 2018 17:37:13 +0300 Subject: [PATCH 0825/1124] add pathman_ prefix to common hooks --- src/hooks.c | 42 +++++++++++++++++++++--------------------- src/include/hooks.h | 14 +++++++------- src/pg_pathman.c | 24 ++++++++++++------------ 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index fb299371..96efad08 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -62,12 +62,12 @@ 
allow_star_schema_join(PlannerInfo *root, } -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; -ProcessUtility_hook_type process_utility_hook_next = NULL; +set_join_pathlist_hook_type pathman_set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next = NULL; +planner_hook_type pathman_planner_hook_next = NULL; +post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; +ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; /* Take care of joins */ @@ -91,9 +91,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, ListCell *lc; /* Call hooks set by other extensions */ - if (set_join_pathlist_next) - set_join_pathlist_next(root, joinrel, outerrel, - innerrel, jointype, extra); + if (pathman_set_join_pathlist_next) + pathman_set_join_pathlist_next(root, joinrel, outerrel, + innerrel, jointype, extra); /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) @@ -312,8 +312,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, int irange_len; /* Invoke original hook if needed */ - if (set_rel_pathlist_hook_next != NULL) - set_rel_pathlist_hook_next(root, rel, rti, rte); + if (pathman_set_rel_pathlist_hook_next) + pathman_set_rel_pathlist_hook_next(root, rel, rti, rte); /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) @@ -631,8 +631,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) } /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); + if (pathman_planner_hook_next) + result = pathman_planner_hook_next(parse, cursorOptions, 
boundParams); else result = standard_planner(parse, cursorOptions, boundParams); @@ -671,11 +671,11 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) * any statement, including utility commands */ void -pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { /* Invoke original hook if needed */ - if (post_parse_analyze_hook_next) - post_parse_analyze_hook_next(pstate, query); + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query); /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) @@ -735,7 +735,7 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) if (IsPathmanReady() && get_planner_calls_count() > 0) { /* Check that pg_pathman is the last extension loaded */ - if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) + if (post_parse_analyze_hook != pathman_post_parse_analyze_hook) { Oid save_userid; int save_sec_context; @@ -786,8 +786,8 @@ void pathman_shmem_startup_hook(void) { /* Invoke original hook if needed */ - if (shmem_startup_hook_next != NULL) - shmem_startup_hook_next(); + if (pathman_shmem_startup_hook_next) + pathman_shmem_startup_hook_next(); /* Allocate shared memory objects */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); @@ -942,8 +942,8 @@ pathman_process_utility_hook(Node *first_arg, } /* Finally call process_utility_hook_next or standard_ProcessUtility */ - call_process_utility_compat((process_utility_hook_next ? - process_utility_hook_next : + call_process_utility_compat((pathman_process_utility_hook_next ? 
+ pathman_process_utility_hook_next : standard_ProcessUtility), first_arg, queryString, context, params, queryEnv, diff --git a/src/include/hooks.h b/src/include/hooks.h index 6a312db3..3d25847a 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -20,12 +20,12 @@ #include "tcop/utility.h" -extern set_join_pathlist_hook_type set_join_pathlist_next; -extern set_rel_pathlist_hook_type set_rel_pathlist_hook_next; -extern planner_hook_type planner_hook_next; -extern post_parse_analyze_hook_type post_parse_analyze_hook_next; -extern shmem_startup_hook_type shmem_startup_hook_next; -extern ProcessUtility_hook_type process_utility_hook_next; +extern set_join_pathlist_hook_type pathman_set_join_pathlist_next; +extern set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next; +extern planner_hook_type pathman_planner_hook_next; +extern post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next; +extern shmem_startup_hook_type pathman_shmem_startup_hook_next; +extern ProcessUtility_hook_type pathman_process_utility_hook_next; void pathman_join_pathlist_hook(PlannerInfo *root, @@ -46,7 +46,7 @@ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams); -void pathman_post_parse_analysis_hook(ParseState *pstate, +void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query); void pathman_shmem_startup_hook(void); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 37a2d3f1..c4adef6e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -299,18 +299,18 @@ _PG_init(void) restore_pathman_init_state(&temp_init_state); /* Set basic hooks */ - set_rel_pathlist_hook_next = set_rel_pathlist_hook; - set_rel_pathlist_hook = pathman_rel_pathlist_hook; - set_join_pathlist_next = set_join_pathlist_hook; - set_join_pathlist_hook = pathman_join_pathlist_hook; - shmem_startup_hook_next = shmem_startup_hook; - shmem_startup_hook = pathman_shmem_startup_hook; - post_parse_analyze_hook_next = post_parse_analyze_hook; - 
post_parse_analyze_hook = pathman_post_parse_analysis_hook; - planner_hook_next = planner_hook; - planner_hook = pathman_planner_hook; - process_utility_hook_next = ProcessUtility_hook; - ProcessUtility_hook = pathman_process_utility_hook; + pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; + set_rel_pathlist_hook = pathman_rel_pathlist_hook; + pathman_set_join_pathlist_next = set_join_pathlist_hook; + set_join_pathlist_hook = pathman_join_pathlist_hook; + pathman_shmem_startup_hook_next = shmem_startup_hook; + shmem_startup_hook = pathman_shmem_startup_hook; + pathman_post_parse_analyze_hook_next = post_parse_analyze_hook; + post_parse_analyze_hook = pathman_post_parse_analyze_hook; + pathman_planner_hook_next = planner_hook; + planner_hook = pathman_planner_hook; + pathman_process_utility_hook_next = ProcessUtility_hook; + ProcessUtility_hook = pathman_process_utility_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); From 1a882435fb98988e1c902be498e22ce426279c12 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 17 May 2018 11:05:12 +0300 Subject: [PATCH 0826/1124] Make README.md more clear on `make PG_CONFIG=...` --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b4a8be50..a96894d0 100644 --- a/README.md +++ b/README.md @@ -72,13 +72,19 @@ More interesting features are yet to come. Stay tuned! ## Installation guide To install `pg_pathman`, execute this in the module's directory: + ```shell make install USE_PGXS=1 ``` + +> **Important:** Don't forget to set the `PG_CONFIG` variable (`make PG_CONFIG=...`) in case you want to test `pg_pathman` on a non-default or custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). 
+ Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as following: + ``` shared_preload_libraries = 'pg_pathman' ``` + > **Important:** `pg_pathman` may cause conflicts with some other extensions that use the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` to handle COPY queries for partitioned tables, which means it may interfere with `pg_stat_statements` from time to time. In this case, try listing libraries in certain order: `shared_preload_libraries = 'pg_stat_statements, pg_pathman'`. It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: @@ -88,8 +94,6 @@ CREATE EXTENSION pg_pathman; Done! Now it's time to setup your partitioning schemes. -> **Important:** Don't forget to set the `PG_CONFIG` variable in case you want to test `pg_pathman` on a custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). - ## How to update In order to update pg_pathman: From 603f0fc683799ea1250ed622781e5cb2db490a9f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Sat, 9 Jun 2018 16:01:48 +0300 Subject: [PATCH 0827/1124] fix locking in merge_range_partitions_internal() --- src/pl_range_funcs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 5e3a7696..8c7bb9b1 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -698,7 +698,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) int j; /* Prevent modification of partitions */ - LockRelationOid(parts[0], AccessExclusiveLock); + LockRelationOid(parts[i], AccessExclusiveLock); /* Look for the specified partition */ for (j = 0; j < PrelChildrenCount(prel); j++) From 0ce9a65274bd6455c28e5fdaae0c376486b6d132 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 11 Jun 2018 00:22:55 +0300 Subject: [PATCH 0828/1124] don't forget to register snapshots! 
--- src/pl_range_funcs.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 8c7bb9b1..b5deff4a 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -30,6 +30,7 @@ #include "utils/numeric.h" #include "utils/ruleutils.h" #include "utils/syscache.h" +#include "utils/snapmgr.h" #if PG_VERSION_NUM >= 100000 #include "utils/regproc.h" @@ -682,6 +683,7 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) *last; FmgrInfo cmp_proc; int i; + Snapshot fresh_snapshot; prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -739,6 +741,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "could not connect using SPI"); + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + /* Migrate the data from all partition to the first one */ for (i = 1; i < nparts; i++) { @@ -749,10 +758,24 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) get_qualified_rel_name(parts[i]), get_qualified_rel_name(parts[0])); - SPI_exec(query, 0); + SPIPlanPtr plan = SPI_prepare(query, 0, NULL); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + CppAsString(merge_range_partitions), + SPI_result); + + SPI_execute_snapshot(plan, NULL, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + pfree(query); } + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + SPI_finish(); /* Drop obsolete partitions */ From ba8e164a5aa46ae10641250ac097eaa839efa2e1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 13 Jun 2018 16:48:59 +0300 Subject: [PATCH 0829/1124] add small notice regarding builds on Windows --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a96894d0..472f2d80 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,8 @@ CREATE EXTENSION pg_pathman; Done! Now it's time to setup your partitioning schemes. +> **Windows-specific**: pg_pathman imports several symbols (e.g. None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. 
+ ## How to update In order to update pg_pathman: From c1bbebb4755c6c35751df68e8318cf8f7437f4a2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 13 Jun 2018 17:22:08 +0300 Subject: [PATCH 0830/1124] further improvements in README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 472f2d80..e9a5d958 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,8 @@ In order to update pg_pathman: 3. Execute the following queries: ```plpgsql -/* replace X.Y with the version number, e.g. 1.3 */ -ALTER EXTENSION pg_pathman UPDATE TO "X.Y"; +/* only required for major releases, e.g. 1.3 -> 1.4 */ +ALTER EXTENSION pg_pathman UPDATE; SET pg_pathman.enable = t; ``` From 5cdaa7a332db7884d92cfd6902cfd12cd2decc6e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 14:38:37 +0300 Subject: [PATCH 0831/1124] fix several issues --- src/hooks.c | 1 + src/include/relation_info.h | 1 - src/pl_range_funcs.c | 2 +- src/relation_info.c | 37 ++++++++++++++++++------------------- src/utility_stmt_hooking.c | 10 +++++----- 5 files changed, 25 insertions(+), 26 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 83f040d8..0c11a666 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -167,6 +167,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { extract_actual_join_clauses(extra->restrictlist, + joinrel->relids, &joinclauses, &otherclauses); } else diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 14286546..fb8b98bf 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -334,7 +334,6 @@ void invalidate_pathman_status_info(Oid relid); void invalidate_pathman_status_info_cache(void); /* Dispatch cache */ -void refresh_pathman_relation_info(Oid relid); void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); diff --git 
a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 242723f1..f69fd852 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -432,7 +432,7 @@ get_part_range_by_oid(PG_FUNCTION_ARGS) shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); if (getBaseType(arg_type) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_typeof(dummy) should be %s", diff --git a/src/relation_info.c b/src/relation_info.c index b4f75f2a..1ac7c873 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -205,21 +205,23 @@ invalidate_psin_entry(PartStatusInfo *psin) psin->relid, MyProcPid); #endif - /* Mark entry as invalid */ - if (psin->prel && PrelReferenceCount(psin->prel) > 0) - { - PrelIsFresh(psin->prel) = false; - } - else + if (psin->prel) { - if (psin->prel) + if (PrelReferenceCount(psin->prel) > 0) + { + /* Mark entry as outdated and detach it */ + PrelIsFresh(psin->prel) = false; + } + else + { free_pathman_relation_info(psin->prel); - - (void) pathman_cache_search_relid(status_cache, - psin->relid, - HASH_REMOVE, - NULL); + } } + + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); } @@ -227,13 +229,6 @@ invalidate_psin_entry(PartStatusInfo *psin) * Dispatch cache routines. 
*/ -/* Make changes to PartRelationInfo visible */ -void -refresh_pathman_relation_info(Oid relid) -{ - -} - /* Close PartRelationInfo entry */ void close_pathman_relation_info(PartRelationInfo *prel) @@ -242,6 +237,10 @@ close_pathman_relation_info(PartRelationInfo *prel) Assert(PrelReferenceCount(prel) > 0); PrelReferenceCount(prel) -= 1; + + /* Remove entry is it's outdated and we're the last user */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + free_pathman_relation_info(prel); } /* Check if relation is partitioned by pg_pathman */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 0bc5b43d..bf58311b 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -213,6 +213,7 @@ is_pathman_related_alter_column_type(Node *parsetree, AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; ListCell *lc; Oid parent_relid; + bool result = false; PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -235,8 +236,6 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Return 'parent_relid' and 'prel->parttype' */ if (parent_relid_out) *parent_relid_out = parent_relid; if (part_type_out) *part_type_out = prel->parttype; - - close_pathman_relation_info(prel); } else return false; @@ -264,11 +263,12 @@ is_pathman_related_alter_column_type(Node *parsetree, if (attr_number_out) *attr_number_out = attnum; /* Success! 
*/ - return true; + result = true; } - /* Default failure */ - return false; + close_pathman_relation_info(prel); + + return result; } From 28749f5a7c1f99c17ddef1d873df8f1ab953047d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 14:46:48 +0300 Subject: [PATCH 0832/1124] fix a few tests --- expected/pathman_column_type.out | 60 ++++++++++++++++---------------- expected/pathman_expressions.out | 4 +-- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 4382db1f..eacdb97a 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -20,12 +20,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) /* change column's type (should flush caches) */ @@ -51,12 +51,12 @@ SELECT partrel, cooked_expr FROM pathman_config; (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) /* check insert dispatching */ @@ -102,12 +102,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents 
cache | 5 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 (4 rows) /* change column's type (should NOT work) */ @@ -120,12 +120,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 (4 rows) /* change column's type (should flush caches) */ @@ -137,12 +137,12 @@ SELECT * FROM test_column_type.test; (0 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 (4 rows) /* check insert dispatching */ diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 9e19d217..685ca2d3 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -371,7 +371,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 
year'::INTERVAL, 10); @@ -382,7 +382,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; From 8ad3bf8561a22c4416d30af5fa07f21d7901d018 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 18:27:24 +0300 Subject: [PATCH 0833/1124] track PartRelationInfo references using ResourceOwner --- src/init.c | 29 ++++---- src/relation_info.c | 164 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 172 insertions(+), 21 deletions(-) diff --git a/src/init.c b/src/init.c index 58479939..ed89bf0b 100644 --- a/src/init.c +++ b/src/init.c @@ -47,14 +47,15 @@ MemoryContext PathmanParentsCacheContext = NULL; MemoryContext PathmanStatusCacheContext = NULL; MemoryContext PathmanBoundsCacheContext = NULL; -/* Storage for PartRelationInfos */ -HTAB *parents_cache = NULL; /* Storage for PartParentInfos */ -HTAB *status_cache = NULL; +HTAB *parents_cache = NULL; + +/* Storage for PartStatusInfos */ +HTAB *status_cache = NULL; /* Storage for PartBoundInfos */ -HTAB *bounds_cache = NULL; +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ PathmanInitState pathman_init_state; @@ -63,10 +64,6 @@ PathmanInitState pathman_init_state; bool pathman_hooks_enabled = true; -/* Shall we install new relcache callback? 
*/ -static bool relcache_callback_needed = true; - - /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); @@ -196,6 +193,8 @@ init_main_pathman_toggles(void) bool load_config(void) { + static bool relcache_callback_needed = true; + /* * Try to cache important relids. * @@ -321,7 +320,9 @@ init_local_cache(void) Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); /* Clear children */ - MemoryContextResetChildren(TopPathmanContext); + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } /* Initialize pg_pathman's memory contexts */ else @@ -356,7 +357,7 @@ init_local_cache(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartRelationInfo); + ctl.entrysize = sizeof(PartParentInfo); ctl.hcxt = PathmanParentsCacheContext; parents_cache = hash_create(PATHMAN_PARENTS_CACHE, @@ -394,11 +395,13 @@ fini_local_cache(void) hash_destroy(bounds_cache); parents_cache = NULL; - status_cache = NULL; - bounds_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; /* Now we can clear allocations */ - MemoryContextResetChildren(TopPathmanContext); + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } diff --git a/src/relation_info.c b/src/relation_info.c index 1ac7c873..c9c46f95 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -35,6 +35,7 @@ #include "utils/hsearch.h" #include "utils/inval.h" #include "utils/memutils.h" +#include "utils/resowner.h" #include "utils/ruleutils.h" #include "utils/syscache.h" #include "utils/lsyscache.h" @@ -66,6 +67,12 @@ typedef struct cmp_func_info Oid collid; } cmp_func_info; +typedef struct prel_resowner_info +{ + ResourceOwner owner; + List *prels; +} prel_resowner_info; + /* * For pg_pathman.enable_bounds_cache GUC. 
*/ @@ -77,6 +84,11 @@ bool pg_pathman_enable_bounds_cache = true; */ static bool delayed_shutdown = false; /* pathman was dropped */ +/* + * PartRelationInfo is controlled by ResourceOwner; + */ +static HTAB *prel_resowner = NULL; + /* Handy wrappers for Oids */ #define bsearch_oid(key, array, array_size) \ @@ -88,6 +100,13 @@ static void free_pathman_relation_info(PartRelationInfo *prel); static void invalidate_psin_entries_using_relid(Oid relid); static void invalidate_psin_entry(PartStatusInfo *psin); +static PartRelationInfo *resowner_prel_add(PartRelationInfo *prel); +static PartRelationInfo *resowner_prel_del(PartRelationInfo *prel); +static void resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg); + static Expr *get_partition_constraint_expr(Oid partition); static void fill_prel_with_partitions(PartRelationInfo *prel, @@ -233,10 +252,7 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - /* Check that refcount is valid */ - Assert(PrelReferenceCount(prel) > 0); - - PrelReferenceCount(prel) -= 1; + (void) resowner_prel_del(prel); /* Remove entry is it's outdated and we're the last user */ if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) @@ -327,10 +343,7 @@ get_pathman_relation_info(Oid relid) (psin->prel ? 
"live" : "NULL"), relid, MyProcPid); #endif - if (psin->prel) - PrelReferenceCount(psin->prel) += 1; - - return psin->prel; + return resowner_prel_add(psin->prel); } /* Build a new PartRelationInfo for partitioned relation */ @@ -483,6 +496,141 @@ free_pathman_relation_info(PartRelationInfo *prel) MemoryContextDelete(prel->mcxt); } +static PartRelationInfo * +resowner_prel_add(PartRelationInfo *prel) +{ + if (!prel_resowner) + { + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(ResourceOwner); + ctl.entrysize = sizeof(prel_resowner_info); + ctl.hcxt = TopPathmanContext; + + prel_resowner = hash_create("prel resowner", + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + + RegisterResourceReleaseCallback(resonwner_prel_callback, NULL); + } + + if (prel) + { + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + bool found; + MemoryContext old_mcxt; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_ENTER, + &found); + + if (!found) + info->prels = NIL; + + /* Register this 'prel' */ + old_mcxt = MemoryContextSwitchTo(TopPathmanContext); + info->prels = list_append_unique(info->prels, prel); + MemoryContextSwitchTo(old_mcxt); + + /* Finally, increment refcount */ + PrelReferenceCount(prel) += 1; + } + + return prel; +} + +static PartRelationInfo * +resowner_prel_del(PartRelationInfo *prel) +{ + /* Must be active! */ + Assert(prel_resowner); + + if (prel) + { + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); + + if (info) + { + /* Check that 'prel' is registered! 
*/ + Assert(list_member(info->prels, prel)); + + /* Remove it iff we're the only user */ + if (PrelReferenceCount(prel) == 1) + info->prels = list_delete(info->prels, prel); + } + + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Finally, decrement refcount */ + PrelReferenceCount(prel) -= 1; + } + + return prel; +} + +static void +resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg) +{ + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + + if (prel_resowner) + { + ListCell *lc; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); + + if (info) + { + foreach (lc, info->prels) + { + PartRelationInfo *prel = lfirst(lc); + + if (!isCommit) + { + /* Reset refcount for valid entry */ + if (PrelIsFresh(prel)) + { + PrelReferenceCount(prel) = 0; + } + /* Otherwise, free it when refcount is zero */ + else if (--PrelReferenceCount(prel) == 0) + { + free_pathman_relation_info(prel); + } + } + else + elog(ERROR, + "cache reference leak: PartRelationInfo(%d) has count %d", + PrelParentRelid(prel), PrelReferenceCount(prel)); + } + + list_free(info->prels); + + hash_search(prel_resowner, + (void *) &resowner, + HASH_REMOVE, + NULL); + } + } +} + /* Fill PartRelationInfo with partition-related info */ static void fill_prel_with_partitions(PartRelationInfo *prel, From 98fe5faee70302bee374050880535aae133d58fe Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 20 Jun 2018 19:31:26 +0300 Subject: [PATCH 0834/1124] minor fixes in ResourceOwner-based tracking machinery --- src/relation_info.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index c9c46f95..33aa6125 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -532,7 +532,7 @@ resowner_prel_add(PartRelationInfo *prel) /* Register this 'prel' */ old_mcxt = MemoryContextSwitchTo(TopPathmanContext); - info->prels = 
list_append_unique(info->prels, prel); + info->prels = list_append_unique_ptr(info->prels, prel); MemoryContextSwitchTo(old_mcxt); /* Finally, increment refcount */ @@ -561,11 +561,11 @@ resowner_prel_del(PartRelationInfo *prel) if (info) { /* Check that 'prel' is registered! */ - Assert(list_member(info->prels, prel)); + Assert(list_member_ptr(info->prels, prel)); /* Remove it iff we're the only user */ if (PrelReferenceCount(prel) == 1) - info->prels = list_delete(info->prels, prel); + info->prels = list_delete_ptr(info->prels, prel); } /* Check that refcount is valid */ From d68c9a79acbe37dacef31632be54505072c47c1e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:19:17 +0300 Subject: [PATCH 0835/1124] small fix in merge_range_partitions() --- src/pl_range_funcs.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f69fd852..a1b4c0fe 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -743,8 +743,11 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) } } - ObjectAddressSet(object, RelationRelationId, parts[i]); - add_exact_object_address(&object, objects); + if (i > 0) + { + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); + } } /* Check that partitions are adjacent */ From c6153f4b23d175b600c725e04560ac2aa820f600 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:22:46 +0300 Subject: [PATCH 0836/1124] add a basic leak tracker --- sql/pathman_subpartitions.sql | 3 +++ src/include/relation_info.h | 38 +++++++++++++++++++++++++++++++---- src/relation_info.c | 31 +++++++++++++++++++++++++--- 3 files changed, 65 insertions(+), 7 deletions(-) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7f38f629..8f485503 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,6 +3,9 @@ CREATE EXTENSION pg_pathman; CREATE 
SCHEMA subpartitions; +:gdb +select pg_sleep(5); + /* Create two level partitioning structure */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index fb8b98bf..2ce6fa01 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -31,6 +31,12 @@ #include "utils/relcache.h" +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#define USE_RELINFO_LEAK_TRACKER +#endif + + /* Range bound */ typedef struct { @@ -215,6 +221,10 @@ typedef struct PartRelationInfo Oid cmp_proc, /* comparison fuction for 'ev_type' */ hash_proc; /* hash function for 'ev_type' */ +#ifdef USE_RELINFO_LEAK_TRACKER + List *owners; /* saved callers of get_pathman_relation_info() */ +#endif + MemoryContext mcxt; /* memory context holding this struct */ } PartRelationInfo; @@ -334,9 +344,9 @@ void invalidate_pathman_status_info(Oid relid); void invalidate_pathman_status_info_cache(void); /* Dispatch cache */ -void close_pathman_relation_info(PartRelationInfo *prel); bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); +void close_pathman_relation_info(PartRelationInfo *prel); void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, @@ -382,11 +392,31 @@ char *canonicalize_partitioning_expression(const Oid relid, void delay_pathman_shutdown(void); void finish_delayed_invalidation(void); +void init_relation_info_static_data(void); -/* For pg_pathman.enable_bounds_cache GUC */ -extern bool pg_pathman_enable_bounds_cache; -void init_relation_info_static_data(void); +/* For pg_pathman.enable_bounds_cache GUC */ +extern bool pg_pathman_enable_bounds_cache; + + +/* This allows us to track leakers of PartRelationInfo */ +#ifdef USE_RELINFO_LEAK_TRACKER +extern const char *prel_resowner_function; +extern int prel_resowner_line; + +#define get_pathman_relation_info(relid) \ + ( \ + prel_resowner_function = __FUNCTION__, \ + prel_resowner_line = __LINE__, \ + get_pathman_relation_info(relid) \ + 
) + +#define close_pathman_relation_info(prel) \ + do { \ + close_pathman_relation_info(prel); \ + prel = NULL; \ + } while (0) +#endif #endif /* RELATION_INFO_H */ diff --git a/src/relation_info.c b/src/relation_info.c index 33aa6125..2d520547 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -55,8 +55,11 @@ #define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" -#ifdef USE_ASSERT_CHECKING -#define USE_RELINFO_LOGGING +#ifdef USE_RELINFO_LEAK_TRACKER +#undef get_pathman_relation_info +#undef close_pathman_relation_info +const char *prel_resowner_function = NULL; +int prel_resowner_line = 0; #endif @@ -73,6 +76,7 @@ typedef struct prel_resowner_info List *prels; } prel_resowner_info; + /* * For pg_pathman.enable_bounds_cache GUC. */ @@ -373,7 +377,7 @@ build_pathman_relation_info(Oid relid, Datum *values) ALLOCSET_SMALL_SIZES); /* Create a new PartRelationInfo */ - prel = MemoryContextAlloc(prel_mcxt, sizeof(PartRelationInfo)); + prel = MemoryContextAllocZero(prel_mcxt, sizeof(PartRelationInfo)); prel->relid = relid; prel->refcount = 0; prel->fresh = true; @@ -535,6 +539,15 @@ resowner_prel_add(PartRelationInfo *prel) info->prels = list_append_unique_ptr(info->prels, prel); MemoryContextSwitchTo(old_mcxt); +#ifdef USE_RELINFO_LEAK_TRACKER + /* Save current caller (function:line) */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); + prel->owners = lappend(prel->owners, + list_make2(makeString((char *) prel_resowner_function), + makeInteger(prel_resowner_line))); + MemoryContextSwitchTo(old_mcxt); +#endif + /* Finally, increment refcount */ PrelReferenceCount(prel) += 1; } @@ -616,9 +629,21 @@ resonwner_prel_callback(ResourceReleasePhase phase, } } else + { +#ifdef USE_RELINFO_LEAK_TRACKER + ListCell *lc; + + foreach (lc, prel->owners) + { + char *fun = strVal(linitial(lfirst(lc))); + int line = intVal(lsecond(lfirst(lc))); + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); + } +#endif elog(ERROR, "cache reference 
leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); + } } list_free(info->prels); From 7193e4bede940607a1bcad4853577107c592e7da Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:33:04 +0300 Subject: [PATCH 0837/1124] simplified and fixed select_partition_for_insert() --- src/include/partition_filter.h | 20 ++--- src/partition_filter.c | 146 +++++++++++++-------------------- src/utility_stmt_hooking.c | 33 +------- 3 files changed, 70 insertions(+), 129 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index f50ec342..7c15a017 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -50,9 +50,7 @@ typedef struct /* Default settings for ResultPartsStorage */ -#define RPS_DEFAULT_ENTRY_SIZE sizeof(ResultPartsStorage) #define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ - #define RPS_CLOSE_RELATIONS true #define RPS_SKIP_RELATIONS false @@ -75,7 +73,7 @@ typedef void (*rri_holder_cb)(ResultRelInfoHolder *rri_holder, */ struct ResultPartsStorage { - ResultRelInfo *base_rri; /* original ResultRelInfo (parent) */ + ResultRelInfo *base_rri; /* original ResultRelInfo */ EState *estate; /* pointer to executor's state */ CmdType command_type; /* INSERT | UPDATE */ @@ -93,6 +91,10 @@ struct ResultPartsStorage bool close_relations; LOCKMODE head_open_lock_mode; LOCKMODE heap_close_lock_mode; + + PartRelationInfo *prel; + ExprState *prel_expr_state; + ExprContext *prel_econtext; }; typedef struct @@ -115,7 +117,6 @@ typedef struct JunkFilter *junkfilter; /* junkfilter for subplan_slot */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ - ExprState *expr_state; /* for partitioning expression */ } PartitionFilterState; @@ -152,10 +153,10 @@ void init_partition_filter_static_data(void); /* Initialize storage for some parent table */ void init_result_parts_storage(ResultPartsStorage *parts_storage, - ResultRelInfo *parent_rri, 
+ Oid parent_relid, + ResultRelInfo *current_rri, EState *estate, CmdType cmd_type, - Size table_entry_size, bool close_relations, bool speculative_inserts, rri_holder_cb init_rri_holder_cb, @@ -178,11 +179,8 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder *select_partition_for_insert(ExprState *expr_state, - ExprContext *econtext, - EState *estate, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage); +ResultRelInfoHolder *select_partition_for_insert(ResultPartsStorage *parts_storage, + TupleTableSlot *slot); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, diff --git a/src/partition_filter.c b/src/partition_filter.c index f70c9ef0..fa986c4e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -143,10 +143,10 @@ init_partition_filter_static_data(void) /* Initialize ResultPartsStorage (hash table etc) */ void init_result_parts_storage(ResultPartsStorage *parts_storage, - ResultRelInfo *parent_rri, + Oid parent_relid, + ResultRelInfo *current_rri, EState *estate, CmdType cmd_type, - Size table_entry_size, bool close_relations, bool speculative_inserts, rri_holder_cb init_rri_holder_cb, @@ -158,13 +158,13 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, memset(result_rels_table_config, 0, sizeof(HASHCTL)); result_rels_table_config->keysize = sizeof(Oid); - result_rels_table_config->entrysize = table_entry_size; + result_rels_table_config->entrysize = sizeof(ResultPartsStorage); parts_storage->result_rels_table = hash_create("ResultRelInfo storage", 10, result_rels_table_config, HASH_ELEM | HASH_BLOBS); - Assert(parent_rri); - parts_storage->base_rri = parent_rri; + Assert(current_rri); + parts_storage->base_rri = current_rri; Assert(estate); parts_storage->estate = estate; @@ -185,6 +185,19 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->close_relations = close_relations; 
parts_storage->head_open_lock_mode = RowExclusiveLock; parts_storage->heap_close_lock_mode = NoLock; + + /* Fetch PartRelationInfo for this partitioned relation */ + parts_storage->prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, parts_storage->prel, PT_ANY); + + /* Build a partitioning expression state */ + parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel, + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate, + cmd_type == CMD_UPDATE); + + /* Build expression context */ + parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); } /* Free ResultPartsStorage (close relations etc) */ @@ -222,6 +235,9 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Finally destroy hash table */ hash_destroy(parts_storage->result_rels_table); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(parts_storage->prel); } /* Find a ResultRelInfo for the partition using ResultPartsStorage */ @@ -250,10 +266,6 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) List *translated_vars; MemoryContext old_mcxt; - /* Check that 'base_rri' is set */ - if (!parts_storage->base_rri) - elog(ERROR, "ResultPartsStorage contains no base_rri"); - /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) @@ -435,23 +447,26 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
*/ ResultRelInfoHolder * -select_partition_for_insert(ExprState *expr_state, - ExprContext *econtext, - EState *estate, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage) +select_partition_for_insert(ResultPartsStorage *parts_storage, + TupleTableSlot *slot) { - ResultRelInfoHolder *rri_holder; + bool close_prel = false; + PartRelationInfo *prel = parts_storage->prel; + ExprState *expr_state = parts_storage->prel_expr_state; + ExprContext *expr_context = parts_storage->prel_econtext; Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; Oid *parts; int nparts; bool isnull; Datum value; + ResultRelInfoHolder *result; + + parts_storage->prel_econtext->ecxt_scantuple = slot; /* Execute expression */ - value = ExecEvalExprCompat(expr_state, econtext, &isnull, - mult_result_handler); + value = ExecEvalExprCompat(expr_state, expr_context, + &isnull, mult_result_handler); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); @@ -473,54 +488,37 @@ select_partition_for_insert(ExprState *expr_state, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - rri_holder = scan_result_parts_storage(partition_relid, parts_storage); + result = scan_result_parts_storage(partition_relid, parts_storage); + + /* Should we close 'prel'? 
*/ + if (close_prel) + close_pathman_relation_info(prel); - /* This partition has been dropped, repeat with a new 'prel' */ - if (rri_holder == NULL) + if (result == NULL || nparts == 0) { - /* Get a fresh PartRelationInfo */ + /* This partition has been dropped | we have a new one */ prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Paranoid check (all partitions have vanished) */ - if (!prel) - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)); + /* Store new 'prel' in 'parts_storage' */ + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = prel; } - /* This partition might have sub-partitions */ - else if (rri_holder->has_children) + else if (result->has_children) { - const PartRelationInfo *child_prel; + /* This partition is a parent itself, repeat */ + prel = get_pathman_relation_info(partition_relid); + shout_if_prel_is_invalid(partition_relid, prel, PT_RANGE); + close_prel = true; - /* Fetch PartRelationInfo for this partitioned relation */ - child_prel = get_pathman_relation_info(rri_holder->partid); - - /* Might be a false alarm */ - if (!child_prel) - return rri_holder; - - /* Build an expression state if it's not ready yet */ - if (!rri_holder->expr_state) - { - /* Fetch original topmost parent */ - Relation source_rel = parts_storage->base_rri->ri_RelationDesc; - - /* Build a partitioning expression state */ - rri_holder->expr_state = prepare_expr_state(child_prel, - source_rel, - estate, - true); - } - - /* Recursively search for subpartitions */ - rri_holder = select_partition_for_insert(rri_holder->expr_state, - econtext, estate, - child_prel, parts_storage); + /* We're not done yet */ + result = NULL; } } /* Loop until we get some result */ - while (rri_holder == NULL); + while (result == NULL); - return rri_holder; + return result; } static ExprState * @@ -660,11 +658,9 @@ void partition_filter_begin(CustomScanState *node, EState 
*estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; + Oid parent_relid = state->partitioned_table; PlanState *child_state; ResultRelInfo *current_rri; - Relation current_rel; - PartRelationInfo *prel; - bool try_map; /* It's convenient to store PlanState in 'custom_ps' */ child_state = ExecInitNode(state->subplan, estate, eflags); @@ -672,31 +668,11 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) /* Fetch current result relation (rri + rel) */ current_rri = estate->es_result_relation_info; - current_rel = current_rri->ri_RelationDesc; - - /* - * In UPDATE queries we have to work with child relation tlist, - * but expression contains varattnos of base relation, so we - * map parent varattnos to child varattnos. - * - * We don't need map if current relation == base relation. - */ - try_map = state->command_type == CMD_UPDATE && - RelationGetRelid(current_rel) != state->partitioned_table; - - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - - /* Build a partitioning expression state */ - state->expr_state = prepare_expr_state(prel, current_rel, estate, try_map); - - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); /* Init ResultRelInfo cache */ - init_result_parts_storage(&state->result_parts, current_rri, + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, estate, state->command_type, - RPS_DEFAULT_ENTRY_SIZE, RPS_SKIP_RELATIONS, state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), @@ -736,16 +712,8 @@ partition_filter_exec(CustomScanState *node) /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Store slot for expression evaluation */ - econtext->ecxt_scantuple = slot; - - /* - * Search for a matching partition. - * WARNING: 'prel' might change after this call! 
- */ - rri_holder = select_partition_for_insert(state->expr_state, - econtext, estate, - prel, &state->result_parts); + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(&state->result_parts, slot); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index bf58311b..eda203e0 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -482,7 +482,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ResultPartsStorage parts_storage; ResultRelInfo *parent_rri; - ExprState *expr_state = NULL; + Oid parent_relid = RelationGetRelid(parent_rel); MemoryContext query_mcxt = CurrentMemoryContext; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ @@ -505,9 +505,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_range_table = range_table; /* Initialize ResultPartsStorage */ - init_result_parts_storage(&parts_storage, parent_rri, + init_result_parts_storage(&parts_storage, + parent_relid, parent_rri, estate, CMD_INSERT, - RPS_DEFAULT_ENTRY_SIZE, RPS_CLOSE_RELATIONS, RPS_DEFAULT_SPECULATIVE, RPS_RRI_CB(prepare_rri_for_copy, cstate), @@ -540,7 +540,6 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, Oid tuple_oid = InvalidOid; ExprContext *econtext = GetPerTupleExprContext(estate); - PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; ResultRelInfo *child_rri; @@ -565,34 +564,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, ExecSetSlotDescriptor(slot, tupDesc); ExecStoreTuple(tuple, slot, InvalidBuffer, false); - /* Store slot for expression evaluation */ - econtext->ecxt_scantuple = slot; - - /* Fetch PartRelationInfo for parent relation */ - prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); - - /* Initialize expression state */ - if (expr_state == NULL) - { - MemoryContext old_mcxt; - Node *expr; - - old_mcxt = MemoryContextSwitchTo(query_mcxt); - - 
expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); - expr_state = ExecInitExpr((Expr *) expr, NULL); - - MemoryContextSwitchTo(old_mcxt); - } - /* Search for a matching partition */ - rri_holder = select_partition_for_insert(expr_state, econtext, estate, - prel, &parts_storage); + rri_holder = select_partition_for_insert(&parts_storage, slot); child_rri = rri_holder->result_rel_info; - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = child_rri; From 96220424c87fdf97599065a00e71f76b03995838 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 13:33:31 +0300 Subject: [PATCH 0838/1124] update Dockefile template for testgres --- Dockerfile.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 0504dd5a..5ceaeb99 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -13,7 +13,7 @@ RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ RUN if [ "${CHECK_CODE}" = "false" ] ; then \ echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add curl python3 gcc make musl-dev cmocka-dev;\ + apk --no-cache add curl python3 python3-dev gcc make musl-dev cmocka-dev linux-headers;\ pip3 install virtualenv;\ fi From ce4a301d028d58b790e55d6ee40d82340b35f3b7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 14:01:53 +0300 Subject: [PATCH 0839/1124] remove debug code from subpartitions test --- sql/pathman_subpartitions.sql | 3 --- 1 file changed, 3 deletions(-) diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 8f485503..7f38f629 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -3,9 +3,6 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; -:gdb -select pg_sleep(5); - /* Create two level partitioning structure */ From 
e1a791a933f29bc4fe086f6d4223070b28f8d39d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 15:15:33 +0300 Subject: [PATCH 0840/1124] restore previous behavior --- expected/pathman_basic.out | 2 +- src/pl_funcs.c | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index de3bf727..1bdbcef9 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -147,7 +147,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 13 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ef68c11e..c4b13017 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -108,10 +108,12 @@ get_parent_of_partition_pl(PG_FUNCTION_ARGS) Oid partition = PG_GETARG_OID(0), parent = get_parent_of_partition(partition); - if (OidIsValid(parent)) - PG_RETURN_OID(parent); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a partition", + get_rel_name_or_relid(partition)))); - PG_RETURN_NULL(); + PG_RETURN_OID(parent); } /* @@ -121,17 +123,17 @@ Datum get_partition_key_type_pl(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); + Oid typid; PartRelationInfo *prel; - if ((prel = get_pathman_relation_info(relid)) != NULL) - { - Oid result = prel->ev_type; - close_pathman_relation_info(prel); + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); - 
PG_RETURN_OID(result); - } + typid = prel->ev_type; + + close_pathman_relation_info(prel); - PG_RETURN_NULL(); + PG_RETURN_OID(typid); } /* From 107acab211514f1a92a6851fec59686777c8cfab Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 16:28:43 +0300 Subject: [PATCH 0841/1124] refactoring in merge_range_partitions() --- src/include/relation_info.h | 3 + src/pl_range_funcs.c | 186 ++++++++++++------------------------ src/relation_info.c | 30 ++++-- 3 files changed, 86 insertions(+), 133 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index a18edd4d..ee4e9a35 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -348,6 +348,9 @@ bool has_pathman_relation_info(Oid relid); PartRelationInfo *get_pathman_relation_info(Oid relid); void close_pathman_relation_info(PartRelationInfo *prel); +void qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel); + void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index c4c14254..feb028a5 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -64,14 +64,6 @@ static ArrayType *construct_bounds_array(Bound *elems, bool elmbyval, char elmalign); -static void check_range_adjacence(Oid cmp_proc, - Oid collid, - List *ranges); - -static void merge_range_partitions_internal(Oid parent, - Oid *parts, - uint32 nparts); - static char *deparse_constraint(Oid relid, Node *expr); static void modify_range_constraint(Oid partition_relid, @@ -639,13 +631,22 @@ merge_range_partitions(PG_FUNCTION_ARGS) Oid parent = InvalidOid; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); - Oid *partitions; + Oid *parts; + int nparts; + Datum *datums; bool *nulls; - int nparts; int16 typlen; bool typbyval; char typalign; + + PartRelationInfo *prel; + Bound min_bound, + max_bound; + RangeEntry *bounds; + ObjectAddresses *objects = 
new_object_addresses(); + Snapshot fresh_snapshot; + FmgrInfo finfo; int i; /* Validate array type */ @@ -657,35 +658,32 @@ merge_range_partitions(PG_FUNCTION_ARGS) typlen, typbyval, typalign, &datums, &nulls, &nparts); - /* Extract partition Oids from array */ - partitions = palloc(sizeof(Oid) * nparts); - for (i = 0; i < nparts; i++) - { - Oid partition_relid; - partition_relid = DatumGetObjectId(datums[i]); - - /* check that is not has subpartitions */ - if (has_subclass(partition_relid)) - ereport(ERROR, (errmsg("cannot merge partitions"), - errdetail("at least one of specified partitions has children"))); - - partitions[i] = partition_relid; - } - if (nparts < 2) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("there must be at least two partitions"))); - /* Check if all partitions are from the same parent */ + /* Allocate arrays */ + parts = palloc(nparts * sizeof(Oid)); + bounds = palloc(nparts * sizeof(RangeEntry)); + for (i = 0; i < nparts; i++) { - Oid cur_parent = get_parent_of_partition(partitions[i]); + Oid cur_parent; + + /* Extract partition Oids from array */ + parts[i] = DatumGetObjectId(datums[i]); + + /* Prevent modification of partitions */ + LockRelationOid(parts[i], AccessExclusiveLock); + + /* Check if all partitions are from the same parent */ + cur_parent = get_parent_of_partition(parts[i]); /* If we couldn't find a parent, it's not a partition */ if (!OidIsValid(cur_parent)) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("relation \"%s\" is not a partition", - get_rel_name_or_relid(partitions[i])))); + get_rel_name_or_relid(parts[i])))); /* 'parent' is not initialized */ if (parent == InvalidOid) @@ -697,84 +695,52 @@ merge_range_partitions(PG_FUNCTION_ARGS) errdetail("all relations must share the same parent"))); } - /* Now merge partitions */ - merge_range_partitions_internal(parent, partitions, nparts); - - PG_RETURN_VOID(); -} - -static void -merge_range_partitions_internal(Oid parent, Oid *parts, uint32 
nparts) -{ - PartRelationInfo *prel; - List *rentry_list = NIL; - RangeEntry *ranges, - *first, - *last; - FmgrInfo cmp_proc; - ObjectAddresses *objects = new_object_addresses(); - Snapshot fresh_snapshot; - int i; + /* Lock parent till transaction's end */ + LockRelationOid(parent, ShareUpdateExclusiveLock); /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); - - /* Lock parent till transaction's end */ - LockRelationOid(parent, ShareUpdateExclusiveLock); - - /* Process partitions */ + /* Copy rentries from 'prel' */ for (i = 0; i < nparts; i++) { - ObjectAddress object; - int j; - - /* Prevent modification of partitions */ - LockRelationOid(parts[i], AccessExclusiveLock); - - /* Look for the specified partition */ - for (j = 0; j < PrelChildrenCount(prel); j++) - { - if (ranges[j].child_oid == parts[i]) - { - rentry_list = lappend(rentry_list, &ranges[j]); - break; - } - } + uint32 idx = PrelHasPartition(prel, parts[i]); + Assert(idx > 0); - if (i > 0) - { - ObjectAddressSet(object, RelationRelationId, parts[i]); - add_exact_object_address(&object, objects); - } + bounds[i] = PrelGetRangesArray(prel)[idx - 1]; } - /* Check that partitions are adjacent */ - check_range_adjacence(prel->cmp_proc, prel->ev_collid, rentry_list); + /* Sort rentries by increasing bound */ + qsort_range_entries(bounds, nparts, prel); - /* First determine the bounds of a new constraint */ - first = (RangeEntry *) linitial(rentry_list); - last = (RangeEntry *) llast(rentry_list); + fmgr_info(prel->cmp_proc, &finfo); - /* Swap ranges if 'last' < 'first' */ - fmgr_info(prel->cmp_proc, &cmp_proc); - if (cmp_bounds(&cmp_proc, prel->ev_collid, &last->min, &first->min) < 0) + /* Check that partitions are adjacent */ + for (i = 1; i < nparts; i++) { - RangeEntry *tmp = last; + Bound cur_min = bounds[i].min, + prev_max = bounds[i - 1].max; - 
last = first; - first = tmp; + if (cmp_bounds(&finfo, prel->ev_collid, &cur_min, &prev_max) != 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(bounds[i - 1].child_oid), + get_rel_name(bounds[i].child_oid)))); + } } + /* First determine the bounds of a new constraint */ + min_bound = bounds[0].min; + max_bound = bounds[nparts - 1].max; + /* Drop old constraint and create a new one */ modify_range_constraint(parts[0], prel->expr_cstr, prel->ev_type, - &first->min, - &last->max); + &min_bound, + &max_bound); /* Make constraint visible */ CommandCounterIncrement(); @@ -792,6 +758,8 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) /* Migrate the data from all partition to the first one */ for (i = 1; i < nparts; i++) { + ObjectAddress object; + char *query = psprintf("WITH part_data AS ( " "DELETE FROM %s RETURNING " "*) " @@ -812,6 +780,10 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) false, true, 0); pfree(query); + + /* To be deleted */ + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); } /* Free snapshot */ @@ -823,8 +795,13 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) performMultipleDeletions(objects, DROP_CASCADE, 0); free_object_addresses(objects); + pfree(bounds); + pfree(parts); + /* Don't forget to close 'prel'! 
*/ close_pathman_relation_info(prel); + + PG_RETURN_VOID(); } @@ -1278,40 +1255,3 @@ construct_bounds_array(Bound *elems, return arr; } - -/* - * Check that range entries are adjacent - */ -static void -check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) -{ - ListCell *lc; - RangeEntry *last = NULL; - FmgrInfo finfo; - - fmgr_info(cmp_proc, &finfo); - - foreach(lc, ranges) - { - RangeEntry *cur = (RangeEntry *) lfirst(lc); - - /* Skip first iteration */ - if (!last) - { - last = cur; - continue; - } - - /* Check that last and current partitions are adjacent */ - if ((cmp_bounds(&finfo, collid, &last->max, &cur->min) != 0) && - (cmp_bounds(&finfo, collid, &cur->max, &last->min) != 0)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partitions \"%s\" and \"%s\" are not adjacent", - get_rel_name(last->child_oid), - get_rel_name(cur->child_oid)))); - } - - last = cur; - } -} diff --git a/src/relation_info.c b/src/relation_info.c index 2d520547..e9b9245c 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -746,16 +746,9 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Finalize 'prel' for a RANGE-partitioned table */ if (prel->parttype == PT_RANGE) { - cmp_func_info cmp_info; - - /* Prepare function info */ - fmgr_info(prel->cmp_proc, &cmp_info.flinfo); - cmp_info.collid = prel->ev_collid; - - /* Sort partitions by RangeEntry->min asc */ - qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), - sizeof(RangeEntry), cmp_range_entries, - (void *) &cmp_info); + qsort_range_entries(PrelGetRangesArray(prel), + PrelChildrenCount(prel), + prel); /* Initialize 'prel->children' array */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -789,6 +782,23 @@ cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } +void +qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel) +{ + cmp_func_info cmp_info; + + /* Prepare function 
info */ + fmgr_info(prel->cmp_proc, &cmp_info.flinfo); + cmp_info.collid = prel->ev_collid; + + /* Sort partitions by RangeEntry->min asc */ + qsort_arg(entries, nentries, + sizeof(RangeEntry), + cmp_range_entries, + (void *) &cmp_info); +} + /* * Common PartRelationInfo checks. Emit ERROR if anything is wrong. */ From 561c971f0ec4c806dd98786ca4bd91199e3de078 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:41:19 +0300 Subject: [PATCH 0842/1124] remove dead code --- src/include/init.h | 2 -- src/init.c | 50 ---------------------------------------------- 2 files changed, 52 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index 799e1c2d..99426810 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -226,8 +226,6 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull); -Oid *read_parent_oids(int *nelems); - bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, diff --git a/src/init.c b/src/init.c index ad521ced..db71704d 100644 --- a/src/init.c +++ b/src/init.c @@ -71,7 +71,6 @@ static void init_local_cache(void); static void fini_local_cache(void); /* Special handlers for read_pathman_config() */ -static void add_partrel_to_array(Datum *values, bool *isnull, void *context); static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); static void read_pathman_config(void (*per_row_cb)(Datum *values, @@ -796,55 +795,6 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } -typedef struct -{ - Oid *array; - int nelems; - int capacity; -} read_parent_oids_cxt; - -/* - * Get a sorted array of partitioned tables' Oids. 
- */ -Oid * -read_parent_oids(int *nelems) -{ - read_parent_oids_cxt context = { NULL, 0, 0 }; - - read_pathman_config(add_partrel_to_array, &context); - - /* Perform sorting */ - qsort(context.array, context.nelems, sizeof(Oid), oid_cmp); - - /* Return values */ - *nelems = context.nelems; - return context.array; -} - - -/* read_pathman_config(): add parent to array of Oids */ -static void -add_partrel_to_array(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - read_parent_oids_cxt *result = (read_parent_oids_cxt *) context; - - if (result->array == NULL) - { - result->capacity = PART_RELS_SIZE; - result->array = palloc(result->capacity * sizeof(Oid)); - } - - if (result->nelems >= result->capacity) - { - result->capacity = result->capacity * 2 + 1; - result->array = repalloc(result->array, result->capacity * sizeof(Oid)); - } - - /* Append current relid */ - result->array[result->nelems++] = relid; -} - /* read_pathman_config(): create dummy cache entry for parent */ static void startup_invalidate_parent(Datum *values, bool *isnull, void *context) From 79e11d94a147095f6e131e980033018c449f8e2e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:43:39 +0300 Subject: [PATCH 0843/1124] protect data read from pg_pathman's tables --- src/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/init.c b/src/init.c index d8fb4c57..569a4c2f 100644 --- a/src/init.c +++ b/src/init.c @@ -664,6 +664,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Extract data if necessary */ if (values && isnull) { + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); /* Perform checks for non-NULL columns */ @@ -778,6 +779,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { /* Extract data if necessary */ + htup = heap_copytuple(htup); 
heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); row_found = true; From 47b8ee25a5825ef9f0251f4888a060b29e766ef4 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 21 Jun 2018 17:46:02 +0300 Subject: [PATCH 0844/1124] get rid of read_pathman_config() --- src/init.c | 80 ------------------------------------------------------ 1 file changed, 80 deletions(-) diff --git a/src/init.c b/src/init.c index 48e9875a..2c93f974 100644 --- a/src/init.c +++ b/src/init.c @@ -70,14 +70,6 @@ static void fini_pathman_relation_oids(void); static void init_local_cache(void); static void fini_local_cache(void); -/* Special handlers for read_pathman_config() */ -static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); - -static void read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context); - static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, @@ -213,9 +205,6 @@ load_config(void) /* Create various hash tables (caches) */ init_local_cache(); - /* Read PATHMAN_CONFIG table & fill cache */ - read_pathman_config(startup_invalidate_parent, NULL); - /* Register pathman_relcache_hook(), currently we can't unregister it */ if (relcache_callback_needed) { @@ -797,75 +786,6 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } -/* read_pathman_config(): create dummy cache entry for parent */ -static void -startup_invalidate_parent(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - - /* Check that relation 'relid' exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("table \"%s\" contains nonexistent relation %u", - PATHMAN_CONFIG, relid), - errhint(INIT_ERROR_HINT))); - } -} - -/* - * Go through the PATHMAN_CONFIG table and create PartRelationInfo 
entries. - */ -static void -read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context) -{ - Relation rel; - HeapScanDesc scan; - Snapshot snapshot; - HeapTuple htup; - - /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); - - /* Check that 'partrel' column is if regclass type */ - Assert(TupleDescAttr(RelationGetDescr(rel), - Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); - - /* Check that number of columns == Natts_pathman_config */ - Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); - - snapshot = RegisterSnapshot(GetLatestSnapshot()); - scan = heap_beginscan(rel, snapshot, 0, NULL); - - /* Examine each row and create a PartRelationInfo in local cache */ - while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) - { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Extract Datums from tuple 'htup' */ - heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); - - /* These attributes are marked as NOT NULL, check anyway */ - Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_expr - 1]); - - /* Execute per row callback */ - per_row_cb(values, isnull, context); - } - - /* Clean resources */ - heap_endscan(scan); - UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); -} - - /* * Validates range constraint. 
It MUST have one of the following formats: * 1) EXPRESSION >= CONST AND EXPRESSION < CONST From 758694d10a835d466c8e76b5a05da0e599bf8715 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 14:32:50 +0300 Subject: [PATCH 0845/1124] fix tuple visibility in add_to_pathman_config() --- expected/pathman_calamity.out | 108 +++++++++++++++++----------------- src/pl_funcs.c | 4 ++ 2 files changed, 58 insertions(+), 54 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6243f1d9..14ff9cd6 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -807,23 +807,23 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 1 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* Change this setting for code coverage */ @@ -853,23 +853,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition 
dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* Restore this GUC */ @@ -899,23 +899,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) /* check that parents cache has been flushed after partition was dropped */ @@ -943,12 +943,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; (11 rows) SELECT context, entries FROM 
pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 (4 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); @@ -958,23 +958,23 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); (1 row) SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 9 - partition dispatch cache | 1 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 (4 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 (4 rows) DROP SCHEMA calamity CASCADE; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c4b13017..fb457df1 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -19,6 +19,7 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" @@ -785,6 +786,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) heap_close(pathman_config, RowExclusiveLock); + /* Make changes visible */ + CommandCounterIncrement(); + /* Update caches 
only if this relation has children */ if (FCS_FOUND == find_inheritance_children_array(relid, NoLock, true, &children_count, From 26e2cc3e42c8100e82473a86a893bc30d8ddbdd6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 18:26:03 +0300 Subject: [PATCH 0846/1124] various fixes for multilevel partitioning --- src/hooks.c | 26 +++++++++++++++++--------- src/pg_pathman.c | 9 ++------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index e718edbe..b170e2cb 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -108,20 +108,24 @@ pathman_join_pathlist_hook(PlannerInfo *root, if (inner_rte->inh) return; + /* We shouldn't process functions etc */ + if (inner_rte->rtekind != RTE_RELATION) + return; + /* We don't support these join types (since inner will be parameterized) */ if (jointype == JOIN_FULL || jointype == JOIN_RIGHT || jointype == JOIN_UNIQUE_INNER) return; + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) + return; + /* Proceed iff relation 'innerrel' is partitioned */ if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) return; - /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) - goto cleanup; - /* * Check if query is: * 1) UPDATE part_table SET = .. FROM part_table. @@ -294,7 +298,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, add_path(joinrel, (Path *) nest_path); } -cleanup: /* Don't forget to close 'inner_prel'! */ close_pathman_relation_info(inner_prel); } @@ -380,14 +383,22 @@ pathman_rel_pathlist_hook(PlannerInfo *root, foreach (lc, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + Oid child_oid, + parent_oid; + + /* Is it actually the same table? 
*/ + child_oid = root->simple_rte_array[appinfo->child_relid]->relid; + parent_oid = root->simple_rte_array[appinfo->parent_relid]->relid; /* * If there's an 'appinfo', it means that somebody * (PG?) has already processed this partitioned table * and added its children to the plan. */ - if (appinfo->child_relid == rti) + if (appinfo->child_relid == rti && child_oid == parent_oid) + { goto cleanup; + } } } @@ -419,9 +430,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, pathkeyDesc = (PathKey *) linitial(pathkeys); } - /* Mark as partitioned table */ - assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); - children = PrelGetChildrenArray(prel); ranges = list_make1_irange_full(prel, IR_COMPLETE); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index e236735c..87d361fe 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1951,13 +1951,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, set_rel_consider_parallel_compat(root, child_rel, child_rte); #endif - /* - * If inh is True and pathlist is not null then it is a partitioned - * table and we've already filled it, skip it. 
Otherwise build a - * pathlist for it - */ - if (PARENTHOOD_DISALLOWED != get_rel_parenthood_status(child_rte) || - child_rel->pathlist == NIL) + /* Build a few paths for this relation */ + if (child_rel->pathlist == NIL) { /* Compute child's access paths & sizes */ if (child_rte->relkind == RELKIND_FOREIGN_TABLE) From cf28b4ef0b952dd9ef7fd02ec63892a240625af2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 22 Jun 2018 18:39:11 +0300 Subject: [PATCH 0847/1124] revisit some TODOs --- src/hooks.c | 4 ---- src/init.c | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index b170e2cb..89466d4b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -484,10 +484,6 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - /* - * WARNING: 'prel' might become invalid after append_child_relation(). - */ - /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, diff --git a/src/init.c b/src/init.c index 2c93f974..d24b8ee6 100644 --- a/src/init.c +++ b/src/init.c @@ -665,7 +665,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set ItemPointer if necessary */ if (iptr) - *iptr = htup->t_self; + *iptr = htup->t_self; /* FIXME: callers should lock table beforehand */ } /* Clean resources */ From 14d15f27ae4127ed031ed0473a58f66d57d1d629 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 13:39:09 +0300 Subject: [PATCH 0848/1124] fix select_partition_for_insert() --- src/partition_filter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 9e97698f..96ca00e8 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -498,7 +498,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* This partition has been dropped | we have a new one */ prel = get_pathman_relation_info(parent_relid); - 
shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); /* Store new 'prel' in 'parts_storage' */ close_pathman_relation_info(parts_storage->prel); @@ -508,7 +508,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* This partition is a parent itself, repeat */ prel = get_pathman_relation_info(partition_relid); - shout_if_prel_is_invalid(partition_relid, prel, PT_RANGE); + shout_if_prel_is_invalid(partition_relid, prel, PT_ANY); close_prel = true; /* We're not done yet */ From add67fa8a08e398dd1e91234873386347dcb6fe0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 14:14:42 +0300 Subject: [PATCH 0849/1124] make use of RegisterCustomScanMethods(), rename some runtime* funcs and files --- Makefile | 2 +- src/hooks.c | 12 ++--- .../{runtimeappend.h => runtime_append.h} | 39 +++++++------- src/include/runtime_merge_append.h | 39 +++++++------- src/nodes_common.c | 2 +- src/partition_filter.c | 6 +-- src/partition_router.c | 4 +- src/pg_pathman.c | 4 +- src/{runtimeappend.c => runtime_append.c} | 54 ++++++++++--------- src/runtime_merge_append.c | 50 ++++++++--------- 10 files changed, 112 insertions(+), 100 deletions(-) rename src/include/{runtimeappend.h => runtime_append.h} (68%) rename src/{runtimeappend.c => runtime_append.c} (58%) diff --git a/Makefile b/Makefile index 1291e252..948bf3b6 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ MODULE_big = pg_pathman OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ - src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ + src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ diff --git a/src/hooks.c b/src/hooks.c 
index 89466d4b..2e547af1 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -19,7 +19,7 @@ #include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "runtime_merge_append.h" #include "utility_stmt_hooking.h" #include "utils.h" @@ -244,7 +244,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, continue; /* Try building RuntimeAppend path, skip if it's not possible */ - inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); + inner = create_runtime_append_path(root, cur_inner_path, ppi, paramsel); if (!inner) continue; @@ -549,8 +549,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, ppi = get_appendrel_parampathinfo(rel, inner_required); if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) - inner_path = create_runtimeappend_path(root, cur_path, - ppi, paramsel); + inner_path = create_runtime_append_path(root, cur_path, + ppi, paramsel); else if (IsA(cur_path, MergeAppendPath) && pg_pathman_enable_runtime_merge_append) { @@ -560,8 +560,8 @@ pathman_rel_pathlist_hook(PlannerInfo *root, elog(FATAL, "Struct layouts of AppendPath and " "MergeAppendPath differ"); - inner_path = create_runtimemergeappend_path(root, cur_path, - ppi, paramsel); + inner_path = create_runtime_merge_append_path(root, cur_path, + ppi, paramsel); } if (inner_path) diff --git a/src/include/runtimeappend.h b/src/include/runtime_append.h similarity index 68% rename from src/include/runtimeappend.h rename to src/include/runtime_append.h index ee25c337..8e003a92 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtime_append.h @@ -21,6 +21,9 @@ #include "commands/explain.h" +#define RUNTIME_APPEND_NODE_NAME "RuntimeAppend" + + typedef struct { CustomPath cpath; @@ -70,32 +73,32 @@ extern CustomScanMethods runtimeappend_plan_methods; extern CustomExecMethods runtimeappend_exec_methods; -void init_runtimeappend_static_data(void); +void 
init_runtime_append_static_data(void); -Path * create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimeappend_create_scan_state(CustomScan *node); +Node * runtime_append_create_scan_state(CustomScan *node); -void runtimeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimeappend_exec(CustomScanState *node); +TupleTableSlot * runtime_append_exec(CustomScanState *node); -void runtimeappend_end(CustomScanState *node); +void runtime_append_end(CustomScanState *node); -void runtimeappend_rescan(CustomScanState *node); +void runtime_append_rescan(CustomScanState *node); -void runtimeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_APPEND_H */ diff --git a/src/include/runtime_merge_append.h b/src/include/runtime_merge_append.h index 9aa6aed9..8d24bf20 100644 --- a/src/include/runtime_merge_append.h +++ b/src/include/runtime_merge_append.h @@ -14,12 +14,15 @@ #define RUNTIME_MERGE_APPEND_H -#include "runtimeappend.h" +#include "runtime_append.h" #include "pathman.h" #include "postgres.h" +#define RUNTIME_MERGE_APPEND_NODE_NAME "RuntimeMergeAppend" + + typedef struct { RuntimeAppendPath rpath; @@ -54,30 +57,30 @@ extern CustomExecMethods runtime_merge_append_exec_methods; void init_runtime_merge_append_static_data(void); -Path * 
create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimemergeappend_create_scan_state(CustomScan *node); +Node * runtime_merge_append_create_scan_state(CustomScan *node); -void runtimemergeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_merge_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimemergeappend_exec(CustomScanState *node); +TupleTableSlot * runtime_merge_append_exec(CustomScanState *node); -void runtimemergeappend_end(CustomScanState *node); +void runtime_merge_append_end(CustomScanState *node); -void runtimemergeappend_rescan(CustomScanState *node); +void runtime_merge_append_rescan(CustomScanState *node); -void runtimemergeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_merge_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_MERGE_APPEND_H */ diff --git a/src/nodes_common.c b/src/nodes_common.c index 66f2df12..a370c165 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -11,7 +11,7 @@ #include "init.h" #include "nodes_common.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils.h" #include "nodes/nodeFuncs.h" diff --git a/src/partition_filter.c b/src/partition_filter.c index 96ca00e8..3314cef5 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -131,6 +131,8 @@ init_partition_filter_static_data(void) NULL, NULL, NULL); + + 
RegisterCustomScanMethods(&partition_filter_plan_methods); } @@ -659,12 +661,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; Oid parent_relid = state->partitioned_table; - PlanState *child_state; ResultRelInfo *current_rri; /* It's convenient to store PlanState in 'custom_ps' */ - child_state = ExecInitNode(state->subplan, estate, eflags); - node->custom_ps = list_make1(child_state); + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); /* Fetch current result relation (rri + rel) */ current_rri = estate->es_result_relation_info; diff --git a/src/partition_router.c b/src/partition_router.c index f4a8cb6c..30ebd5d2 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -58,6 +58,8 @@ init_partition_router_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&partition_router_plan_methods); } Plan * @@ -121,7 +123,7 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; - /* Initialize PartitionFilter child node */ + /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 87d361fe..bc88217d 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -18,7 +18,7 @@ #include "partition_filter.h" #include "partition_router.h" #include "planner_tree_modification.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "runtime_merge_append.h" #include "postgres.h" @@ -319,7 +319,7 @@ _PG_init(void) /* Initialize static data for all subsystems */ init_main_pathman_toggles(); init_relation_info_static_data(); - init_runtimeappend_static_data(); + init_runtime_append_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); init_partition_router_static_data(); diff --git 
a/src/runtimeappend.c b/src/runtime_append.c similarity index 58% rename from src/runtimeappend.c rename to src/runtime_append.c index 9e93aedf..e73c5c7b 100644 --- a/src/runtimeappend.c +++ b/src/runtime_append.c @@ -8,7 +8,7 @@ * ------------------------------------------------------------------------ */ -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils/guc.h" @@ -21,25 +21,25 @@ CustomExecMethods runtimeappend_exec_methods; void -init_runtimeappend_static_data(void) +init_runtime_append_static_data(void) { - runtimeappend_path_methods.CustomName = "RuntimeAppend"; - runtimeappend_path_methods.PlanCustomPath = create_runtimeappend_plan; + runtimeappend_path_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_path_methods.PlanCustomPath = create_runtime_append_plan; - runtimeappend_plan_methods.CustomName = "RuntimeAppend"; - runtimeappend_plan_methods.CreateCustomScanState = runtimeappend_create_scan_state; + runtimeappend_plan_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_plan_methods.CreateCustomScanState = runtime_append_create_scan_state; - runtimeappend_exec_methods.CustomName = "RuntimeAppend"; - runtimeappend_exec_methods.BeginCustomScan = runtimeappend_begin; - runtimeappend_exec_methods.ExecCustomScan = runtimeappend_exec; - runtimeappend_exec_methods.EndCustomScan = runtimeappend_end; - runtimeappend_exec_methods.ReScanCustomScan = runtimeappend_rescan; + runtimeappend_exec_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_exec_methods.BeginCustomScan = runtime_append_begin; + runtimeappend_exec_methods.ExecCustomScan = runtime_append_exec; + runtimeappend_exec_methods.EndCustomScan = runtime_append_end; + runtimeappend_exec_methods.ReScanCustomScan = runtime_append_rescan; runtimeappend_exec_methods.MarkPosCustomScan = NULL; runtimeappend_exec_methods.RestrPosCustomScan = NULL; - runtimeappend_exec_methods.ExplainCustomScan = runtimeappend_explain; + 
runtimeappend_exec_methods.ExplainCustomScan = runtime_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimeappend", - "Enables the planner's use of RuntimeAppend custom node.", + "Enables the planner's use of " RUNTIME_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtimeappend, true, @@ -48,13 +48,15 @@ init_runtimeappend_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtimeappend_plan_methods); } Path * -create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { return create_append_path_common(root, inner_append, param_info, @@ -64,9 +66,9 @@ create_runtimeappend_path(PlannerInfo *root, } Plan * -create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { return create_append_plan_common(root, rel, best_path, tlist, @@ -75,7 +77,7 @@ create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimeappend_create_scan_state(CustomScan *node) +runtime_append_create_scan_state(CustomScan *node) { return create_append_scan_state_common(node, &runtimeappend_exec_methods, @@ -83,7 +85,7 @@ runtimeappend_create_scan_state(CustomScan *node) } void -runtimeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -116,25 +118,25 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimeappend_exec(CustomScanState *node) +runtime_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimeappend_end(CustomScanState *node) 
+runtime_append_end(CustomScanState *node) { end_append_common(node); } void -runtimeappend_rescan(CustomScanState *node) +runtime_append_rescan(CustomScanState *node) { rescan_append_common(node); } void -runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 453ebab1..836a1fdd 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -191,23 +191,23 @@ unpack_runtimemergeappend_private(RuntimeMergeAppendState *scan_state, void init_runtime_merge_append_static_data(void) { - runtime_merge_append_path_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_path_methods.PlanCustomPath = create_runtimemergeappend_plan; + runtime_merge_append_path_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_path_methods.PlanCustomPath = create_runtime_merge_append_plan; - runtime_merge_append_plan_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_plan_methods.CreateCustomScanState = runtimemergeappend_create_scan_state; + runtime_merge_append_plan_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_plan_methods.CreateCustomScanState = runtime_merge_append_create_scan_state; - runtime_merge_append_exec_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_exec_methods.BeginCustomScan = runtimemergeappend_begin; - runtime_merge_append_exec_methods.ExecCustomScan = runtimemergeappend_exec; - runtime_merge_append_exec_methods.EndCustomScan = runtimemergeappend_end; - runtime_merge_append_exec_methods.ReScanCustomScan = runtimemergeappend_rescan; + runtime_merge_append_exec_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_exec_methods.BeginCustomScan = runtime_merge_append_begin; + 
runtime_merge_append_exec_methods.ExecCustomScan = runtime_merge_append_exec; + runtime_merge_append_exec_methods.EndCustomScan = runtime_merge_append_end; + runtime_merge_append_exec_methods.ReScanCustomScan = runtime_merge_append_rescan; runtime_merge_append_exec_methods.MarkPosCustomScan = NULL; runtime_merge_append_exec_methods.RestrPosCustomScan = NULL; - runtime_merge_append_exec_methods.ExplainCustomScan = runtimemergeappend_explain; + runtime_merge_append_exec_methods.ExplainCustomScan = runtime_merge_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimemergeappend", - "Enables the planner's use of RuntimeMergeAppend custom node.", + "Enables the planner's use of " RUNTIME_MERGE_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtime_merge_append, true, @@ -216,13 +216,15 @@ init_runtime_merge_append_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtime_merge_append_plan_methods); } Path * -create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { RelOptInfo *rel = inner_append->path.parent; Path *path; @@ -245,9 +247,9 @@ create_runtimemergeappend_path(PlannerInfo *root, } Plan * -create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { CustomScan *node; Plan *plan; @@ -337,7 +339,7 @@ create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimemergeappend_create_scan_state(CustomScan *node) +runtime_merge_append_create_scan_state(CustomScan *node) { Node *state; state = create_append_scan_state_common(node, @@ -350,7 +352,7 @@ 
runtimemergeappend_create_scan_state(CustomScan *node) } void -runtimemergeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_merge_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -412,13 +414,13 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimemergeappend_exec(CustomScanState *node) +runtime_merge_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimemergeappend_end(CustomScanState *node) +runtime_merge_append_end(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; @@ -429,7 +431,7 @@ runtimemergeappend_end(CustomScanState *node) } void -runtimemergeappend_rescan(CustomScanState *node) +runtime_merge_append_rescan(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; int nplans; @@ -475,7 +477,7 @@ runtimemergeappend_rescan(CustomScanState *node) } void -runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_merge_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; From 313b31a5103c3e9d873979a79fc42142aecabb9a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 14:25:07 +0300 Subject: [PATCH 0850/1124] remove useless junkfilters --- src/hooks.c | 8 ++++---- src/include/partition_filter.h | 8 ++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2e547af1..2f9e1cd7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -967,8 +967,8 @@ pathman_executor_hook(QueryDesc *queryDesc, if (IsA(state, ModifyTableState)) { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; for (i = 0; i < mt_state->mt_nplans; i++) { @@ -980,8 +980,8 @@ 
pathman_executor_hook(QueryDesc *queryDesc, ResultRelInfo *rri = &mt_state->resultRelInfo[i]; /* - * We unset junkfilter to disable junk - * cleaning in ExecModifyTable. + * HACK: We unset junkfilter to disable + * junk cleaning in ExecModifyTable. */ rri->ri_junkFilter = NULL; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 7c15a017..b601a654 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -43,7 +43,6 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ - JunkFilter *junkfilter; /* junkfilter for cached ResultRelInfo */ bool has_children; /* hint that it might have children */ ExprState *expr_state; /* children have their own expressions */ } ResultRelInfoHolder; @@ -107,15 +106,12 @@ typedef struct Plan *subplan; /* proxy variable to store subplan */ ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ + CmdType command_type; bool warning_triggered; /* warning message counter */ - TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - CmdType command_type; - TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ - JunkFilter *junkfilter; /* junkfilter for subplan_slot */ - + TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; From bb3b2b8501ca8ea5262868762b927b0474751e4f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 15:08:31 +0300 Subject: [PATCH 0851/1124] reuse PartitionFilter's ExprContext for ProjectionInfo --- src/include/partition_filter.h | 1 - src/partition_filter.c | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index b601a654..25ab51f3 100644 --- a/src/include/partition_filter.h +++ 
b/src/include/partition_filter.h @@ -112,7 +112,6 @@ typedef struct TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ExprContext *tup_convert_econtext; /* ExprContext for projections */ } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 3314cef5..05a8f80c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -861,9 +861,6 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, *parent_rri; Index parent_rt_idx; TupleTableSlot *result_slot; - EState *estate; - - estate = rps_storage->estate; /* We don't need to do anything ff there's no map */ if (!rri_holder->tuple_map) @@ -880,10 +877,6 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, parent_rri = rps_storage->base_rri; parent_rt_idx = parent_rri->ri_RangeTableIndex; - /* Create ExprContext for tuple projections */ - if (!pfstate->tup_convert_econtext) - pfstate->tup_convert_econtext = CreateExprContext(estate); - /* Replace parent's varattnos with child's */ returning_list = (List *) fix_returning_list_mutator((Node *) returning_list, @@ -899,7 +892,7 @@ prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, /* Build new projection info */ child_rri->ri_projectReturning = - ExecBuildProjectionInfoCompat(returning_list, pfstate->tup_convert_econtext, + ExecBuildProjectionInfoCompat(returning_list, pfstate->css.ss.ps.ps_ExprContext, result_slot, NULL /* HACK: no PlanState */, RelationGetDescr(child_rri->ri_RelationDesc)); } From 35b9be724cab347ac8d321a73ad5681f53f238b5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 25 Jun 2018 18:53:23 +0300 Subject: [PATCH 0852/1124] minor changes in PartitionRouter --- src/partition_router.c | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index 30ebd5d2..a87b514f 100644 --- 
a/src/partition_router.c +++ b/src/partition_router.c @@ -29,7 +29,6 @@ CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - TupleTableSlot *planSlot, EPQState *epqstate, EState *estate); @@ -140,28 +139,27 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *result_rri, - *parent_rri; - ItemPointer tupleid = NULL; - ItemPointerData tuple_ctid; + ResultRelInfo *new_rri, /* new tuple owner */ + *old_rri; /* previous tuple owner */ EPQState epqstate; PartitionFilterState *child_state; char relkind; + ItemPointerData ctid; + + ItemPointerSetInvalid(&ctid); child_state = (PartitionFilterState *) child_ps; Assert(child_state->command_type == CMD_UPDATE); - EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - - parent_rri = child_state->result_parts.base_rri; - result_rri = estate->es_result_relation_info; + old_rri = child_state->result_parts.base_rri; + new_rri = estate->es_result_relation_info; /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - parent_rri->ri_RelationDesc->rd_att->tdhasoid, + old_rri->ri_RelationDesc->rd_att->tdhasoid, ExecInitExtraTupleSlot(estate)); state->junkfilter->jf_junkAttNo = @@ -171,7 +169,7 @@ partition_router_exec(CustomScanState *node) elog(ERROR, "could not find junk ctid column"); } - relkind = parent_rri->ri_RelationDesc->rd_rel->relkind; + relkind = old_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { Datum ctid_datum; @@ -185,9 +183,8 @@ partition_router_exec(CustomScanState *node) if (ctid_isnull) elog(ERROR, "ctid is NULL"); - tupleid = (ItemPointer) DatumGetPointer(ctid_datum); - tuple_ctid = *tupleid; /* be sure we don't free ctid! 
*/ - tupleid = &tuple_ctid; + /* Get item pointer to tuple */ + ctid = *(ItemPointer) DatumGetPointer(ctid_datum); } else if (relkind == RELKIND_FOREIGN_TABLE) elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); @@ -202,14 +199,17 @@ partition_router_exec(CustomScanState *node) slot = ExecFilterJunk(state->junkfilter, slot); /* Magic: replace current ResultRelInfo with parent's one (DELETE) */ - estate->es_result_relation_info = parent_rri; + estate->es_result_relation_info = old_rri; - Assert(tupleid != NULL); - ExecDeleteInternal(tupleid, child_state->subplan_slot, &epqstate, estate); + /* Delete tuple from old partition */ + Assert(ItemPointerIsValid(&ctid)); + EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); + ExecDeleteInternal(&ctid, &epqstate, estate); /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ - estate->es_result_relation_info = result_rri; + estate->es_result_relation_info = new_rri; + /* Tuple will be inserted by ModifyTable */ return slot; } @@ -246,7 +246,6 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e static TupleTableSlot * ExecDeleteInternal(ItemPointer tupleid, - TupleTableSlot *planSlot, EPQState *epqstate, EState *estate) { From 99b7c027c0ed386d94b1015cdacd671ef9aa8cd6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 00:28:55 +0300 Subject: [PATCH 0853/1124] make sure that partition creation is visible --- src/include/partition_filter.h | 11 ++-- src/partition_creation.c | 4 ++ src/partition_filter.c | 94 +++++++++++++++++++++++----------- 3 files changed, 74 insertions(+), 35 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 25ab51f3..aa3a01e1 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -40,11 +40,12 @@ */ typedef struct { - Oid partid; /* partition's relid */ - ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ - TupleConversionMap *tuple_map; /* 
tuple mapping (parent => child) */ - bool has_children; /* hint that it might have children */ - ExprState *expr_state; /* children have their own expressions */ + Oid partid; /* partition's relid */ + ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + + PartRelationInfo *prel; /* this child might be a parent... */ + ExprState *prel_expr_state; /* and have its own part. expression */ } ResultRelInfoHolder; diff --git a/src/partition_creation.c b/src/partition_creation.c index 5de44ee4..1ddc39e1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -41,6 +41,7 @@ #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" +#include "utils/inval.h" #include "utils/jsonb.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" @@ -309,6 +310,9 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(ERROR, "could not create new partitions for relation \"%s\"", get_rel_name_or_relid(relid)); + /* Make changes visible */ + AcceptInvalidationMessages(); + return last_partition; } diff --git a/src/partition_filter.c b/src/partition_filter.c index 05a8f80c..e100a2c9 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -233,6 +233,10 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) free_conversion_map(rri_holder->tuple_map); } + + /* Don't forget to close 'prel'! */ + if (rri_holder->prel) + close_pathman_relation_info(rri_holder->prel); } /* Finally destroy hash table */ @@ -352,9 +356,18 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Generate tuple transformation map and some other stuff */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); - /* Are there subpartitions? 
*/ - rri_holder->has_children = child_rel->rd_rel->relhassubclass; - rri_holder->expr_state = NULL; + /* Default values */ + rri_holder->prel = NULL; + rri_holder->prel_expr_state = NULL; + + if ((rri_holder->prel = get_pathman_relation_info(partid)) != NULL) + { + rri_holder->prel_expr_state = + prepare_expr_state(rri_holder->prel, /* NOTE: this prel! */ + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate, + parts_storage->command_type == CMD_UPDATE); + } /* Call initialization callback if needed */ if (parts_storage->init_rri_holder_cb) @@ -452,29 +465,41 @@ ResultRelInfoHolder * select_partition_for_insert(ResultPartsStorage *parts_storage, TupleTableSlot *slot) { - bool close_prel = false; PartRelationInfo *prel = parts_storage->prel; ExprState *expr_state = parts_storage->prel_expr_state; ExprContext *expr_context = parts_storage->prel_econtext; + Oid parent_relid = PrelParentRelid(prel), partition_relid = InvalidOid; + + Datum value; + bool isnull; + bool compute_value = true; + Oid *parts; int nparts; - bool isnull; - Datum value; ResultRelInfoHolder *result; - parts_storage->prel_econtext->ecxt_scantuple = slot; + do + { + if (compute_value) + { + /* Prepare expression context */ + ResetExprContext(expr_context); - /* Execute expression */ - value = ExecEvalExprCompat(expr_state, expr_context, - &isnull, mult_result_handler); + /* Execute expression */ + expr_context->ecxt_scantuple = slot; - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + value = ExecEvalExprCompat(expr_state, expr_context, + &isnull, mult_result_handler); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + /* Ok, we have a value */ + compute_value = false; + } - do - { /* Search for matching partitions */ parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); @@ -492,28 +517,37 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, /* Get ResultRelationInfo holder for the selected partition */ result = 
scan_result_parts_storage(partition_relid, parts_storage); - /* Should we close 'prel'? */ - if (close_prel) - close_pathman_relation_info(prel); - - if (result == NULL || nparts == 0) + /* Somebody has dropped or created partitions */ + if (!PrelIsFresh(prel) && (nparts == 0 || result == NULL)) { - /* This partition has been dropped | we have a new one */ + /* Compare original and current Oids */ + Oid relid1 = PrelParentRelid(parts_storage->prel), + relid2 = PrelParentRelid(prel); + + /* Reopen 'prel' to make it fresh again */ + close_pathman_relation_info(prel); prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - /* Store new 'prel' in 'parts_storage' */ - close_pathman_relation_info(parts_storage->prel); - parts_storage->prel = prel; + /* Store new 'prel' */ + if (relid1 == relid2) + { + shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); + parts_storage->prel = prel; + } + else if (result && result->prel) + /* TODO: WTF? this is a new RRI, not the one we used before */ + result->prel = prel; } - else if (result->has_children) + + /* This partition is a parent itself */ + if (result && result->prel) { - /* This partition is a parent itself, repeat */ - prel = get_pathman_relation_info(partition_relid); - shout_if_prel_is_invalid(partition_relid, prel, PT_ANY); - close_prel = true; + prel = result->prel; + expr_state = result->prel_expr_state; + parent_relid = PrelParentRelid(prel); + compute_value = true; - /* We're not done yet */ + /* Repeat with a new dispatch */ result = NULL; } } From 379b686a0683c06d864e84e24a7040c0a6841f69 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 16:04:13 +0300 Subject: [PATCH 0854/1124] preserve PartRelationInfo for the lifetime of RuntimeAppend --- src/include/runtime_append.h | 3 ++- src/nodes_common.c | 19 +++++-------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/include/runtime_append.h b/src/include/runtime_append.h index 
8e003a92..bc76ea70 100644 --- a/src/include/runtime_append.h +++ b/src/include/runtime_append.h @@ -44,8 +44,9 @@ typedef struct /* Refined clauses for partition pruning */ List *canon_custom_exprs; - /* Copy of partitioning expression (protect from invalidations) */ + /* Copy of partitioning expression and dispatch info */ Node *prel_expr; + PartRelationInfo *prel; /* All available plans \ plan states */ HTAB *children_table; diff --git a/src/nodes_common.c b/src/nodes_common.c index a370c165..3662fb6c 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -663,24 +663,20 @@ void begin_append_common(CustomScanState *node, EState *estate, int eflags) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - PartRelationInfo *prel; #if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; #endif - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); + scan_state->prel = get_pathman_relation_info(scan_state->relid); + Assert(scan_state->prel); /* Prepare expression according to set_set_customscan_references() */ - scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); + scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); /* Prepare custom expression according to set_set_customscan_references() */ scan_state->canon_custom_exprs = canonicalize_custom_exprs(scan_state->custom_exprs); - - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); } TupleTableSlot * @@ -756,6 +752,7 @@ end_append_common(CustomScanState *node) clear_plan_states(&scan_state->css); hash_destroy(scan_state->children_table); + close_pathman_relation_info(scan_state->prel); } void @@ -763,16 +760,13 @@ rescan_append_common(CustomScanState *node) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; ExprContext *econtext = node->ss.ps.ps_ExprContext; - PartRelationInfo *prel; + PartRelationInfo *prel = scan_state->prel; List *ranges; ListCell *lc; WalkerContext wcxt; Oid *parts; int nparts; - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); - /* First we select all available partitions... */ ranges = list_make1_irange_full(prel, IR_COMPLETE); @@ -804,9 +798,6 @@ rescan_append_common(CustomScanState *node) scan_state->ncur_plans, scan_state->css.ss.ps.state); - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - scan_state->running_idx = 0; } From d31d8829fe5c6dd0727d887c3a52f08f2b0b3b4d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 16:08:34 +0300 Subject: [PATCH 0855/1124] restore compatibility with 9.5 --- src/include/compat/pg_compat.h | 9 +++++++++ src/runtime_append.c | 2 ++ 2 files changed, 11 insertions(+) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 273e023c..6d9f1c21 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -723,6 +723,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * RegisterCustomScanMethods() + */ +#if PG_VERSION_NUM < 96000 +#define RegisterCustomScanMethods(methods) +#endif + + + /* * ------------- * Common code diff --git a/src/runtime_append.c b/src/runtime_append.c index e73c5c7b..a90c101a 100644 --- a/src/runtime_append.c +++ b/src/runtime_append.c @@ -8,6 +8,8 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include 
"runtime_append.h" #include "utils/guc.h" From 159a9a229453efc7feba18b5545169fdaecb4a94 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 26 Jun 2018 19:06:21 +0300 Subject: [PATCH 0856/1124] introduce smart Array* wrappers --- src/include/utils.h | 26 ++++++++++++++++++++++++++ src/init.c | 11 ++--------- src/nodes_common.c | 45 ++++++++++++++------------------------------- 3 files changed, 42 insertions(+), 40 deletions(-) diff --git a/src/include/utils.h b/src/include/utils.h index 885d4bfc..0697b923 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -31,6 +31,32 @@ bool match_expr_to_operand(const Node *expr, const Node *operand); Oid get_pathman_schema(void); List *list_reverse(List *l); +/* + * Dynamic arrays. + */ + +#define ARRAY_EXP 2 + +#define ArrayAlloc(array, alloced, used, size) \ + do { \ + (array) = palloc((size) * sizeof(*(array))); \ + (alloced) = (size); \ + (used) = 0; \ + } while (0) + +#define ArrayPush(array, alloced, used, value) \ + do { \ + if ((alloced) <= (used)) \ + { \ + (alloced) = (alloced) * ARRAY_EXP + 1; \ + (array) = repalloc((array), (alloced) * sizeof(*(array))); \ + } \ + \ + (array)[(used)] = (value); \ + \ + (used)++; \ + } while (0) + /* * Useful functions for relations. */ diff --git a/src/init.c b/src/init.c index d24b8ee6..2994aaf8 100644 --- a/src/init.c +++ b/src/init.c @@ -441,9 +441,7 @@ find_inheritance_children_array(Oid parent_relid, /* * Scan pg_inherits and build a working array of subclass OIDs. 
*/ - maxoids = 32; - oidarr = (Oid *) palloc(maxoids * sizeof(Oid)); - numoids = 0; + ArrayAlloc(oidarr, maxoids, numoids, 32); relation = heap_open(InheritsRelationId, AccessShareLock); @@ -460,12 +458,7 @@ find_inheritance_children_array(Oid parent_relid, Oid inhrelid; inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid; - if (numoids >= maxoids) - { - maxoids *= 2; - oidarr = (Oid *) repalloc(oidarr, maxoids * sizeof(Oid)); - } - oidarr[numoids++] = inhrelid; + ArrayPush(oidarr, maxoids, numoids, inhrelid); } systable_endscan(scan); diff --git a/src/nodes_common.c b/src/nodes_common.c index 3662fb6c..f8721037 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -25,7 +25,6 @@ /* Allocation settings */ #define INITIAL_ALLOC_NUM 10 -#define ALLOC_EXP 2 /* Compare plans by 'original_order' */ @@ -92,12 +91,12 @@ transform_plans_into_states(RuntimeAppendState *scan_state, static ChildScanCommon * select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *result; int i; - result = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); for (i = 0; i < nparts; i++) { @@ -107,13 +106,7 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) if (!child) continue; /* no plan for this partition */ - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(ChildScanCommon)); - } - - result[used++] = child; + ArrayPush(result, allocated, used, child); } /* Get rid of useless array */ @@ -418,11 +411,13 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent) { ListCell *range_cell; - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; - Oid *result = (Oid *) palloc(allocated * sizeof(Oid)); + uint32 allocated, + used; + Oid *result; Oid *children = 
PrelGetChildrenArray(prel); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); + /* If required, add parent to result */ Assert(INITIAL_ALLOC_NUM >= 1); if (include_parent) @@ -437,14 +432,8 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, for (i = a; i <= b; i++) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(Oid)); - } - Assert(i < PrelChildrenCount(prel)); - result[used++] = children[i]; + ArrayPush(result, allocated, used, children[i]); } } @@ -826,14 +815,14 @@ explain_append_common(CustomScanState *node, /* Construct excess PlanStates */ if (!es->analyze) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *custom_ps, child; HASH_SEQ_STATUS seqstat; int i; - custom_ps = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(custom_ps, allocated, used, INITIAL_ALLOC_NUM); /* There can't be any nodes since we're not scanning anything */ Assert(!node->custom_ps); @@ -843,13 +832,7 @@ explain_append_common(CustomScanState *node, while ((child = (ChildScanCommon) hash_seq_search(&seqstat))) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - custom_ps = repalloc(custom_ps, allocated * sizeof(ChildScanCommon)); - } - - custom_ps[used++] = child; + ArrayPush(custom_ps, allocated, used, child); } /* From bd635df88cca4c5cf8340d8bfe7b237693206ac1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 16:06:07 +0300 Subject: [PATCH 0857/1124] fix resowner for PartRelationInfo --- src/relation_info.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index e9b9245c..9c130a55 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -536,7 +536,7 @@ resowner_prel_add(PartRelationInfo *prel) /* Register this 'prel' */ old_mcxt = MemoryContextSwitchTo(TopPathmanContext); 
- info->prels = list_append_unique_ptr(info->prels, prel); + info->prels = lappend(info->prels, prel); MemoryContextSwitchTo(old_mcxt); #ifdef USE_RELINFO_LEAK_TRACKER @@ -576,9 +576,8 @@ resowner_prel_del(PartRelationInfo *prel) /* Check that 'prel' is registered! */ Assert(list_member_ptr(info->prels, prel)); - /* Remove it iff we're the only user */ - if (PrelReferenceCount(prel) == 1) - info->prels = list_delete_ptr(info->prels, prel); + /* Remove it from list */ + info->prels = list_delete_ptr(info->prels, prel); } /* Check that refcount is valid */ @@ -615,20 +614,7 @@ resonwner_prel_callback(ResourceReleasePhase phase, { PartRelationInfo *prel = lfirst(lc); - if (!isCommit) - { - /* Reset refcount for valid entry */ - if (PrelIsFresh(prel)) - { - PrelReferenceCount(prel) = 0; - } - /* Otherwise, free it when refcount is zero */ - else if (--PrelReferenceCount(prel) == 0) - { - free_pathman_relation_info(prel); - } - } - else + if (isCommit) { #ifdef USE_RELINFO_LEAK_TRACKER ListCell *lc; @@ -640,10 +626,22 @@ resonwner_prel_callback(ResourceReleasePhase phase, elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); } #endif - elog(ERROR, + elog(WARNING, "cache reference leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); } + + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; + + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } } list_free(info->prels); From 5f3b17d97ccd62fc6344f39115cda69e90ae429e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 16:30:31 +0300 Subject: [PATCH 0858/1124] fixes in PartitionFilter & select_partition_for_insert() --- src/include/partition_filter.h | 6 ++-- src/partition_filter.c | 64 ++++++++++++++++++---------------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git 
a/src/include/partition_filter.h b/src/include/partition_filter.h index aa3a01e1..0940a59f 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -164,8 +164,10 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, void fini_result_parts_storage(ResultPartsStorage *parts_storage); /* Find ResultRelInfo holder in storage */ -ResultRelInfoHolder * scan_result_parts_storage(Oid partid, - ResultPartsStorage *storage); +ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); + +/* Refresh PartRelationInfo in storage */ +void refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); diff --git a/src/partition_filter.c b/src/partition_filter.c index e100a2c9..f64603cf 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -248,7 +248,7 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * -scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) +scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) { #define CopyToResultRelInfo(field_name) \ ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) @@ -383,6 +383,32 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) return rri_holder; } +/* Refresh PartRelationInfo for the partition in storage */ +void +refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +{ + if (partid == PrelParentRelid(parts_storage->prel)) + { + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + } + else + { + ResultRelInfoHolder *rri_holder; + + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + 
HASH_FIND, NULL); + + if (rri_holder && rri_holder->prel) + { + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + } + } +} /* Build tuple conversion map (e.g. parent has a dropped column) */ TupleConversionMap * @@ -486,10 +512,9 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { /* Prepare expression context */ ResetExprContext(expr_context); - - /* Execute expression */ expr_context->ecxt_scantuple = slot; + /* Execute expression */ value = ExecEvalExprCompat(expr_state, expr_context, &isnull, mult_result_handler); @@ -515,28 +540,13 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - result = scan_result_parts_storage(partition_relid, parts_storage); + result = scan_result_parts_storage(parts_storage, partition_relid); /* Somebody has dropped or created partitions */ - if (!PrelIsFresh(prel) && (nparts == 0 || result == NULL)) + if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) { - /* Compare original and current Oids */ - Oid relid1 = PrelParentRelid(parts_storage->prel), - relid2 = PrelParentRelid(prel); - - /* Reopen 'prel' to make it fresh again */ - close_pathman_relation_info(prel); - prel = get_pathman_relation_info(parent_relid); - - /* Store new 'prel' */ - if (relid1 == relid2) - { - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - parts_storage->prel = prel; - } - else if (result && result->prel) - /* TODO: WTF? 
this is a new RRI, not the one we used before */ - result->prel = prel; + /* Try building a new 'prel' for this relation */ + refresh_result_parts_storage(parts_storage, parent_relid); } /* This partition is a parent itself */ @@ -544,7 +554,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, { prel = result->prel; expr_state = result->prel_expr_state; - parent_relid = PrelParentRelid(prel); + parent_relid = result->partid; compute_value = true; /* Repeat with a new dispatch */ @@ -735,14 +745,9 @@ partition_filter_exec(CustomScanState *node) if (!TupIsNull(slot)) { MemoryContext old_mcxt; - PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; ResultRelInfo *resultRelInfo; - /* Fetch PartRelationInfo for this partitioned relation */ - if ((prel = get_pathman_relation_info(state->partitioned_table)) == NULL) - return slot; /* table is not partitioned anymore */ - /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -779,9 +784,6 @@ partition_filter_exec(CustomScanState *node) slot = state->tup_convert_slot; } - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); - return slot; } From 8995eb851cb27b2850f613e7bd65a8ca5d46b4c3 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 17:20:35 +0300 Subject: [PATCH 0859/1124] improved leak tracker --- src/include/relation_info.h | 1 + src/relation_info.c | 82 +++++++++++++++++++++++++++---------- 2 files changed, 62 insertions(+), 21 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index ee4e9a35..6c1d5435 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -223,6 +223,7 @@ typedef struct PartRelationInfo #ifdef USE_RELINFO_LEAK_TRACKER List *owners; /* saved callers of get_pathman_relation_info() */ + uint64 access_total; /* total amount of accesses to this entry */ #endif MemoryContext mcxt; /* memory context holding this struct */ diff --git a/src/relation_info.c b/src/relation_info.c index 9c130a55..ef170b58 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -58,8 +58,48 @@ #ifdef USE_RELINFO_LEAK_TRACKER #undef get_pathman_relation_info #undef close_pathman_relation_info + const char *prel_resowner_function = NULL; int prel_resowner_line = 0; + +#define LeakTrackerAdd(prel) \ + do { \ + MemoryContext old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + (prel)->owners = \ + list_append_unique( \ + (prel)->owners, \ + list_make2(makeString((char *) prel_resowner_function), \ + makeInteger(prel_resowner_line))); \ + MemoryContextSwitchTo(old_mcxt); \ + \ + (prel)->access_total++; \ + } while (0) + +#define LeakTrackerPrint(prel) \ + do { \ + ListCell *lc; \ + foreach (lc, (prel)->owners) \ + { \ + char *fun = strVal(linitial(lfirst(lc))); \ + int line = intVal(lsecond(lfirst(lc))); \ + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ + } \ + } while (0) + +#define LeakTrackerFree(prel) \ + do { \ + ListCell *lc; \ + foreach (lc, (prel)->owners) \ + { \ + list_free_deep(lfirst(lc)); \ + } \ + list_free((prel)->owners); \ + (prel)->owners 
= NIL; \ + } while (0) +#else +#define LeakTrackerAdd(prel) +#define LeakTrackerPrint(prel) +#define LeakTrackerFree(prel) #endif @@ -256,11 +296,9 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - (void) resowner_prel_del(prel); + AssertArg(prel); - /* Remove entry is it's outdated and we're the last user */ - if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) - free_pathman_relation_info(prel); + (void) resowner_prel_del(prel); } /* Check if relation is partitioned by pg_pathman */ @@ -539,14 +577,8 @@ resowner_prel_add(PartRelationInfo *prel) info->prels = lappend(info->prels, prel); MemoryContextSwitchTo(old_mcxt); -#ifdef USE_RELINFO_LEAK_TRACKER /* Save current caller (function:line) */ - old_mcxt = MemoryContextSwitchTo(prel->mcxt); - prel->owners = lappend(prel->owners, - list_make2(makeString((char *) prel_resowner_function), - makeInteger(prel_resowner_line))); - MemoryContextSwitchTo(old_mcxt); -#endif + LeakTrackerAdd(prel); /* Finally, increment refcount */ PrelReferenceCount(prel) += 1; @@ -583,8 +615,20 @@ resowner_prel_del(PartRelationInfo *prel) /* Check that refcount is valid */ Assert(PrelReferenceCount(prel) > 0); - /* Finally, decrement refcount */ + /* Decrease refcount */ PrelReferenceCount(prel) -= 1; + + /* Free list of owners */ + if (PrelReferenceCount(prel) == 0) + { + LeakTrackerFree(prel); + } + + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } } return prel; @@ -616,16 +660,9 @@ resonwner_prel_callback(ResourceReleasePhase phase, if (isCommit) { -#ifdef USE_RELINFO_LEAK_TRACKER - ListCell *lc; + /* Print verbose list of *possible* owners */ + LeakTrackerPrint(prel); - foreach (lc, prel->owners) - { - char *fun = strVal(linitial(lfirst(lc))); - int line = intVal(lsecond(lfirst(lc))); - elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); - } -#endif elog(WARNING, "cache 
reference leak: PartRelationInfo(%d) has count %d", PrelParentRelid(prel), PrelReferenceCount(prel)); @@ -637,6 +674,9 @@ resonwner_prel_callback(ResourceReleasePhase phase, /* Decrease refcount */ PrelReferenceCount(prel) -= 1; + /* Free list of owners */ + LeakTrackerFree(prel); + /* Free this entry if it's time */ if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) { From 718627e781a9ab77991e52bcc2577cb83ef82418 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 18:57:12 +0300 Subject: [PATCH 0860/1124] fix expression evaluation in PartitionFilter --- src/partition_filter.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index f64603cf..87facbc0 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -71,8 +71,7 @@ CustomExecMethods partition_filter_exec_methods; static ExprState *prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, - EState *estate, - bool try_map); + EState *estate); static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, const ResultPartsStorage *rps_storage); @@ -195,8 +194,7 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, /* Build a partitioning expression state */ parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel, parts_storage->base_rri->ri_RelationDesc, - parts_storage->estate, - cmd_type == CMD_UPDATE); + parts_storage->estate); /* Build expression context */ parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); @@ -365,8 +363,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) rri_holder->prel_expr_state = prepare_expr_state(rri_holder->prel, /* NOTE: this prel! 
*/ parts_storage->base_rri->ri_RelationDesc, - parts_storage->estate, - parts_storage->command_type == CMD_UPDATE); + parts_storage->estate); } /* Call initialization callback if needed */ @@ -570,8 +567,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, static ExprState * prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, - EState *estate, - bool try_map) + EState *estate) { ExprState *expr_state; MemoryContext old_mcxt; @@ -584,9 +580,8 @@ prepare_expr_state(const PartRelationInfo *prel, expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); /* Should we try using map? */ - if (try_map) + if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) { - AttrNumber *map; int map_length; TupleDesc source_tupdesc = RelationGetDescr(source_rel); From 92f587828fa333d43ffe479c0b9f3bffaa255a25 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 27 Jun 2018 19:09:05 +0300 Subject: [PATCH 0861/1124] rewrite some of subpartitioning tests --- expected/pathman_subpartitions.out | 193 ++++++++++++----------------- sql/pathman_subpartitions.sql | 73 ++++++----- 2 files changed, 112 insertions(+), 154 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 27be6b1e..924d1bde 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -237,7 +237,7 @@ SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 2 (1 row) INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ tableoid | a | b -----------------------+----+---- subpartitions.abc_1_1 | 25 | 25 @@ -253,7 +253,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * 
FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_1 | 125 | 25 @@ -269,7 +269,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ tableoid | a | b -----------------------+-----+---- subpartitions.abc_2_2 | 125 | 75 @@ -285,7 +285,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitio (10 rows) UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ tableoid | a | b -----------------------+-----+----- subpartitions.abc_2_3 | 125 | 125 @@ -301,9 +301,9 @@ SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartiti (10 rows) /* split_range_partition */ -SELECT split_range_partition('subpartitions.abc_2', 150); -ERROR: could not split partition if it has children -SELECT split_range_partition('subpartitions.abc_2_2', 75); +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ split_range_partition ----------------------- {50,100} @@ -324,144 +324,103 @@ SELECT subpartitions.partitions_tree('subpartitions.abc'); (9 rows) /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT 
append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ append_range_partition ------------------------ subpartitions.abc_3 (1 row) -select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); -ERROR: cannot merge partitions -select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ merge_range_partitions ------------------------ (1 row) -DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 11 other objects -/* subpartitions on same expressions */ -CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); -INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); - create_range_partitions -------------------------- - 4 -(1 row) +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) -SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ - create_range_partitions -------------------------- - 9 +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + (1 row) -SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ -WARNING: "start_value" was set to 100 +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int 
not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); create_range_partitions ------------------------- - 8 + 2 (1 row) -SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ -WARNING: "p_interval" is not multiple of range (200, 310) -NOTICE: "p_count" was limited to 10 - create_range_partitions +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition ------------------------- - 10 + subpartitions.abc_3 (1 row) -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ -ERROR: Bounds should start from 300 -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ -ERROR: Lower bound of rightmost partition should be less than 400 -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); create_range_partitions ------------------------- - 2 + 3 (1 row) -SELECT * FROM pathman_partition_list; - parent | partition | parttype | expr | range_min | range_max ----------------------+------------------------+----------+------+-----------+----------- - subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 - subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 - subpartitions.abc | subpartitions.abc_3 | 2 | a | 200 | 300 - subpartitions.abc | subpartitions.abc_4 | 2 | a | 300 | 400 - subpartitions.abc_1 | subpartitions.abc_1_1 | 2 | a | 0 | 11 - subpartitions.abc_1 | subpartitions.abc_1_2 | 2 | a | 11 | 22 - subpartitions.abc_1 | subpartitions.abc_1_3 | 2 | a | 22 | 33 - subpartitions.abc_1 | subpartitions.abc_1_4 | 2 | a | 33 | 44 - subpartitions.abc_1 | subpartitions.abc_1_5 | 2 | a | 44 | 55 - subpartitions.abc_1 | subpartitions.abc_1_6 | 2 | a | 55 | 66 - 
subpartitions.abc_1 | subpartitions.abc_1_7 | 2 | a | 66 | 77 - subpartitions.abc_1 | subpartitions.abc_1_8 | 2 | a | 77 | 88 - subpartitions.abc_1 | subpartitions.abc_1_9 | 2 | a | 88 | 99 - subpartitions.abc_2 | subpartitions.abc_2_1 | 2 | a | 100 | 111 - subpartitions.abc_2 | subpartitions.abc_2_2 | 2 | a | 111 | 122 - subpartitions.abc_2 | subpartitions.abc_2_3 | 2 | a | 122 | 133 - subpartitions.abc_2 | subpartitions.abc_2_4 | 2 | a | 133 | 144 - subpartitions.abc_2 | subpartitions.abc_2_5 | 2 | a | 144 | 155 - subpartitions.abc_2 | subpartitions.abc_2_6 | 2 | a | 155 | 166 - subpartitions.abc_2 | subpartitions.abc_2_7 | 2 | a | 166 | 177 - subpartitions.abc_2 | subpartitions.abc_2_8 | 2 | a | 177 | 188 - subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | a | 200 | 211 - subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | a | 211 | 222 - subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | a | 222 | 233 - subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | a | 233 | 244 - subpartitions.abc_3 | subpartitions.abc_3_5 | 2 | a | 244 | 255 - subpartitions.abc_3 | subpartitions.abc_3_6 | 2 | a | 255 | 266 - subpartitions.abc_3 | subpartitions.abc_3_7 | 2 | a | 266 | 277 - subpartitions.abc_3 | subpartitions.abc_3_8 | 2 | a | 277 | 288 - subpartitions.abc_3 | subpartitions.abc_3_9 | 2 | a | 288 | 299 - subpartitions.abc_3 | subpartitions.abc_3_10 | 2 | a | 299 | 310 - subpartitions.abc_4 | subpartitions.abc_4_1 | 2 | a | 300 | 350 - subpartitions.abc_4 | subpartitions.abc_4_2 | 2 | a | 350 | 450 -(33 rows) - -SELECT append_range_partition('subpartitions.abc_1'::regclass); - append_range_partition ------------------------- - subpartitions.abc_1_10 +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 (1 row) -SELECT append_range_partition('subpartitions.abc_1'::regclass); -ERROR: reached upper bound in the current level of subpartitions -DROP TABLE 
subpartitions.abc_1_10; -/* detach_range_partition */ -SELECt detach_range_partition('subpartitions.abc_1'); -ERROR: could not detach partition if it has children -/* attach_range_partition */ -CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ -ERROR: specified range [98, 110) overlaps with existing partitions -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ -ERROR: "start value" exceeds upper bound of the current level of subpartitions -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ - attach_range_partition ------------------------- - subpartitions.abc_c -(1 row) +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) -DROP TABLE subpartitions.abc CASCADE; -NOTICE: drop cascades to 39 other objects -/* subpartitions on same expression but dates */ -CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); - create_range_partitions -------------------------- - 6 -(1 row) +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); 
+SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) -SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, - '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ -WARNING: "start_value" was set to 10-02-2017 -WARNING: "p_interval" is not multiple of range (10-02-2017, 11-03-2017) -NOTICE: "p_count" was limited to 1 - create_range_partitions -------------------------- - 1 -(1 row) +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 9 other objects diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7f38f629..b790c20e 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -86,61 +86,60 @@ SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_1_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_1 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ UPDATE subpartitions.abc 
SET b = 75 WHERE a = 125 and b = 25; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should be in subpartitions.abc_2_2 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; -SELECT tableoid::regclass, * FROM subpartitions.abc; /* Should create subpartitions.abc_2_3 */ +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + /* split_range_partition */ -SELECT split_range_partition('subpartitions.abc_2', 150); -SELECT split_range_partition('subpartitions.abc_2_2', 75); +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ SELECT subpartitions.partitions_tree('subpartitions.abc'); + /* merge_range_partitions */ -SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ -select merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); -select merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); -DROP TABLE subpartitions.abc CASCADE; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +INSERT INTO subpartitions.abc VALUES (250, 50); -/* subpartitions on same expressions */ -CREATE TABLE subpartitions.abc(a INTEGER NOT NULL); -INSERT INTO subpartitions.abc SELECT i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 4); -SELECT create_range_partitions('subpartitions.abc_1', 'a', 0, 11, 9); /* not multiple */ -SELECT create_range_partitions('subpartitions.abc_2', 'a', 150, 11, 8); /* start_value should be lower */ -SELECT create_range_partitions('subpartitions.abc_3', 'a', 200, 11, 20); /* too big p_count */ -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[301, 350, 400]); /* bounds check */ -SELECT 
create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 450, 500]); /* bounds check */ -SELECT create_range_partitions('subpartitions.abc_4', 'a', ARRAY[300, 350, 450]); /* bounds check */ -SELECT * FROM pathman_partition_list; -SELECT append_range_partition('subpartitions.abc_1'::regclass); -SELECT append_range_partition('subpartitions.abc_1'::regclass); -DROP TABLE subpartitions.abc_1_10; +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; -/* detach_range_partition */ -SELECt detach_range_partition('subpartitions.abc_1'); +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; -/* attach_range_partition */ -CREATE TABLE subpartitions.abc_c(LIKE subpartitions.abc_1 INCLUDING ALL); -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 98, 110); /* fail */ -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 100, 110); /* fail */ -SELECT attach_range_partition('subpartitions.abc_1', 'subpartitions.abc_c', 99, 110); /* ok */ DROP TABLE subpartitions.abc CASCADE; -/* subpartitions on same expression but dates */ -CREATE TABLE subpartitions.abc(a DATE NOT NULL); -INSERT INTO subpartitions.abc SELECT '2017-10-02'::DATE + i FROM generate_series(1, 200, 20) as i; -SELECT create_range_partitions('subpartitions.abc', 'a', '2017-10-02'::DATE, '1 month'::INTERVAL); -SELECT create_range_partitions('subpartitions.abc_1', 'a', '2017-10-02'::DATE + 1, - '32 day'::INTERVAL, 10); /* not multiple, and limited p_count */ + +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT 
prepend_range_partition('subpartitions.abc'); +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + + DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; From 6afa610c8b20c98b9fc85e078786f481f6f30d3e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 15:01:25 +0300 Subject: [PATCH 0862/1124] attempt to fix performance issues described in issue #164 --- expected/pathman_rebuild_updates.out | 8 +- src/include/compat/pg_compat.h | 1 + src/pg_pathman.c | 1 - src/planner_tree_modification.c | 224 ++++++++++++++++++--------- 4 files changed, 153 insertions(+), 81 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index d06f7c5b..297089af 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -43,14 +43,10 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; QUERY PLAN ----------------------------- - Update on test - Update on test - Update on test_11 - -> Seq Scan on test - Filter: (val = 101) + Update on test_11 -> Seq Scan on test_11 Filter: (val = 101) -(7 rows) +(3 rows) UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; val | b | tableoid diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 61d1ab1f..8632578e 100644 --- a/src/include/compat/pg_compat.h +++ 
b/src/include/compat/pg_compat.h @@ -29,6 +29,7 @@ #include "nodes/pg_list.h" #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "optimizer/prep.h" #include "utils/memutils.h" /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index c4adef6e..8cb2ee9c 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -26,7 +26,6 @@ #include "miscadmin.h" #include "optimizer/clauses.h" #include "optimizer/plancat.h" -#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3225e59e..45ec1b0f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -94,6 +94,13 @@ typedef struct } transform_query_cxt; +typedef struct +{ + Index child_varno; + List *translated_vars; +} adjust_appendrel_varnos_cxt; + + static bool pathman_transform_query_walker(Node *node, void *context); @@ -103,6 +110,7 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); +static bool adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); /* @@ -366,20 +374,20 @@ handle_modification_query(Query *parse, transform_query_cxt *context) WrapperNode *wrap; Expr *expr; WalkerContext wcxt; - Index result_rel; + Index result_rti; int num_selected; ParamListInfo params; /* Fetch index of result relation */ - result_rel = parse->resultRelation; + result_rti = parse->resultRelation; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || + if (result_rti == 0 || (parse->commandType != CMD_UPDATE && parse->commandType != CMD_DELETE)) return; - rte = rt_fetch(result_rel, parse->rtable); + rte = rt_fetch(result_rti, parse->rtable); /* Exit if it's DELETE FROM ONLY table */ if (!rte->inh) return; @@ -406,7 +414,7 @@ 
handle_modification_query(Query *parse, transform_query_cxt *context) expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, result_rel); + prel_expr = PrelExpressionForRelid(prel, result_rti); /* Parse syntax tree and extract partition ranges */ InitWalkerContext(&wcxt, prel_expr, prel, NULL); @@ -430,13 +438,14 @@ handle_modification_query(Query *parse, transform_query_cxt *context) Relation child_rel, parent_rel; - void *tuple_map; /* we don't need the map itself */ - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ HeapTuple syscache_htup; char child_relkind; + List *translated_vars; + adjust_appendrel_varnos_cxt aav_cxt; + /* Lock 'child' table */ LockRelationOid(child, lockmode); @@ -460,19 +469,23 @@ handle_modification_query(Query *parse, transform_query_cxt *context) child_rel = heap_open(child, NoLock); parent_rel = heap_open(parent, NoLock); - /* Build a conversion map (may be trivial, i.e. NULL) */ - tuple_map = build_part_tuple_map(parent_rel, child_rel); - if (tuple_map) - free_conversion_map((TupleConversionMap *) tuple_map); + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.translated_vars = translated_vars; + if (adjust_appendrel_varnos((Node *) parse, &aav_cxt)) + return; /* failed to perform rewrites */ + + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); /* Close relations (should remain locked, though) */ heap_close(child_rel, NoLock); heap_close(parent_rel, NoLock); - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! 
*/ - return; - /* Update RTE's relid and relkind (for FDW) */ rte->relid = child; rte->relkind = child_relkind; @@ -490,6 +503,128 @@ handle_modification_query(Query *parse, transform_query_cxt *context) } } +/* Replace extern param nodes with consts */ +static Node * +eval_extern_params_mutator(Node *node, ParamListInfo params) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Param)) + { + Param *param = (Param *) node; + + Assert(params); + + /* Look to see if we've been given a value for this Param */ + if (param->paramkind == PARAM_EXTERN && + param->paramid > 0 && + param->paramid <= params->numParams) + { + ParamExternData *prm = ¶ms->params[param->paramid - 1]; + + if (OidIsValid(prm->ptype)) + { + /* OK to substitute parameter value? */ + if (prm->pflags & PARAM_FLAG_CONST) + { + /* + * Return a Const representing the param value. + * Must copy pass-by-ref datatypes, since the + * Param might be in a memory context + * shorter-lived than our output plan should be. + */ + int16 typLen; + bool typByVal; + Datum pval; + + Assert(prm->ptype == param->paramtype); + get_typlenbyval(param->paramtype, + &typLen, &typByVal); + if (prm->isnull || typByVal) + pval = prm->value; + else + pval = datumCopy(prm->value, typByVal, typLen); + return (Node *) makeConst(param->paramtype, + param->paramtypmod, + param->paramcollid, + (int) typLen, + pval, + prm->isnull, + typByVal); + } + } + } + } + + return expression_tree_mutator(node, eval_extern_params_mutator, + (void *) params); +} + +static bool +adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Query)) + { + Query *query = (Query *) node; + ListCell *lc; + + foreach (lc, query->targetList) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *child_var; + + if (te->resjunk) + continue; + + if (te->resno > list_length(context->translated_vars)) + return true; + + child_var = list_nth(context->translated_vars, te->resno - 1); + 
if (!child_var) + return true; + + /* Transform attribute number */ + te->resno = child_var->varattno; + } + + return query_tree_walker((Query *) node, + adjust_appendrel_varnos, + context, + QTW_IGNORE_RC_SUBQUERIES); + } + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + + /* Don't tranform system columns & other relations' Vars */ + if (var->varoattno > 0 && var->varno == context->child_varno) + { + Var *child_var; + + if (var->varattno > list_length(context->translated_vars)) + return true; + + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + return true; + + /* Transform attribute number */ + var->varattno = child_var->varattno; + } + + return false; + } + + return expression_tree_walker(node, + adjust_appendrel_varnos, + context); +} + /* * ------------------------------- @@ -592,65 +727,6 @@ get_rel_parenthood_status(RangeTblEntry *rte) } -/* Replace extern param nodes with consts */ -static Node * -eval_extern_params_mutator(Node *node, ParamListInfo params) -{ - if (node == NULL) - return NULL; - - if (IsA(node, Param)) - { - Param *param = (Param *) node; - - Assert(params); - - /* Look to see if we've been given a value for this Param */ - if (param->paramkind == PARAM_EXTERN && - param->paramid > 0 && - param->paramid <= params->numParams) - { - ParamExternData *prm = ¶ms->params[param->paramid - 1]; - - if (OidIsValid(prm->ptype)) - { - /* OK to substitute parameter value? */ - if (prm->pflags & PARAM_FLAG_CONST) - { - /* - * Return a Const representing the param value. - * Must copy pass-by-ref datatypes, since the - * Param might be in a memory context - * shorter-lived than our output plan should be. 
- */ - int16 typLen; - bool typByVal; - Datum pval; - - Assert(prm->ptype == param->paramtype); - get_typlenbyval(param->paramtype, - &typLen, &typByVal); - if (prm->isnull || typByVal) - pval = prm->value; - else - pval = datumCopy(prm->value, typByVal, typLen); - return (Node *) makeConst(param->paramtype, - param->paramtypmod, - param->paramcollid, - (int) typLen, - pval, - prm->isnull, - typByVal); - } - } - } - } - - return expression_tree_mutator(node, eval_extern_params_mutator, - (void *) params); -} - - /* * ----------------------------------------------- * Count number of times we've visited planner() From edf46f91a679ff467f65896f6f9a26029ebda118 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 15:28:17 +0300 Subject: [PATCH 0863/1124] small fixes --- src/planner_tree_modification.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 45ec1b0f..c4b4073d 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -93,15 +93,14 @@ typedef struct CommonTableExpr *parent_cte; } transform_query_cxt; - typedef struct { Index child_varno; + Oid parent_relid; List *translated_vars; } adjust_appendrel_varnos_cxt; - static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); @@ -473,9 +472,9 @@ handle_modification_query(Query *parse, transform_query_cxt *context) /* Translate varnos for this child */ aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; aav_cxt.translated_vars = translated_vars; - if (adjust_appendrel_varnos((Node *) parse, &aav_cxt)) - return; /* failed to perform rewrites */ + adjust_appendrel_varnos((Node *) parse, &aav_cxt); /* Translate column privileges for this child */ rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); @@ -561,6 +560,7 @@ 
eval_extern_params_mutator(Node *node, ParamListInfo params) (void *) params); } +/* Remap parent's attributes to child ones s*/ static bool adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { @@ -572,6 +572,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) Query *query = (Query *) node; ListCell *lc; + /* FIXME: we might need to reorder TargetEntries */ foreach (lc, query->targetList) { TargetEntry *te = (TargetEntry *) lfirst(lc); @@ -581,11 +582,13 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) continue; if (te->resno > list_length(context->translated_vars)) - return true; + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); child_var = list_nth(context->translated_vars, te->resno - 1); if (!child_var) - return true; + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); /* Transform attribute number */ te->resno = child_var->varattno; @@ -601,17 +604,19 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { Var *var = (Var *) node; - /* Don't tranform system columns & other relations' Vars */ + /* Don't transform system columns & other relations' Vars */ if (var->varoattno > 0 && var->varno == context->child_varno) { Var *child_var; if (var->varattno > list_length(context->translated_vars)) - return true; + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); child_var = list_nth(context->translated_vars, var->varattno - 1); if (!child_var) - return true; + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); /* Transform attribute number */ var->varattno = child_var->varattno; From da4c916300b9fbf07e033f306c91292dd2ffce39 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 16:15:42 +0300 
Subject: [PATCH 0864/1124] don't use 'varoattno' where possible --- src/nodes_common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nodes_common.c b/src/nodes_common.c index 7a4b71fe..e8d056bf 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -174,7 +174,7 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) elog(ERROR, "table \"%s\" has no attribute %d of partition \"%s\"", get_rel_name_or_relid(appinfo->parent_relid), - tlist_var->varoattno, + tlist_var->varattno, get_rel_name_or_relid(appinfo->child_relid)); } @@ -232,7 +232,7 @@ append_part_attr_to_tlist(List *tlist, TargetEntry *te = (TargetEntry *) lfirst(lc); Var *var = (Var *) te->expr; - if (IsA(var, Var) && var->varoattno == child_var->varattno) + if (IsA(var, Var) && var->varattno == child_var->varattno) { part_attr_found = true; break; From 6c4f5964f276386acfaa6323df76fe7ac99eeef5 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 2 Jul 2018 19:27:02 +0300 Subject: [PATCH 0865/1124] attempt to fix issue #165 --- expected/pathman_views.out | 68 +++++++++++++++++++++++++++++++++++- expected/pathman_views_1.out | 68 +++++++++++++++++++++++++++++++++++- sql/pathman_views.sql | 15 ++++++++ src/hooks.c | 7 ++-- 4 files changed, 154 insertions(+), 4 deletions(-) diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 2341919a..45423ef5 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -16,6 +16,9 @@ select create_hash_partitions('views._abc', 'id', 10); (1 row) insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; /* create a facade view */ create view views.abc as select * from views._abc; create or replace function views.disable_modification() @@ -117,6 +120,69 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; ERROR: DELETE +/* Test SELECT with UNION */ 
+create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index fdf19f28..bead6de1 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -16,6 +16,9 @@ select create_hash_partitions('views._abc', 'id', 10); (1 row) insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like 
views._abc); +vacuum analyze; /* create a facade view */ create view views.abc as select * from views._abc; create or replace function views.disable_modification() @@ -173,6 +176,69 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 16 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql 
b/sql/pathman_views.sql index 90118fe0..9f386a3d 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -17,6 +17,12 @@ create table views._abc(id int4 not null); select create_hash_partitions('views._abc', 'id', 10); insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); + + +vacuum analyze; + /* create a facade view */ create view views.abc as select * from views._abc; @@ -60,6 +66,15 @@ explain (costs off) delete from views.abc where id = 1 or id = 2; delete from views.abc where id = 1 or id = 2; +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; +explain (costs off) select * from views.abc_union where id = 5; +explain (costs off) table views.abc_union_all; +explain (costs off) select * from views.abc_union_all where id = 5; + + DROP SCHEMA views CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index 96efad08..d78d1943 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -384,11 +384,14 @@ pathman_rel_pathlist_hook(PlannerInfo *root, AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); /* - * If there's an 'appinfo', it means that somebody + * If there's an 'appinfo' with Oid, it means that somebody * (PG?) has already processed this partitioned table * and added its children to the plan. + * + * NOTE: there's no Oid iff it's UNION. 
*/ - if (appinfo->child_relid == rti) + if (appinfo->child_relid == rti && + OidIsValid(appinfo->parent_reloid)) return; } } From a6f968ca3ebc0e0470a73967f73856ab723668b1 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 13:35:14 +0300 Subject: [PATCH 0866/1124] use VARIADIC in merge_range_partitions() --- README.md | 9 ++------- expected/pathman_basic.out | 8 ++++---- expected/pathman_calamity.out | 8 ++++---- expected/pathman_domains.out | 2 +- expected/pathman_subpartitions.out | 4 ++-- range.sql | 16 ++-------------- sql/pathman_calamity.sql | 8 ++++---- src/pl_range_funcs.c | 14 +++++++------- 8 files changed, 26 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index c83df46c..c89d99de 100644 --- a/README.md +++ b/README.md @@ -203,14 +203,9 @@ split_range_partition(partition REGCLASS, Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. ```plpgsql -merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) +merge_range_partitions(variadic partitions REGCLASS[]) ``` -Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed. - -```plpgsql -merge_range_partitions(partitions REGCLASS[]) -``` -Merge several adjacent RANGE partitions (partitions must be specified in ascending or descending order). All the data will be accumulated in the first partition. +Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition. 
```plpgsql append_range_partition(parent REGCLASS, diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 1bdbcef9..b4b062d3 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -933,7 +933,7 @@ SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); merge_range_partitions ------------------------ - + test.num_range_rel_1 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -947,7 +947,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) /* Append and prepend partitions */ @@ -1524,7 +1524,7 @@ SELECT pathman.prepend_range_partition('test."RangeRel"'); SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); merge_range_partitions ------------------------ - + test."RangeRel_1" (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); @@ -1594,7 +1594,7 @@ SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 mo SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 14ff9cd6..2889cc80 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -746,9 +746,9 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; NOTICE: drop cascades to 2 other objects /* check function 
merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ ERROR: cannot merge partitions -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ ERROR: cannot merge partitions CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -764,8 +764,8 @@ SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); 2 (1 row) -SELECT merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index e5e882c0..e6fc43fe 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -41,7 +41,7 @@ SELECT prepend_range_partition('domains.dom_table'); SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); merge_range_partitions ------------------------ - + domains.dom_table_1 (1 row) SELECT split_range_partition('domains.dom_table_1', 50); diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 924d1bde..c5446c94 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -336,7 +336,7 @@ INSERT INTO subpartitions.abc VALUES (250, 50); SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ merge_range_partitions ------------------------ - + subpartitions.abc_2 (1 row) SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; @@ -349,7 +349,7 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; SELECT 
merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ merge_range_partitions ------------------------ - + subpartitions.abc_2_1 (1 row) SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; diff --git a/range.sql b/range.sql index dad82ff2..4b5c74a0 100644 --- a/range.sql +++ b/range.sql @@ -392,18 +392,6 @@ BEGIN END $$ LANGUAGE plpgsql; -/* - * The special case of merging two partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS $$ -BEGIN - PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); -END -$$ LANGUAGE plpgsql; - /* * Append new partition. */ @@ -883,8 +871,8 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ * The rest of partitions will be dropped. */ CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; /* diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index ed0eae95..1c48138e 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -320,8 +320,8 @@ DROP TABLE calamity.test_range_oid CASCADE; /* check function merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -329,8 +329,8 @@ CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); -SELECT 
merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index feb028a5..6289e065 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -628,7 +628,8 @@ build_sequence_name(PG_FUNCTION_ARGS) Datum merge_range_partitions(PG_FUNCTION_ARGS) { - Oid parent = InvalidOid; + Oid parent = InvalidOid, + partition = InvalidOid; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); Oid *parts; @@ -734,9 +735,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* First determine the bounds of a new constraint */ min_bound = bounds[0].min; max_bound = bounds[nparts - 1].max; + partition = parts[0]; /* Drop old constraint and create a new one */ - modify_range_constraint(parts[0], + modify_range_constraint(partition, prel->expr_cstr, prel->ev_type, &min_bound, @@ -801,7 +803,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Don't forget to close 'prel'! 
*/ close_pathman_relation_info(prel); - PG_RETURN_VOID(); + PG_RETURN_OID(partition); } @@ -851,12 +853,10 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == partition) - break; + i = PrelHasPartition(prel, partition) - 1; /* Should have found it */ - Assert(i < PrelChildrenCount(prel)); + Assert(i >= 0 && i < PrelChildrenCount(prel)); /* Expand next partition if it exists */ if (i < PrelLastChild(prel)) From 464e840bf19182d6909f01157e583ad97b4e83f6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 13:38:45 +0300 Subject: [PATCH 0867/1124] reorder some code --- src/pl_range_funcs.c | 700 +++++++++++++++++++++---------------------- 1 file changed, 349 insertions(+), 351 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 6289e065..7fa00cf7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -384,242 +384,111 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(array); } - -/* - * ------------------------ - * Various useful getters - * ------------------------ - */ - /* - * Returns range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the partition's Oid. + * Takes text representation of interval value and checks + * if it corresponds to partitioning expression. + * NOTE: throws an ERROR if it fails to convert text to Datum. 
*/ Datum -get_part_range_by_oid(PG_FUNCTION_ARGS) +validate_interval_value(PG_FUNCTION_ARGS) { - Oid partition_relid, - parent_relid; - Oid arg_type; - RangeEntry *ranges; - PartRelationInfo *prel; - uint32 idx; - - if (!PG_ARGISNULL(0)) - { - partition_relid = PG_GETARG_OID(0); - } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); - - parent_relid = get_parent_of_partition(partition_relid); - if (!OidIsValid(parent_relid)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)))); - - /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - - /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - if (getBaseType(arg_type) != getBaseType(prel->ev_type)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); +#define ARG_PARTREL 0 +#define ARG_EXPRESSION 1 +#define ARG_PARTTYPE 2 +#define ARG_RANGE_INTERVAL 3 +#define ARG_EXPRESSION_P 4 - ranges = PrelGetRangesArray(prel); + Oid partrel; + PartType parttype; + char *expr_cstr; + Oid expr_type; - /* Look for the specified partition */ - if ((idx = PrelHasPartition(prel, partition_relid)) > 0) + if (PG_ARGISNULL(ARG_PARTREL)) { - ArrayType *arr; - Bound elems[2]; - - elems[0] = ranges[idx - 1].min; - elems[1] = ranges[idx - 1].max; - - arr = construct_bounds_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align); - - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); - - PG_RETURN_ARRAYTYPE_P(arr); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partrel' should not be NULL"))); } + else partrel = PG_GETARG_OID(ARG_PARTREL); - /* No partition found, report error */ - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" has no partition \"%s\"", - get_rel_name_or_relid(parent_relid), - get_rel_name_or_relid(partition_relid)))); - - PG_RETURN_NULL(); /* keep compiler happy */ -} - -/* - * Returns N-th range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the index of the range - * (if it is negative then the last range will be returned). - */ -Datum -get_part_range_by_idx(PG_FUNCTION_ARGS) -{ - Oid parent_relid; - int partition_idx = 0; - Oid arg_type; - Bound elems[2]; - RangeEntry *ranges; - PartRelationInfo *prel; - ArrayType *arr; + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + elog(ERROR, "relation \"%u\" does not exist", partrel); - if (!PG_ARGISNULL(0)) + if (PG_ARGISNULL(ARG_EXPRESSION)) { - parent_relid = PG_GETARG_OID(0); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parent_relid' should not be NULL"))); + else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); - if (!PG_ARGISNULL(1)) + if (PG_ARGISNULL(ARG_PARTTYPE)) { - partition_idx = PG_GETARG_INT32(1); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_idx' should not be NULL"))); - - /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + else parttype = 
DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); - /* Check type of 'dummy' (for correct output) */ - arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); - if (getBaseType(arg_type) != getBaseType(prel->ev_type)) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); + /* + * Fetch partitioning expression's type using + * either user's expression or parsed expression. + * + * NOTE: we check number of function's arguments + * in case of late updates (e.g. 1.1 => 1.4). + */ + if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) + { + Datum expr_datum; + /* We'll have to parse expression with our own hands */ + expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); - /* Now we have to deal with 'idx' */ - if (partition_idx < -1) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("negative indices other than -1" - " (last partition) are not allowed"))); - } - else if (partition_idx == -1) - { - partition_idx = PrelLastChild(prel); + /* Free both expressions */ + pfree(DatumGetPointer(expr_datum)); + pfree(expr_cstr); } - else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) + else { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partition #%d does not exist (total amount is %u)", - partition_idx, PrelChildrenCount(prel)))); - } - - ranges = PrelGetRangesArray(prel); - - /* Build args for construct_infinitable_array() */ - elems[0] = ranges[partition_idx].min; - elems[1] = ranges[partition_idx].max; - - arr = construct_bounds_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align); - - /* Don't forget to close 'prel'! */ - close_pathman_relation_info(prel); - - PG_RETURN_ARRAYTYPE_P(arr); -} - - -/* - * ------------------------ - * Useful string builders - * ------------------------ - */ - -/* Build range condition for a CHECK CONSTRAINT. 
*/ -Datum -build_range_condition(PG_FUNCTION_ARGS) -{ - Oid partition_relid; - char *expression; - Node *expr; + char *expr_p_cstr; - Bound min, - max; - Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); - Constraint *con; - char *result; + /* Good, let's use a cached parsed expression */ + expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); + expr_type = exprType(stringToNode(expr_p_cstr)); - if (!PG_ARGISNULL(0)) - { - partition_relid = PG_GETARG_OID(0); + /* Free both expressions */ + pfree(expr_p_cstr); + pfree(expr_cstr); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); - if (!PG_ARGISNULL(1)) + /* + * NULL interval is fine for both HASH and RANGE. + * But for RANGE we need to make some additional checks. + */ + if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); - } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL")));; - - min = PG_ARGISNULL(2) ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(2)); - - max = PG_ARGISNULL(3) ? 
- MakeBoundInf(PLUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(3)); - - expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); - con = build_range_check_constraint(partition_relid, - expr, - &min, &max, - bounds_type); - - result = deparse_constraint(partition_relid, con->raw_expr); - - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - -/* Build name for sequence for auto partition naming */ -Datum -build_sequence_name(PG_FUNCTION_ARGS) -{ - Oid parent_relid = PG_GETARG_OID(0); - Oid parent_nsp; - char *seq_name; - char *result; + Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), + interval_value; + Oid interval_type; - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) - ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + if (parttype == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should be NULL for HASH partitioned table"))); - parent_nsp = get_rel_namespace(parent_relid); - seq_name = build_sequence_name_relid_internal(parent_relid); + /* Try converting textual representation */ + interval_value = extract_binary_interval_from_text(interval_text, + expr_type, + &interval_type); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(parent_nsp)), - quote_identifier(seq_name)); + /* Check that interval isn't trivial */ + if (interval_is_trivial(expr_type, interval_value, interval_type)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be trivial"))); + } - PG_RETURN_TEXT_P(cstring_to_text(result)); + PG_RETURN_BOOL(true); } - /* * Merge multiple partitions. * All data will be copied to the first one. @@ -806,196 +675,325 @@ merge_range_partitions(PG_FUNCTION_ARGS) PG_RETURN_OID(partition); } - /* - * Drops partition and expands the next partition - * so that it could cover the dropped one. - * - * This function was written in order to support - * Oracle-like ALTER TABLE ... DROP PARTITION. 
+ * Drops partition and expands the next partition + * so that it could cover the dropped one. + * + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. + */ +Datum +drop_range_partition_expand_next(PG_FUNCTION_ARGS) +{ + Oid partition = PG_GETARG_OID(0), + parent; + PartRelationInfo *prel; + ObjectAddress object; + RangeEntry *ranges; + int i; + + /* Lock the partition we're going to drop */ + LockRelationOid(partition, AccessExclusiveLock); + + /* Check if partition exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) + elog(ERROR, "relation %u does not exist", partition); + + /* Get parent's relid */ + parent = get_parent_of_partition(partition); + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) + elog(ERROR, "relation \"%s\" is not a partition", + get_rel_name(partition)); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + /* Fetch ranges array */ + ranges = PrelGetRangesArray(prel); + + /* Looking for partition in child relations */ + i = PrelHasPartition(prel, partition) - 1; + + /* Should have found it */ + Assert(i >= 0 && i < PrelChildrenCount(prel)); + + /* Expand next partition if it exists */ + if (i < PrelLastChild(prel)) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + LOCKMODE lockmode = AccessExclusiveLock; + + /* Lock next partition */ + LockRelationOid(next_partition, lockmode); + + /* Does next partition exist? 
*/ + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); + } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); + } + + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_VOID(); +} + + +/* + * ------------------------ + * Various useful getters + * ------------------------ + */ + +/* + * Returns range entry (min, max) (in form of array). + * + * arg #1 is the parent's Oid. + * arg #2 is the partition's Oid. + */ +Datum +get_part_range_by_oid(PG_FUNCTION_ARGS) +{ + Oid partition_relid, + parent_relid; + Oid arg_type; + RangeEntry *ranges; + PartRelationInfo *prel; + uint32 idx; + + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition_relid)))); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); + + ranges = PrelGetRangesArray(prel); + + /* Look for the specified partition */ + if ((idx = 
PrelHasPartition(prel, partition_relid)) > 0) + { + ArrayType *arr; + Bound elems[2]; + + elems[0] = ranges[idx - 1].min; + elems[1] = ranges[idx - 1].max; + + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); + } + + /* No partition found, report error */ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" has no partition \"%s\"", + get_rel_name_or_relid(parent_relid), + get_rel_name_or_relid(partition_relid)))); + + PG_RETURN_NULL(); /* keep compiler happy */ +} + +/* + * Returns N-th range entry (min, max) (in form of array). * - * In Oracle partitions only have upper bound and when partition - * is dropped the next one automatically covers freed range. + * arg #1 is the parent's Oid. + * arg #2 is the index of the range + * (if it is negative then the last range will be returned). */ Datum -drop_range_partition_expand_next(PG_FUNCTION_ARGS) +get_part_range_by_idx(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0), - parent; - PartRelationInfo *prel; - ObjectAddress object; + Oid parent_relid; + int partition_idx = 0; + Oid arg_type; + Bound elems[2]; RangeEntry *ranges; - int i; - - /* Lock the partition we're going to drop */ - LockRelationOid(partition, AccessExclusiveLock); - - /* Check if partition exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) - elog(ERROR, "relation %u does not exist", partition); - - /* Get parent's relid */ - parent = get_parent_of_partition(partition); + PartRelationInfo *prel; + ArrayType *arr; - /* Prevent changes in partitioning scheme */ - LockRelationOid(parent, ShareUpdateExclusiveLock); + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - /* Check if parent exists */ - 
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name(partition)); + if (!PG_ARGISNULL(1)) + { + partition_idx = PG_GETARG_INT32(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_idx' should not be NULL"))); /* Emit an error if it is not partitioned by RANGE */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_RANGE); - - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Looking for partition in child relations */ - i = PrelHasPartition(prel, partition) - 1; + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); - /* Should have found it */ - Assert(i >= 0 && i < PrelChildrenCount(prel)); - /* Expand next partition if it exists */ - if (i < PrelLastChild(prel)) + /* Now we have to deal with 'idx' */ + if (partition_idx < -1) { - RangeEntry *cur = &ranges[i], - *next = &ranges[i + 1]; - Oid next_partition = next->child_oid; - LOCKMODE lockmode = AccessExclusiveLock; + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("negative indices other than -1" + " (last partition) are not allowed"))); + } + else if (partition_idx == -1) + { + partition_idx = PrelLastChild(prel); + } + else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition #%d does not exist (total amount is %u)", + partition_idx, PrelChildrenCount(prel)))); + } - /* Lock next partition */ - LockRelationOid(next_partition, lockmode); + ranges = PrelGetRangesArray(prel); 
- /* Does next partition exist? */ - if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) - { - /* Stretch next partition to cover range */ - modify_range_constraint(next_partition, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); - } - /* Bad luck, unlock missing partition */ - else UnlockRelationOid(next_partition, lockmode); - } + /* Build args for construct_infinitable_array() */ + elems[0] = ranges[partition_idx].min; + elems[1] = ranges[partition_idx].max; - /* Drop partition */ - ObjectAddressSet(object, RelationRelationId, partition); - performDeletion(&object, DROP_CASCADE, 0); + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); /* Don't forget to close 'prel'! */ close_pathman_relation_info(prel); - PG_RETURN_VOID(); + PG_RETURN_ARRAYTYPE_P(arr); } + /* - * Takes text representation of interval value and checks - * if it corresponds to partitioning expression. - * NOTE: throws an ERROR if it fails to convert text to Datum. + * ------------------------ + * Useful string builders + * ------------------------ */ + +/* Build range condition for a CHECK CONSTRAINT. 
*/ Datum -validate_interval_value(PG_FUNCTION_ARGS) +build_range_condition(PG_FUNCTION_ARGS) { -#define ARG_PARTREL 0 -#define ARG_EXPRESSION 1 -#define ARG_PARTTYPE 2 -#define ARG_RANGE_INTERVAL 3 -#define ARG_EXPRESSION_P 4 - - Oid partrel; - PartType parttype; - char *expr_cstr; - Oid expr_type; - - if (PG_ARGISNULL(ARG_PARTREL)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partrel' should not be NULL"))); - } - else partrel = PG_GETARG_OID(ARG_PARTREL); + Oid partition_relid; + char *expression; + Node *expr; - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) - elog(ERROR, "relation \"%u\" does not exist", partrel); + Bound min, + max; + Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + Constraint *con; + char *result; - if (PG_ARGISNULL(ARG_EXPRESSION)) + if (!PG_ARGISNULL(0)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL"))); + partition_relid = PG_GETARG_OID(0); } - else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); - if (PG_ARGISNULL(ARG_PARTTYPE)) + if (!PG_ARGISNULL(1)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parttype' should not be NULL"))); + expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); } - else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL")));; - /* - * Fetch partitioning expression's type using - * either user's expression or parsed expression. - * - * NOTE: we check number of function's arguments - * in case of late updates (e.g. 1.1 => 1.4). - */ - if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) - { - Datum expr_datum; + min = PG_ARGISNULL(2) ? 
+ MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); - /* We'll have to parse expression with our own hands */ - expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); + max = PG_ARGISNULL(3) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(3)); - /* Free both expressions */ - pfree(DatumGetPointer(expr_datum)); - pfree(expr_cstr); - } - else - { - char *expr_p_cstr; + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); + con = build_range_check_constraint(partition_relid, + expr, + &min, &max, + bounds_type); - /* Good, let's use a cached parsed expression */ - expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); - expr_type = exprType(stringToNode(expr_p_cstr)); + result = deparse_constraint(partition_relid, con->raw_expr); - /* Free both expressions */ - pfree(expr_p_cstr); - pfree(expr_cstr); - } + PG_RETURN_TEXT_P(cstring_to_text(result)); +} - /* - * NULL interval is fine for both HASH and RANGE. - * But for RANGE we need to make some additional checks. 
- */ - if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) - { - Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), - interval_value; - Oid interval_type; +/* Build name for sequence for auto partition naming */ +Datum +build_sequence_name(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Oid parent_nsp; + char *seq_name; + char *result; - if (parttype == PT_HASH) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should be NULL for HASH partitioned table"))); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); - /* Try converting textual representation */ - interval_value = extract_binary_interval_from_text(interval_text, - expr_type, - &interval_type); + parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); - /* Check that interval isn't trivial */ - if (interval_is_trivial(expr_type, interval_value, interval_type)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should not be trivial"))); - } + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(parent_nsp)), + quote_identifier(seq_name)); - PG_RETURN_BOOL(true); + PG_RETURN_TEXT_P(cstring_to_text(result)); } From 2559992db6c7c1f7f5ea738429348c837004570c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:22:48 +0300 Subject: [PATCH 0868/1124] rewrite split_range_partition() in C language --- expected/pathman_basic.out | 30 +++-- expected/pathman_domains.out | 2 +- expected/pathman_subpartitions.out | 2 +- range.sql | 112 ++--------------- sql/pathman_basic.sql | 1 + src/pl_range_funcs.c | 187 ++++++++++++++++++++++++++--- 6 files changed, 202 insertions(+), 132 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index b4b062d3..e9950470 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -910,7 +910,7 @@ NOTICE: 
drop cascades to 4 other objects SELECT pathman.split_range_partition('test.num_range_rel_1', 500); split_range_partition ----------------------- - {0,1000} + test.num_range_rel_5 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -923,10 +923,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 Index Cond: (id <= 700) (5 rows) +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); - split_range_partition -------------------------- - {01-01-2015,02-01-2015} + split_range_partition +----------------------- + test.range_rel_5 (1 row) /* Merge two partitions into one */ @@ -1207,7 +1215,7 @@ SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); split_range_partition ----------------------- - {50,70} + test."test.zero_60" (1 row) DROP TABLE test.zero CASCADE; @@ -1528,9 +1536,9 @@ SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); - split_range_partition -------------------------- - {12-31-2014,01-02-2015} + split_range_partition +----------------------- + test."RangeRel_6" (1 row) DROP TABLE test."RangeRel" CASCADE; @@ -1598,9 +1606,9 @@ SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); (1 row) SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); - split_range_partition -------------------------- - {01-01-2010,03-01-2010} + split_range_partition +----------------------- + test.range_rel_13 (1 row) SELECT append_range_partition('test.range_rel'); diff --git a/expected/pathman_domains.out 
b/expected/pathman_domains.out index e6fc43fe..41c8bfbb 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -47,7 +47,7 @@ SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); SELECT split_range_partition('domains.dom_table_1', 50); split_range_partition ----------------------- - {1,201} + domains.dom_table_14 (1 row) INSERT INTO domains.dom_table VALUES(1101); diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index c5446c94..4dd5f5dd 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -306,7 +306,7 @@ ERROR: cannot split partition that has children SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ split_range_partition ----------------------- - {50,100} + subpartitions.abc_2_4 (1 row) SELECT subpartitions.partitions_tree('subpartitions.abc'); diff --git a/range.sql b/range.sql index 4b5c74a0..a014ed0f 100644 --- a/range.sql +++ b/range.sql @@ -294,104 +294,6 @@ END $$ LANGUAGE plpgsql; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - partition_relid REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS $$ -DECLARE - parent_relid REGCLASS; - part_type INTEGER; - part_expr TEXT; - part_expr_type REGTYPE; - check_name TEXT; - check_cond TEXT; - new_partition TEXT; - -BEGIN - parent_relid = @extschema@.get_parent_of_partition(partition_relid); - - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition_relid); - - /* Acquire lock on parent's scheme */ - PERFORM @extschema@.prevent_part_modification(parent_relid); - - /* Acquire lock on partition's scheme */ - PERFORM @extschema@.prevent_part_modification(partition_relid); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM 
@extschema@.prevent_data_modification(partition_relid); - - /* Check that partition is not partitioned */ - if @extschema@.get_number_of_partitions(partition_relid) > 0 THEN - RAISE EXCEPTION 'cannot split partition that has children'; - END IF; - - part_expr_type = @extschema@.get_partition_key_type(parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); - part_type := @extschema@.get_partition_type(parent_relid); - - /* Check if this is a RANGE partition */ - IF part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; - END IF; - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(part_expr_type)::TEXT) - USING partition_relid - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - new_partition := @extschema@.create_single_range_partition(parent_relid, - split_value, - p_range[2], - partition_name, - tablespace); - - /* Copy data */ - check_cond := @extschema@.build_range_condition(new_partition::regclass, - part_expr, split_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition_relid::TEXT, - check_cond, - new_partition); - - /* Alter original partition */ - check_cond := @extschema@.build_range_condition(partition_relid::regclass, - part_expr, p_range[1], split_value); - check_name := @extschema@.build_check_constraint_name(partition_relid); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition_relid::TEXT, - check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition_relid::TEXT, - 
check_name, - check_cond); -END -$$ LANGUAGE plpgsql; - /* * Append new partition. */ @@ -867,8 +769,18 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. + * Split RANGE partition in two using a pivot. + */ +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +/* + * Merge RANGE partitions. */ CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 11639852..3eb0afff 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -263,6 +263,7 @@ DROP TABLE test.hash_varchar CASCADE; /* Split first partition in half */ SELECT pathman.split_range_partition('test.num_range_rel_1', 500); EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7fa00cf7..7d17d407 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -46,15 +46,16 @@ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); PG_FUNCTION_INFO_V1( create_range_partitions_internal ); PG_FUNCTION_INFO_V1( check_range_available_pl ); PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); +PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( split_range_partition ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( 
build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); -PG_FUNCTION_INFO_V1( merge_range_partitions ); -PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); -PG_FUNCTION_INFO_V1( validate_interval_value ); static ArrayType *construct_bounds_array(Bound *elems, @@ -489,6 +490,162 @@ validate_interval_value(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +Datum +split_range_partition(PG_FUNCTION_ARGS) +{ + Oid parent = InvalidOid, + partition1, + partition2; + RangeVar *part_name = NULL; + char *tablespace_name = NULL; + + Datum pivot_value; + Oid pivot_type; + + PartRelationInfo *prel; + Bound min_bound, + max_bound, + split_bound; + + Snapshot fresh_snapshot; + FmgrInfo finfo; + SPIPlanPtr plan; + char *query; + int i; + + if (!PG_ARGISNULL(0)) + { + partition1 = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition1' should not be NULL"))); + + if (!PG_ARGISNULL(1)) + { + pivot_value = PG_GETARG_DATUM(1); + pivot_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'split_value' should not be NULL"))); + + LockRelationOid(partition1, ExclusiveLock); + + /* Get parent of partition */ + parent = get_parent_of_partition(partition1); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition1)))); + + /* This partition should not have children */ + if (has_pathman_relation_info(partition1)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot split partition that has children"))); + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + i = PrelHasPartition(prel, partition1) - 1; + Assert(i >= 0 && i < 
PrelChildrenCount(prel)); + + min_bound = PrelGetRangesArray(prel)[i].min; + max_bound = PrelGetRangesArray(prel)[i].max; + + split_bound = MakeBound(perform_type_cast(pivot_value, + getBaseType(pivot_type), + getBaseType(prel->ev_type), + NULL)); + + fmgr_info(prel->cmp_proc, &finfo); + + /* Validate pivot's value */ + if (cmp_bounds(&finfo, prel->ev_collid, &split_bound, &min_bound) <= 0 || + cmp_bounds(&finfo, prel->ev_collid, &split_bound, &max_bound) >= 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("specified value does not fit into the range (%s, %s)", + BoundToCString(&min_bound, prel->ev_type), + BoundToCString(&max_bound, prel->ev_type)))); + } + + if (!PG_ARGISNULL(2)) + { + part_name = makeRangeVar(get_namespace_name(get_rel_namespace(parent)), + TextDatumGetCString(PG_GETARG_DATUM(2)), + 0); + } + + if (!PG_ARGISNULL(3)) + { + tablespace_name = TextDatumGetCString(PG_GETARG_DATUM(3)); + } + + /* Create a new partition */ + partition2 = create_single_range_partition_internal(parent, + &split_bound, + &max_bound, + prel->ev_type, + part_name, + tablespace_name); + + /* Make constraint visible */ + CommandCounterIncrement(); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + + query = psprintf("WITH part_data AS ( " + "DELETE FROM %1$s WHERE (%3$s) >= $1 RETURNING " + "*) " + "INSERT INTO %2$s SELECT * FROM part_data", + get_qualified_rel_name(partition1), + get_qualified_rel_name(partition2), + prel->expr_cstr); + + plan = SPI_prepare(query, 1, &prel->ev_type); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); + + SPI_execute_snapshot(plan, + &split_bound.value, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + + SPI_finish(); + + /* Drop old constraint and create a new one */ + modify_range_constraint(partition1, + prel->expr_cstr, + prel->ev_type, + &min_bound, + &split_bound); + + /* Make constraint visible */ + CommandCounterIncrement(); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition2); +} + /* * Merge multiple partitions. * All data will be copied to the first one. 
@@ -565,7 +722,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) errdetail("all relations must share the same parent"))); } - /* Lock parent till transaction's end */ + /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); /* Emit an error if it is not partitioned by RANGE */ @@ -632,9 +789,9 @@ merge_range_partitions(PG_FUNCTION_ARGS) ObjectAddress object; char *query = psprintf("WITH part_data AS ( " - "DELETE FROM %s RETURNING " + "DELETE FROM %1$s RETURNING " "*) " - "INSERT INTO %s SELECT * FROM part_data", + "INSERT INTO %2$s SELECT * FROM part_data", get_qualified_rel_name(parts[i]), get_qualified_rel_name(parts[0])); @@ -642,8 +799,7 @@ merge_range_partitions(PG_FUNCTION_ARGS) if (!plan) elog(ERROR, "%s: SPI_prepare returned %d", - CppAsString(merge_range_partitions), - SPI_result); + __FUNCTION__, SPI_result); SPI_execute_snapshot(plan, NULL, NULL, fresh_snapshot, @@ -698,21 +854,16 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Lock the partition we're going to drop */ LockRelationOid(partition, AccessExclusiveLock); - /* Check if partition exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partition))) - elog(ERROR, "relation %u does not exist", partition); - /* Get parent's relid */ parent = get_parent_of_partition(partition); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition)))); /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name(partition)); - /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -722,8 +873,6 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) /* Looking for 
partition in child relations */ i = PrelHasPartition(prel, partition) - 1; - - /* Should have found it */ Assert(i >= 0 && i < PrelChildrenCount(prel)); /* Expand next partition if it exists */ From fa874e99a0f128341641281a5900e7ec921e6829 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:30:13 +0300 Subject: [PATCH 0869/1124] disabled cppcheck-based builds --- .travis.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3ca602c2..265ac48d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,14 +18,10 @@ script: env: - DOCKER_IMAGE=pathman/pg95_clang_check_code - - DOCKER_IMAGE=pathman/pg95_cppcheck - DOCKER_IMAGE=pathman/pg95_pathman_tests - DOCKER_IMAGE=pathman/pg96_clang_check_code - - DOCKER_IMAGE=pathman/pg96_cppcheck - DOCKER_IMAGE=pathman/pg96_pathman_tests - DOCKER_IMAGE=pathman/pg10_clang_check_code - - DOCKER_IMAGE=pathman/pg10_cppcheck - DOCKER_IMAGE=pathman/pg10_pathman_tests - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg10_ca_cppcheck - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests From f87a871d55510c16f4b6dfde897e2f8c397399af Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 16:43:33 +0300 Subject: [PATCH 0870/1124] fix python-based tests --- tests/python/partitioning_test.py | 554 ------------------------------ 1 file changed, 554 deletions(-) mode change 100755 => 100644 tests/python/partitioning_test.py diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py old mode 100755 new mode 100644 index 27ad6613..12475b9e --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -15,48 +15,6 @@ import threading import time import unittest -<<<<<<< HEAD - -from distutils.version import LooseVersion -from testgres import get_new_node, get_bin_path, get_pg_version - -# set setup base logging config, it can be turned on by `use_logging` -# parameter on node setup - -import logging -import logging.config - -logfile 
= os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') -LOG_CONFIG = { - 'version': 1, - 'handlers': { - 'console': { - 'class': 'logging.StreamHandler', - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - 'file': { - 'class': 'logging.FileHandler', - 'filename': logfile, - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - }, - 'formatters': { - 'base_format': { - 'format': '%(node)-5s: %(message)s', - }, - }, - 'root': { - 'handlers': ('file', ), - 'level': 'DEBUG', - }, -} - -logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_version()) - -======= import functools from distutils.version import LooseVersion @@ -99,7 +57,6 @@ logging.config.dictConfig(LOG_CONFIG) version = LooseVersion(get_pg_version()) ->>>>>>> master # Helper function for json equality def ordered(obj, skip_keys=None): @@ -112,18 +69,6 @@ def ordered(obj, skip_keys=None): return obj -<<<<<<< HEAD -def if_fdw_enabled(func): - """ To run tests with FDW support, set environment variable TEST_FDW=1 """ - - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - - return wrapper -======= # Check if postgres_fdw is available @functools.lru_cache(maxsize=1) def is_postgres_fdw_ready(): @@ -136,7 +81,6 @@ def is_postgres_fdw_ready(): return True return False ->>>>>>> master class Tests(unittest.TestCase): @@ -145,25 +89,6 @@ def set_trace(self, con, command="pg_debug"): p = subprocess.Popen([command], stdin=subprocess.PIPE) p.communicate(str(pid).encode()) -<<<<<<< HEAD - def start_new_pathman_cluster(self, - name='test', - allow_streaming=False, - test_data=False): - node = get_new_node(name) - node.init(allow_streaming=allow_streaming) - node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") - node.start() - node.psql('postgres', 'create extension pg_pathman') - if test_data: - cmds = ( - "create table 
abc(id serial, t text)", - "insert into abc select generate_series(1, 300000)", - "select create_hash_partitions('abc', 'id', 3, partition_data := false)", - ) - for cmd in cmds: - node.safe_psql('postgres', cmd) -======= def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): node = get_new_node() node.init(allow_streaming=allow_streaming) @@ -179,7 +104,6 @@ def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): """) node.safe_psql('vacuum analyze') ->>>>>>> master return node @@ -187,29 +111,17 @@ def test_concurrent(self): """ Test concurrent partitioning """ with self.start_new_pathman_cluster(test_data=True) as node: -<<<<<<< HEAD - node.psql('postgres', "select partition_table_concurrently('abc')") - - while True: - # update some rows to check for deadlocks - node.safe_psql('postgres', """ -======= node.psql("select partition_table_concurrently('abc')") while True: # update some rows to check for deadlocks node.safe_psql(""" ->>>>>>> master update abc set t = 'test' where id in (select (random() * 300000)::int from generate_series(1, 3000)) """) -<<<<<<< HEAD - count = node.execute('postgres', """ -======= count = node.execute(""" ->>>>>>> master select count(*) from pathman_concurrent_part_tasks """) @@ -218,15 +130,9 @@ def test_concurrent(self): break time.sleep(1) -<<<<<<< HEAD - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') -======= data = node.execute('select count(*) from only abc') self.assertEqual(data[0][0], 0) data = node.execute('select count(*) from abc') ->>>>>>> master self.assertEqual(data[0][0], 300000) node.stop() @@ -234,47 +140,22 @@ def test_replication(self): """ Test how pg_pathman works with replication """ with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: -<<<<<<< HEAD - with node.replicate('node2') as replica: -======= with node.replicate() as 
replica: ->>>>>>> master replica.start() replica.catchup() # check that results are equal self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - - # enable parent and see if it is enabled in replica - node.psql('postgres', "select enable_parent('abc')") -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain (costs off) select * from abc')) # enable parent and see if it is enabled in replica node.psql("select enable_parent('abc')") ->>>>>>> master # wait until replica catches up replica.catchup() self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 300000) - - # check that UPDATE in pathman_config_params invalidates cache - node.psql('postgres', - 'update pathman_config_params set enable_parent = false') -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain (costs off) select * from abc')) self.assertEqual( @@ -285,21 +166,11 @@ def test_replication(self): # check that UPDATE in pathman_config_params invalidates cache node.psql('update pathman_config_params set enable_parent = false') ->>>>>>> master # wait until replica catches up replica.catchup() self.assertEqual( -<<<<<<< HEAD - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc')) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc')) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], 0) -======= node.psql('explain (costs off) select * from abc'), replica.psql('explain 
(costs off) select * from abc')) self.assertEqual( @@ -307,7 +178,6 @@ def test_replication(self): replica.psql('select * from abc')) self.assertEqual( node.execute('select count(*) from abc')[0][0], 0) ->>>>>>> master def test_locks(self): """ @@ -337,39 +207,22 @@ def add_partition(node, flag, query): We expect that this query will wait until another session commits or rolls back """ -<<<<<<< HEAD - node.safe_psql('postgres', query) -======= node.safe_psql(query) ->>>>>>> master with lock: flag.set(True) # Initialize master server -<<<<<<< HEAD - with get_new_node('master') as node: - node.init() - node.append_conf("postgresql.conf", "shared_preload_libraries='pg_pathman'\n") - node.start() - sql = """ -======= with get_new_node() as node: node.init() node.append_conf("shared_preload_libraries='pg_pathman'") node.start() node.safe_psql(""" ->>>>>>> master create extension pg_pathman; create table abc(id serial, t text); insert into abc select generate_series(1, 100000); select create_range_partitions('abc', 'id', 1, 50000); -<<<<<<< HEAD - """ - node.safe_psql('postgres', sql) -======= """) ->>>>>>> master # Start transaction that will create partition with node.connect() as con: @@ -381,13 +234,9 @@ def add_partition(node, flag, query): query = ( "select prepend_range_partition('abc')", "select append_range_partition('abc')", -<<<<<<< HEAD - "select add_range_partition('abc', 500000, 550000)", ) -======= "select add_range_partition('abc', 500000, 550000)", ) ->>>>>>> master threads = [] for i in range(3): thread = threading.Thread( @@ -396,11 +245,7 @@ def add_partition(node, flag, query): thread.start() time.sleep(3) -<<<<<<< HEAD - # This threads should wait until current transaction finished -======= # These threads should wait until current transaction finished ->>>>>>> master with lock: for i in range(3): self.assertEqual(flags[i].get(), False) @@ -422,10 +267,6 @@ def add_partition(node, flag, query): # Check that all partitions are created 
self.assertEqual( node.safe_psql( -<<<<<<< HEAD - 'postgres', -======= ->>>>>>> master "select count(*) from pg_inherits where inhparent='abc'::regclass"), b'6\n') @@ -433,68 +274,21 @@ def test_tablespace(self): """ Check tablespace support """ def check_tablespace(node, tablename, tablespace): -<<<<<<< HEAD - res = node.execute('postgres', - "select get_tablespace('{}')".format(tablename)) -======= res = node.execute("select get_tablespace('{}')".format(tablename)) ->>>>>>> master if len(res) == 0: return False return res[0][0] == tablespace -<<<<<<< HEAD - with get_new_node('master') as node: - node.init() - node.append_conf('postgresql.conf', - "shared_preload_libraries='pg_pathman'\n") - node.start() - node.psql('postgres', 'create extension pg_pathman') -======= with get_new_node() as node: node.init() node.append_conf("shared_preload_libraries='pg_pathman'") node.start() node.psql('create extension pg_pathman') ->>>>>>> master # create tablespace path = os.path.join(node.data_dir, 'test_space_location') os.mkdir(path) -<<<<<<< HEAD - node.psql('postgres', - "create tablespace test_space location '{}'".format(path)) - - # create table in this tablespace - node.psql('postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql('postgres', - "select create_range_partitions('abc', 'a', 1, 10, 3)") - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql('postgres', - "select append_range_partition('abc', 'abc_appended')") - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - "select prepend_range_partition('abc', 'abc_prepended')") - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql('postgres', - "select add_range_partition('abc', 41, 51, 'abc_added')") - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql('postgres', - "select split_range_partition('abc_added', 45, 'abc_splitted')") -======= node.psql("create tablespace test_space location '{}'".format(path)) # create table in this tablespace @@ -519,36 +313,19 @@ def check_tablespace(node, tablename, tablespace): # check tablespace for split node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')") ->>>>>>> master self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) # now let's specify tablespace explicitly node.psql( -<<<<<<< HEAD - 'postgres', "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" ) node.psql( - 'postgres', "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" ) node.psql( - 'postgres', "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" ) node.psql( - 'postgres', -======= - "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" - ) - node.psql( - "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" - ) - node.psql( - "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" - ) - node.psql( ->>>>>>> 
master "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" ) @@ -558,25 +335,11 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) -<<<<<<< HEAD - @if_fdw_enabled -======= @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') ->>>>>>> master def test_foreign_table(self): """ Test foreign tables """ # Start master server -<<<<<<< HEAD - with get_new_node('test') as master, get_new_node('fserv') as fserv: - master.init() - master.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman, postgres_fdw'\n - """) - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') -======= with get_new_node() as master, get_new_node() as fserv: master.init() master.append_conf(""" @@ -585,7 +348,6 @@ def test_foreign_table(self): master.start() master.psql('create extension pg_pathman') master.psql('create extension postgres_fdw') ->>>>>>> master # RANGE partitioning test with FDW: # - create range partitioned table in master @@ -594,26 +356,12 @@ def test_foreign_table(self): # - attach foreign table to partitioned one # - try inserting data into foreign partition via parent # - drop partitions -<<<<<<< HEAD - master.psql('postgres', """ -======= master.psql(""" ->>>>>>> master create table abc(id serial, name text); select create_range_partitions('abc', 'id', 0, 10, 2) """) # Current user name (needed for user mapping) -<<<<<<< HEAD - username = master.execute('postgres', 'select current_user')[0][0] - - fserv.init().start() - fserv.safe_psql('postgres', "create table ftable(id serial, name text)") - fserv.safe_psql('postgres', "insert into ftable values (25, 'foreign')") - - # Create foreign table and attach it to partitioned table - master.safe_psql('postgres', """ -======= username = 
master.execute('select current_user')[0][0] fserv.init().start() @@ -622,52 +370,26 @@ def test_foreign_table(self): # Create foreign table and attach it to partitioned table master.safe_psql(""" ->>>>>>> master create server fserv foreign data wrapper postgres_fdw options (dbname 'postgres', host '127.0.0.1', port '{}') """.format(fserv.port)) -<<<<<<< HEAD - master.safe_psql('postgres', """ -======= master.safe_psql(""" ->>>>>>> master create user mapping for {0} server fserv options (user '{0}') """.format(username)) -<<<<<<< HEAD - master.safe_psql('postgres', """ -======= master.safe_psql(""" ->>>>>>> master import foreign schema public limit to (ftable) from server fserv into public """) master.safe_psql( -<<<<<<< HEAD - 'postgres', -======= ->>>>>>> master "select attach_range_partition('abc', 'ftable', 20, 30)") # Check that table attached to partitioned table self.assertEqual( -<<<<<<< HEAD - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n') - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', "insert into abc values (26, 'part')") - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by id'), - b'25|foreign\n26|part\n') - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', "select drop_partitions('abc')") -======= master.safe_psql('select * from ftable'), b'25|foreign\n') @@ -679,7 +401,6 @@ def test_foreign_table(self): # Testing drop partitions (including foreign partitions) master.safe_psql("select drop_partitions('abc')") ->>>>>>> master # HASH partitioning with FDW: # - create hash partitioned table in master @@ -687,31 +408,6 @@ def test_foreign_table(self): # - replace local partition with foreign one # - insert data # - drop partitions -<<<<<<< HEAD - master.psql('postgres', """ - create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2) - """) - 
fserv.safe_psql('postgres', - 'create table f_hash_test(id serial, name text)') - - master.safe_psql('postgres', """ - import foreign schema public limit to (f_hash_test) - from server fserv into public - """) - master.safe_psql('postgres', """ - select replace_hash_partition('hash_test_1', 'f_hash_test') - """) - master.safe_psql('postgres', - 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') - master.safe_psql('postgres', "select drop_partitions('hash_test')") - - @if_fdw_enabled -======= master.psql(""" create table hash_test(id serial, name text); select create_hash_partitions('hash_test', 'id', 2) @@ -733,23 +429,14 @@ def test_foreign_table(self): master.safe_psql("select drop_partitions('hash_test')") @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') ->>>>>>> master def test_parallel_nodes(self): """ Test parallel queries under partitions """ # Init and start postgres instance with preload pg_pathman module -<<<<<<< HEAD - with get_new_node('test') as node: - node.init() - node.append_conf( - 'postgresql.conf', - "shared_preload_libraries='pg_pathman, postgres_fdw'\n") -======= with get_new_node() as node: node.init() node.append_conf( "shared_preload_libraries='pg_pathman, postgres_fdw'") ->>>>>>> master node.start() # Check version of postgres server @@ -758,13 +445,8 @@ def test_parallel_nodes(self): return # Prepare test database -<<<<<<< HEAD - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', """ -======= node.psql('create extension pg_pathman') node.psql(""" ->>>>>>> master create table range_partitioned as select generate_series(1, 1e4::integer) i; @@ -779,15 +461,9 @@ def test_parallel_nodes(self): """) # create statistics for both partitioned tables -<<<<<<< HEAD - node.psql('postgres', 'vacuum analyze') - - node.psql('postgres', """ -======= node.psql('vacuum analyze') 
node.psql(""" ->>>>>>> master create or replace function query_plan(query text) returns jsonb as $$ declare @@ -945,15 +621,9 @@ def test_parallel_nodes(self): self.assertEqual(ordered(plan), ordered(expected)) # Remove all objects for testing -<<<<<<< HEAD - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') -======= node.psql('drop table range_partitioned cascade') node.psql('drop table hash_partitioned cascade') node.psql('drop extension pg_pathman cascade') ->>>>>>> master def test_conc_part_drop_runtime_append(self): """ Test concurrent partition drop + SELECT (RuntimeAppend) """ @@ -1196,225 +866,6 @@ def con2_thread(): self.assertEqual(str(rows[0][1]), 'ins_test_1') def test_pg_dump(self): -<<<<<<< HEAD - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - # Init and start postgres instance with preload pg_pathman module - with get_new_node('test') as node: - node.init() - node.append_conf('postgresql.conf', """ - shared_preload_libraries='pg_pathman' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute( - 'insert into range_partitioned select i from generate_series(1, 500) i' - ) - con.execute('create table hash_partitioned (i integer not null)') - con.execute( - 'insert into hash_partitioned select i from generate_series(1, 500) i' - ) - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute( - "select create_range_partitions('range_partitioned', 'i', 1, 200, partition_data := false)" - ) - con.execute( - "select create_hash_partitions('hash_partitioned', 'i', 5, false)" - ) - - # fillin child tables with remain data - con.execute( - 'insert into range_partitioned select i from generate_series(501, 1000) i' - ) - con.execute( - 'insert into hash_partitioned select i from generate_series(501, 1000) i' - ) - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute( - "select set_init_callback('range_partitioned', 'init_partition_stub_callback(jsonb)')" - ) - con.execute( - "select set_init_callback('hash_partitioned', 'init_partition_stub_callback(jsonb)')" - ) - - # turn off enable_parent option - con.execute( - "select set_enable_parent('range_partitioned', false)") - con.execute("select 
set_enable_parent('hash_partitioned', false)") - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - - def cmp_full(con1, con2): - """ - Compare selection partitions in plan - and contents in partitioned tables - """ - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', 'only range_partitioned', - 'hash_partitioned', 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute( - plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute( - plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [ - x[0] for x in con1.execute(content_query % table_ref) - ] - content_copy = [ - x[0] for x in con2.execute(content_query % table_ref) - ] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', - 'alter system set pg_pathman.override_copy to off') - node.psql('copy', - 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, None, [ - get_bin_path("pg_dump"), "-p {}".format(node.port), - "initial" - ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, turnon_pathman, [ - get_bin_path("pg_dump"), "-p {}".format(node.port), - "--inserts", "initial" - ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, None, [ - get_bin_path("pg_dump"), 
"-p {}".format(node.port), - "--format=custom", "initial" - ], [ - get_bin_path("pg_restore"), "-p {}".format(node.port), - "--dbname=copy" - ], cmp_full), # dump in archive format - ] - - with open(os.devnull, 'w') as fnull: - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), - ' '.join(pg_restore_params))) - - if (preproc is not None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen( - pg_restore_params, - stdin=subprocess.PIPE, - stdout=fnull, - stderr=fnull) - p2.communicate(input=stdoutdata) - - if (postproc is not None): - postproc(node) - - # validate data - with node.connect('initial') as con1, \ - node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual( - cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" - % dump_restore_cmd) - self.assertNotEqual( - cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" - % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar to 
'(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') -======= with self.start_new_pathman_cluster() as node: node.safe_psql('create database copy') @@ -1453,8 +904,6 @@ def turnon_pathman(node): p2 = node.execute('copy', 'select * from pathman_partition_list') self.assertEqual(sorted(p1), sorted(p2)) ->>>>>>> master - def test_concurrent_detach(self): """ Test concurrent detach partition with contiguous @@ -1537,7 +986,6 @@ def test_concurrent_detach(self): Race condition between detach and concurrent inserts with append partition is expired """) -<<<<<<< HEAD def test_update_node_plan1(self): ''' @@ -1614,8 +1062,6 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') -======= ->>>>>>> master if __name__ == "__main__": From f6610e696f97f05e68a1ed12dbd05d76d6a96629 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 3 Jul 2018 23:16:34 +0300 Subject: [PATCH 0871/1124] treat SubLinks differently --- src/planner_tree_modification.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index c4b4073d..ff5b51fb 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -625,6 +625,14 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) return false; } + if (IsA(node, SubLink)) + { + SubLink *sl = (SubLink *) node; + + /* Examine its 
expression */ + node = sl->testexpr; + } + return expression_tree_walker(node, adjust_appendrel_varnos, context); From 26ed609e4aae6695c9f1a231db5cfb056a3d3412 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 00:37:21 +0300 Subject: [PATCH 0872/1124] more tests for rebuilt updates --- expected/pathman_rebuild_updates.out | 30 ++++++++++++++++++++++++++++ sql/pathman_rebuild_updates.sql | 16 +++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index 297089af..5c54f2cd 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -24,6 +24,7 @@ SELECT append_range_partition('test_updates.test'); (1 row) INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; QUERY PLAN @@ -54,6 +55,35 @@ UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLA 101 | 0 | test_updates.test_11 (1 row) +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +DROP TABLE test_updates.test_dummy; DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 13 other objects DROP 
EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index ec4924ea..fc827dd3 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -22,6 +22,9 @@ SELECT append_range_partition('test_updates.test'); INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; + + /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; @@ -31,6 +34,19 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; +CREATE TABLE test_updates.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +DROP TABLE test_updates.test_dummy; + DROP SCHEMA test_updates CASCADE; From 7747219f48ad044402199bdfdb6058955ea1c460 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 14:40:20 +0300 Subject: [PATCH 0873/1124] don't use varoattno in adjust_appendrel_varnos() --- src/planner_tree_modification.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ff5b51fb..f404300e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -605,7 +605,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) Var *var = (Var *) node; /* Don't transform system columns & other relations' Vars */ - if (var->varoattno > 0 && var->varno == context->child_varno) + if 
(var->varattno > 0 && var->varno == context->child_varno) { Var *child_var; From 1c436e82457212097047fc3d89e83ed9ae56f64a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 14:57:24 +0300 Subject: [PATCH 0874/1124] decouple Append & MergeAppend in plan_tree_walker() --- src/planner_tree_modification.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 3225e59e..4a9b8c40 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -132,9 +132,9 @@ reset_query_id_generator(void) /* - * Basic plan tree walker + * Basic plan tree walker. * - * 'visitor' is applied right before return + * 'visitor' is applied right before return. */ void plan_tree_walker(Plan *plan, @@ -165,15 +165,16 @@ plan_tree_walker(Plan *plan, plan_tree_walker((Plan *) lfirst(l), visitor, context); break; - /* Since they look alike */ - case T_MergeAppend: case T_Append: - Assert(offsetof(Append, appendplans) == - offsetof(MergeAppend, mergeplans)); foreach(l, ((Append *) plan)->appendplans) plan_tree_walker((Plan *) lfirst(l), visitor, context); break; + case T_MergeAppend: + foreach(l, ((MergeAppend *) plan)->mergeplans) + plan_tree_walker((Plan *) lfirst(l), visitor, context); + break; + case T_BitmapAnd: foreach(l, ((BitmapAnd *) plan)->bitmapplans) plan_tree_walker((Plan *) lfirst(l), visitor, context); From 2ce250c8d93bb2b3ae2ec7c8e77589311e7cb7ee Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 15:15:50 +0300 Subject: [PATCH 0875/1124] copy expression trees for safety --- src/planner_tree_modification.c | 93 ++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 31 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index c18bf137..04474fda 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -109,7 +109,8 @@ static void 
handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); -static bool adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static bool inh_translation_list_is_trivial(List *translated_vars); /* @@ -389,7 +390,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) rte = rt_fetch(result_rti, parse->rtable); - /* Exit if it's DELETE FROM ONLY table */ + /* Exit if it's ONLY table */ if (!rte->inh) return; prel = get_pathman_relation_info(rte->relid); @@ -465,33 +466,37 @@ handle_modification_query(Query *parse, transform_query_cxt *context) return; /* nothing to do here */ } + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; + + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + /* Both tables are already locked */ child_rel = heap_open(child, NoLock); parent_rel = heap_open(parent, NoLock); make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); - /* Translate varnos for this child */ - aav_cxt.child_varno = result_rti; - aav_cxt.parent_relid = parent; - aav_cxt.translated_vars = translated_vars; - adjust_appendrel_varnos((Node *) parse, &aav_cxt); - - /* Translate column privileges for this child */ - rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); - rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); - rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); + /* Perform some additional adjustments */ + if (!inh_translation_list_is_trivial(translated_vars)) + { + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.translated_vars = translated_vars; + 
adjust_appendrel_varnos((Node *) parse, &aav_cxt); + + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); + } /* Close relations (should remain locked, though) */ heap_close(child_rel, NoLock); heap_close(parent_rel, NoLock); - - /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; - rte->relkind = child_relkind; - - /* HACK: unset the 'inh' flag (no children) */ - rte->inh = false; } } @@ -562,11 +567,11 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) } /* Remap parent's attributes to child ones s*/ -static bool +static Node * adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { if (node == NULL) - return false; + return NULL; if (IsA(node, Query)) { @@ -577,7 +582,7 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) foreach (lc, query->targetList) { TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *child_var; + Var *child_var; if (te->resjunk) continue; @@ -595,10 +600,12 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) te->resno = child_var->varattno; } - return query_tree_walker((Query *) node, - adjust_appendrel_varnos, - context, - QTW_IGNORE_RC_SUBQUERIES); + /* NOTE: we shouldn't copy top-level Query */ + return (Node *) query_tree_mutator((Query *) node, + adjust_appendrel_varnos, + context, + (QTW_IGNORE_RC_SUBQUERIES | + QTW_DONT_COPY_QUERY)); } if (IsA(node, Var)) @@ -610,6 +617,8 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { Var *child_var; + var = copyObject(var); + if (var->varattno > list_length(context->translated_vars)) elog(ERROR, "attribute %d of relation \"%s\" does not exist", var->varattno, get_rel_name(context->parent_relid)); @@ -623,7 +632,7 @@ 
adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) var->varattno = child_var->varattno; } - return false; + return (Node *) var; } if (IsA(node, SubLink)) @@ -631,14 +640,36 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) SubLink *sl = (SubLink *) node; /* Examine its expression */ - node = sl->testexpr; + sl->testexpr = expression_tree_mutator(sl->testexpr, + adjust_appendrel_varnos, + context); + return (Node *) sl; } - return expression_tree_walker(node, - adjust_appendrel_varnos, - context); + return expression_tree_mutator(node, + adjust_appendrel_varnos, + context); } +/* Check whether Var translation list is trivial (no shuffle) */ +static bool +inh_translation_list_is_trivial(List *translated_vars) +{ + ListCell *lc; + AttrNumber i = 1; + + foreach (lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + if (var && var->varattno != i) + return false; + + i++; + } + + return true; +} /* * ------------------------------- From 79a1b89c375f59d8f4c375c0256582884384904b Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 4 Jul 2018 16:02:27 +0300 Subject: [PATCH 0876/1124] handle wholerow references as well --- expected/pathman_rebuild_updates.out | 19 ++++++++++ sql/pathman_rebuild_updates.sql | 9 +++++ src/planner_tree_modification.c | 57 +++++++++++++++++++--------- 3 files changed, 68 insertions(+), 17 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index 5c54f2cd..f7d59718 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -83,6 +83,25 @@ RETURNING t1.*, t1.tableoid::REGCLASS; Filter: (val = 101) (6 rows) +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: 
(((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + DROP TABLE test_updates.test_dummy; DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 13 other objects diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index fc827dd3..41d168df 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -45,6 +45,15 @@ FROM test_updates.test_dummy t2 WHERE t1.val = 101 AND t1.val = t2.val RETURNING t1.*, t1.tableoid::REGCLASS; +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + DROP TABLE test_updates.test_dummy; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 04474fda..63cf9963 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -96,7 +96,9 @@ typedef struct typedef struct { Index child_varno; - Oid parent_relid; + Oid parent_relid, + parent_reltype, + child_reltype; List *translated_vars; } adjust_appendrel_varnos_cxt; @@ -483,9 +485,11 @@ handle_modification_query(Query *parse, transform_query_cxt *context) if (!inh_translation_list_is_trivial(translated_vars)) { /* Translate varnos for this child */ - aav_cxt.child_varno = result_rti; - aav_cxt.parent_relid = parent; - aav_cxt.translated_vars = translated_vars; + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.parent_reltype = RelationGetDescr(parent_rel)->tdtypeid; + aav_cxt.child_reltype = RelationGetDescr(child_rel)->tdtypeid; + aav_cxt.translated_vars = translated_vars; adjust_appendrel_varnos((Node *) parse, &aav_cxt); /* Translate column privileges for this child */ @@ -612,24 +616,43 @@ 
adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) { Var *var = (Var *) node; - /* Don't transform system columns & other relations' Vars */ - if (var->varattno > 0 && var->varno == context->child_varno) + /* See adjust_appendrel_attrs_mutator() */ + if (var->varno == context->child_varno) { - Var *child_var; + if (var->varattno > 0) + { + Var *child_var; - var = copyObject(var); + var = copyObject(var); - if (var->varattno > list_length(context->translated_vars)) - elog(ERROR, "attribute %d of relation \"%s\" does not exist", - var->varattno, get_rel_name(context->parent_relid)); + if (var->varattno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); - child_var = list_nth(context->translated_vars, var->varattno - 1); - if (!child_var) - elog(ERROR, "attribute %d of relation \"%s\" does not exist", - var->varattno, get_rel_name(context->parent_relid)); + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); - /* Transform attribute number */ - var->varattno = child_var->varattno; + /* Transform attribute number */ + var->varattno = child_var->varattno; + } + else if (var->varattno == 0) + { + ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr); + + Assert(var->vartype = context->parent_reltype); + + r->arg = (Expr *) var; + r->resulttype = context->parent_reltype; + r->convertformat = COERCE_IMPLICIT_CAST; + r->location = -1; + + /* Make sure the Var node has the right type ID, too */ + var->vartype = context->child_reltype; + + return (Node *) r; + } } return (Node *) var; From 1608d8d3ff1bc77cb4412b3b6acb16c83c170879 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 5 Jul 2018 12:34:23 +0300 Subject: [PATCH 0877/1124] small refactoring in run_tests.sh --- run_tests.sh | 58 
+++++++++++++++++++++------------------------------- 1 file changed, 23 insertions(+), 35 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 49c481b9..2dbcfd0c 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,33 +6,23 @@ # * cmocka-based tests # Copyright (c) 2017, Postgres Professional -set -eux +set -ux echo CHECK_CODE=$CHECK_CODE +echo PG_VERSION=$(pg_config --version) status=0 +# change relevant core dump settings +CORE_DIR=/tmp/cores +ulimit -c unlimited -S +mkdir "$CORE_DIR" +echo "$CORE_DIR/%e-%s-%p.core" | sudo tee /proc/sys/kernel/core_pattern + # perform code analysis if necessary if [ "$CHECK_CODE" = "clang" ]; then scan-build --status-bugs make USE_PGXS=1 || status=$? exit $status - -elif [ "$CHECK_CODE" = "cppcheck" ]; then - cppcheck \ - --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=literalWithCharPtrCompare \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/include/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status fi # we need testgres for pathman tests @@ -42,28 +32,20 @@ source env/bin/activate pip install testgres pip freeze | grep testgres -# don't forget to "make clean" -make USE_PGXS=1 clean - # initialize database initdb # build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) +set -e +make USE_PGXS=1 clean make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi +set +e # add pg_pathman to shared_preload_libraries and restart cluster 'test' echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf echo "port = 55435" >> $PGDATA/postgresql.conf -pg_ctl start -l /tmp/postgres.log -w - -# check startup -status=$? 
-if [ $status -ne 0 ]; then cat /tmp/postgres.log; fi +pg_ctl start -l /tmp/postgres.log -w || cat /tmp/postgres.log # run regression tests export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) @@ -72,18 +54,22 @@ PGPORT=55435 make USE_PGXS=1 installcheck || status=$? # show diff if it exists if test -f regression.diffs; then cat regression.diffs; fi -set +u +# list cores and exit if we failed +ls "$CORE_DIR" +if [ $status -ne 0 ]; then exit $status; fi # run python tests +set +u make USE_PGXS=1 python_tests || status=$? -if [ $status -ne 0 ]; then exit $status; fi - set -u -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? +# list cores and exit if we failed +ls "$CORE_DIR" if [ $status -ne 0 ]; then exit $status; fi +# run cmocka tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || exit $? + # remove useless gcov files rm -f tests/cmocka/*.gcno rm -f tests/cmocka/*.gcda @@ -92,6 +78,8 @@ rm -f tests/cmocka/*.gcda gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h # send coverage stats to Coveralls +set +u bash <(curl -s https://codecov.io/bash) +set -u exit $status From 21330156691f4d3f680a0a000b7d8191bbd2a887 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 5 Jul 2018 18:10:15 +0300 Subject: [PATCH 0878/1124] update test environment (Docker etc) --- .dockerignore | 5 ++ .travis.yml | 30 ++++---- Dockerfile.tmpl | 54 ++++++++------ make_images.py | 139 ----------------------------------- mk_dockerfile.sh | 16 ++++ run_tests.sh | 187 ++++++++++++++++++++++++++++++++--------------- 6 files changed, 200 insertions(+), 231 deletions(-) create mode 100644 .dockerignore delete mode 100755 make_images.py create mode 100755 mk_dockerfile.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ce3c9e6f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +*.gcno +*.gcda +*.gcov +*.so +*.o diff 
--git a/.travis.yml b/.travis.yml index 265ac48d..051401f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,4 @@ -os: - - linux - sudo: required -dist: trusty language: c @@ -10,18 +6,26 @@ services: - docker install: - - echo "FROM ${DOCKER_IMAGE}" > Dockerfile + - ./mk_dockerfile.sh - docker-compose build script: - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests +notifications: + email: + on_success: change + on_failure: always + env: - - DOCKER_IMAGE=pathman/pg95_clang_check_code - - DOCKER_IMAGE=pathman/pg95_pathman_tests - - DOCKER_IMAGE=pathman/pg96_clang_check_code - - DOCKER_IMAGE=pathman/pg96_pathman_tests - - DOCKER_IMAGE=pathman/pg10_clang_check_code - - DOCKER_IMAGE=pathman/pg10_pathman_tests - - DOCKER_IMAGE=pathman/pg10_ca_clang_check_code - - DOCKER_IMAGE=pathman/pg10_ca_pathman_tests + - PG_VERSION=10 LEVEL=nightmare + - PG_VERSION=10 LEVEL=hardcore + - PG_VERSION=10 + - PG_VERSION=9.6 LEVEL=hardcore + - PG_VERSION=9.6 + - PG_VERSION=9.5 LEVEL=hardcore + - PG_VERSION=9.5 + +matrix: + allow_failures: + - env: PG_VERSION=10 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 5ceaeb99..021a2850 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -1,30 +1,40 @@ -FROM ${PG_IMAGE} +FROM postgres:${PG_VERSION}-alpine -ENV LANG=C.UTF-8 PGDATA=/pg/data +# Install dependencies +RUN apk add --no-cache \ + openssl curl \ + cmocka-dev \ + perl perl-ipc-run \ + python3 python3-dev py-virtualenv \ + coreutils linux-headers \ + make musl-dev gcc bison flex \ + zlib-dev libedit-dev \ + clang clang-analyzer; + +# Install fresh valgrind +RUN apk add valgrind \ + --update-cache \ + --repository http://dl-3.alpinelinux.org/alpine/edge/main; -RUN if [ "${CHECK_CODE}" = "clang" ] ; then \ - echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add clang-analyzer make musl-dev gcc; \ - fi +# Environment +ENV LANG=C.UTF-8 
PGDATA=/pg/data -RUN if [ "${CHECK_CODE}" = "cppcheck" ] ; then \ - apk --no-cache add cppcheck --repository http://dl-cdn.alpinelinux.org/alpine/v3.6/community; \ - fi +# Make directories +RUN mkdir -p ${PGDATA} && \ + mkdir -p /pg/testdir -RUN if [ "${CHECK_CODE}" = "false" ] ; then \ - echo 'http://dl-3.alpinelinux.org/alpine/edge/main' > /etc/apk/repositories; \ - apk --no-cache add curl python3 python3-dev gcc make musl-dev cmocka-dev linux-headers;\ - pip3 install virtualenv;\ - fi +# Add data to test dir +ADD . /pg/testdir -RUN mkdir -p /pg/data && \ - mkdir /pg/pg_pathman && \ - chown postgres:postgres ${PGDATA} && \ +# Grant privileges +RUN chown -R postgres:postgres ${PGDATA} && \ + chown -R postgres:postgres /pg/testdir && \ chmod a+rwx /usr/local/lib/postgresql && \ chmod a+rwx /usr/local/share/postgresql/extension -ONBUILD ADD . /pg/pg_pathman -ONBUILD WORKDIR /pg/pg_pathman -ONBUILD RUN chmod -R go+rwX /pg/pg_pathman -ONBUILD USER postgres -ONBUILD ENTRYPOINT PGDATA=${PGDATA} CHECK_CODE=${CHECK_CODE} bash run_tests.sh +COPY run_tests.sh /run.sh +RUN chmod 755 /run.sh + +USER postgres +WORKDIR /pg/testdir +ENTRYPOINT LEVEL=${LEVEL} /run.sh diff --git a/make_images.py b/make_images.py deleted file mode 100755 index 9c9b6e43..00000000 --- a/make_images.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 - -import os -import subprocess -import getpass -import requests -import tempfile - -from urllib.parse import urljoin -from urllib.request import urlopen - -DOCKER_ID = 'pathman' -ALPINE_BASE_URL = 'https://raw.githubusercontent.com/docker-library/postgres/master/10/alpine/' -ALPINE_ENTRYPOINT = 'docker-entrypoint.sh' - -''' -How to create this patch: - * put `import ipdb; ipdb.set_trace()` in make_alpine_image, just before `open(patch_name)..` - * run the script - * in temporary folder run `cp Dockerfile Dockerfile.1 && vim Dockerfile.1` - * uncomment --enable-debug, add 
--enable-cassert, add `CFLAGS="-g3 -O0"` before ./configure - * run `diff -Naur Dockerfile Dockerfile.1 > ./cassert.patch` - * contents of cassert.patch put to variable below - * change Dockerfile.1 to Dockerfile in text, change `\` symbols to `\\` -''' -ALPINE_PATCH = b''' ---- Dockerfile 2017-09-25 12:01:24.597813507 +0300 -+++ Dockerfile 2017-09-25 12:09:06.104059704 +0300 -@@ -79,15 +79,15 @@ - && wget -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' \\ - # configure options taken from: - # https://anonscm.debian.org/cgit/pkg-postgresql/postgresql.git/tree/debian/rules?h=9.5 -- && ./configure \\ -+ && CFLAGS="-g3 -O0" ./configure \\ - --build="$gnuArch" \\ - # "/usr/src/postgresql/src/backend/access/common/tupconvert.c:105: undefined reference to `libintl_gettext'" - # --enable-nls \\ - --enable-integer-datetimes \\ - --enable-thread-safety \\ - --enable-tap-tests \\ --# skip debugging info -- we want tiny size instead --# --enable-debug \\ -+ --enable-debug \\ -+ --enable-cassert \\ - --disable-rpath \\ - --with-uuid=e2fs \\ - --with-gnu-ld \\ -''' -CUSTOM_IMAGE_NAME = "%s/postgres_stable" % DOCKER_ID - -def make_alpine_image(image_name): - dockerfile = urlopen(urljoin(ALPINE_BASE_URL, 'Dockerfile')).read() - entrypoint_sh = urlopen(urljoin(ALPINE_BASE_URL, ALPINE_ENTRYPOINT)).read() - - with tempfile.TemporaryDirectory() as tmpdir: - print("Creating build in %s" % tmpdir) - patch_name = os.path.join(tmpdir, "cassert.patch") - - with open(os.path.join(tmpdir, 'Dockerfile'), 'w') as f: - f.write(dockerfile.decode()) - - with open(os.path.join(tmpdir, ALPINE_ENTRYPOINT), 'w') as f: - f.write(entrypoint_sh.decode()) - - with open(patch_name, 'w') as f: - f.write(ALPINE_PATCH.decode()) - - with open(patch_name, 'r') as f: - p = subprocess.Popen(["patch", "-p0"], cwd=tmpdir, stdin=subprocess.PIPE) - p.communicate(str.encode(f.read())) - 
print("patch applied") - subprocess.check_output(["docker", "build", ".", '-t', image_name], cwd=tmpdir) - print("build ok: ", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - -make_alpine_image(CUSTOM_IMAGE_NAME) - -pg_containers = [ - ('pg95', 'postgres:9.5-alpine'), - ('pg96', 'postgres:9.6-alpine'), - ('pg10', 'postgres:10-alpine'), - ('pg10_ca', CUSTOM_IMAGE_NAME), -] - -image_types = { - 'clang_check_code': { - 'CHECK_CODE': 'clang', - }, - 'cppcheck': { - 'CHECK_CODE': 'cppcheck', - }, - 'pathman_tests': { - 'CHECK_CODE': 'false', - } -} - -user = input("Enter username for `docker login`: ") -password = getpass.getpass() -subprocess.check_output([ - 'docker', - 'login', - '-u', user, - '-p', password]) - -travis_conf_line = '- DOCKER_IMAGE=%s' -travis_conf = [] -print("") - -if __name__ == '__main__': - for pgname, container in pg_containers: - for key, variables in image_types.items(): - image_name = '%s/%s_%s' % (DOCKER_ID, pgname, key) - with open('Dockerfile', 'w') as out: - with open('Dockerfile.tmpl', 'r') as f: - for line in f: - line = line.replace('${PG_IMAGE}', container) - for key, value in variables.items(): - varname = '${%s}' % key - line = line.replace(varname, value) - - out.write(line) - - args = [ - 'docker', - 'build', - '-t', image_name, - '.' - ] - subprocess.check_output(args, stderr=subprocess.STDOUT) - print("build ok:", image_name) - subprocess.check_output(['docker', 'push', image_name], - stderr=subprocess.STDOUT) - print("upload ok:", image_name) - travis_conf.append(travis_conf_line % image_name) - -print("\ntravis configuration") -print('\n'.join(travis_conf)) diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh new file mode 100755 index 00000000..f15433c4 --- /dev/null +++ b/mk_dockerfile.sh @@ -0,0 +1,16 @@ +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! 
+ exit 1 +fi + +if [ -z ${LEVEL+x} ]; then + LEVEL=scan-build +fi + +echo PG_VERSION=${PG_VERSION} +echo LEVEL=${LEVEL} + +sed \ + -e 's/${PG_VERSION}/'${PG_VERSION}/g \ + -e 's/${LEVEL}/'${LEVEL}/g \ + Dockerfile.tmpl > Dockerfile diff --git a/run_tests.sh b/run_tests.sh index 2dbcfd0c..a11be8f4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,85 +1,158 @@ -#!/bin/bash - -# This is a main testing script for: -# * regression tests -# * testgres-based tests -# * cmocka-based tests -# Copyright (c) 2017, Postgres Professional +#!/usr/bin/env bash + +# +# Copyright (c) 2018, Postgres Professional +# +# supported levels: +# * standard +# * scan-build +# * hardcore +# * nightmare +# set -ux +status=0 -echo CHECK_CODE=$CHECK_CODE -echo PG_VERSION=$(pg_config --version) +# global exports +export PGPORT=55435 +export VIRTUAL_ENV_DISABLE_PROMPT=1 -status=0 +# rebuild PostgreSQL with cassert + valgrind support +if [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + + set -e + + CUSTOM_PG_BIN=$PWD/pg_bin + CUSTOM_PG_SRC=$PWD/postgresql + + # here PG_VERSION is provided by postgres:X-alpine docker image + curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 + echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - + + mkdir $CUSTOM_PG_SRC -# change relevant core dump settings -CORE_DIR=/tmp/cores -ulimit -c unlimited -S -mkdir "$CORE_DIR" -echo "$CORE_DIR/%e-%s-%p.core" | sudo tee /proc/sys/kernel/core_pattern + tar \ + --extract \ + --file postgresql.tar.bz2 \ + --directory $CUSTOM_PG_SRC \ + --strip-components 1 -# perform code analysis if necessary -if [ "$CHECK_CODE" = "clang" ]; then - scan-build --status-bugs make USE_PGXS=1 || status=$? 
- exit $status + cd $CUSTOM_PG_SRC + + # enable Valgrind support + sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h + + # enable additional options + ./configure \ + CFLAGS='-O0 -ggdb3 -fno-omit-frame-pointer' \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + + time make -s -j$(nproc) && make -s install + + # override default PostgreSQL instance + export PATH=$CUSTOM_PG_BIN/bin:$PATH + export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib + + # show pg_config path (just in case) + which pg_config + + cd - + + set +e fi -# we need testgres for pathman tests -virtualenv env -export VIRTUAL_ENV_DISABLE_PROMPT=1 -source env/bin/activate -pip install testgres -pip freeze | grep testgres +# show pg_config just in case +pg_config + +# perform code checks if asked to +if [ "$LEVEL" = "scan-build" ] || \ + [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + + # perform static analyzis + scan-build --status-bugs make USE_PGXS=1 || status=$? + + # something's wrong, exit now! 
+ if [ $status -ne 0 ]; then exit 1; fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi -# initialize database -initdb -# build pg_pathman (using PG_CPPFLAGS and SHLIB_LINK for gcov) -set -e -make USE_PGXS=1 clean +# build and install extension (using PG_CPPFLAGS and SHLIB_LINK for gcov) make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" make USE_PGXS=1 install -set +e -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $PGDATA/postgresql.conf -echo "port = 55435" >> $PGDATA/postgresql.conf -pg_ctl start -l /tmp/postgres.log -w || cat /tmp/postgres.log +# initialize database +initdb -D $PGDATA + +# change PG's config +echo "port = $PGPORT" >> $PGDATA/postgresql.conf +cat conf.add >> $PGDATA/postgresql.conf + +# restart cluster 'test' +if [ "$LEVEL" = "nightmare" ]; then + ls $CUSTOM_PG_BIN/bin + + valgrind \ + --tool=memcheck \ + --leak-check=no \ + --time-stamp=yes \ + --track-origins=yes \ + --trace-children=yes \ + --gen-suppressions=all \ + --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ + --suppressions=$PWD/valgrind.supp \ + --log-file=/tmp/valgrind-%p.log \ + pg_ctl start -l /tmp/postgres.log -w || status=$? +else + pg_ctl start -l /tmp/postgres.log -w || status=$? +fi + +# something's wrong, exit now! +if [ $status -ne 0 ]; then cat /tmp/postgres.log; exit 1; fi # run regression tests export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) -PGPORT=55435 make USE_PGXS=1 installcheck || status=$? +make USE_PGXS=1 installcheck || status=$? # show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - -# list cores and exit if we failed -ls "$CORE_DIR" -if [ $status -ne 0 ]; then exit $status; fi +if [ -f regression.diffs ]; then cat regression.diffs; fi # run python tests -set +u +set +x +virtualenv /tmp/env && source /tmp/env/bin/activate && pip install testgres make USE_PGXS=1 python_tests || status=$? 
-set -u - -# list cores and exit if we failed -ls "$CORE_DIR" -if [ $status -ne 0 ]; then exit $status; fi +deactivate +set -x + +# show Valgrind logs if necessary +if [ "$LEVEL" = "nightmare" ]; then + for f in $(find /tmp -name valgrind-*.log); do + if grep -q 'Command: [^ ]*/postgres' $f && grep -q 'ERROR SUMMARY: [1-9]' $f; then + echo "========= Contents of $f" + cat $f + status=1 + fi + done +fi # run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || exit $? +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda +# something's wrong, exit now! +if [ $status -ne 0 ]; then exit 1; fi # generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h +gcov *.c *.h -# send coverage stats to Coveralls -set +u -bash <(curl -s https://codecov.io/bash) -set -u -exit $status +set +ux + + +# send coverage stats to Codecov +bash <(curl -s https://codecov.io/bash) From add3e9c265521c7c147922eabc42b9ea87663933 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 6 Jul 2018 04:00:33 +0300 Subject: [PATCH 0879/1124] remove dead code & fix logic in get_pathman_relation_info() --- src/relation_info.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index ef170b58..449636a7 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -322,7 +322,6 @@ PartRelationInfo * get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; - bool refresh; /* Should always be called in transaction */ Assert(IsTransactionState()); @@ -331,24 +330,18 @@ get_pathman_relation_info(Oid relid) if (relid < FirstNormalObjectId) return NULL; - /* Create a new entry for this table if needed */ + /* Do we know anything about this relation? 
*/ psin = pathman_cache_search_relid(status_cache, relid, HASH_FIND, NULL); - /* Should we build a new PartRelationInfo? */ - refresh = psin ? - (psin->prel && - !PrelIsFresh(psin->prel) && - PrelReferenceCount(psin->prel) == 0) : - true; - - if (refresh) + if (!psin) { PartRelationInfo *prel = NULL; ItemPointerData iptr; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; + bool found; /* Check if PATHMAN_CONFIG table contains this relation */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) @@ -364,21 +357,19 @@ get_pathman_relation_info(Oid relid) prel = build_pathman_relation_info(relid, values); } - /* Create a new entry for this table if needed */ - if (!psin) - { - bool found; - - psin = pathman_cache_search_relid(status_cache, - relid, HASH_ENTER, - &found); - Assert(!found); - } + /* Create a new entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); /* it shouldn't just appear out of thin air */ /* Cache fresh entry */ psin->prel = prel; } + /* Check invariants */ + Assert(!psin->prel || PrelIsFresh(psin->prel)); + #ifdef USE_RELINFO_LOGGING elog(DEBUG2, "fetching %s record for parent %u [%u]", From fbc89f677df87f18a5be4bbb283ef3c69903a646 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 9 Jul 2018 15:24:46 +0300 Subject: [PATCH 0880/1124] fix Valgrind startup & config --- .travis.yml | 2 ++ run_tests.sh | 1 - src/hooks.c | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 051401f6..5b1732a6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,7 @@ env: - PG_VERSION=10 LEVEL=nightmare - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 + - PG_VERSION=9.6 LEVEL=nightmare - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 - PG_VERSION=9.5 LEVEL=hardcore @@ -29,3 +30,4 @@ env: matrix: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare + - env: PG_VERSION=9.6 LEVEL=nightmare diff --git 
a/run_tests.sh b/run_tests.sh index a11be8f4..d0581e7f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -106,7 +106,6 @@ if [ "$LEVEL" = "nightmare" ]; then --trace-children=yes \ --gen-suppressions=all \ --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ - --suppressions=$PWD/valgrind.supp \ --log-file=/tmp/valgrind-%p.log \ pg_ctl start -l /tmp/postgres.log -w || status=$? else diff --git a/src/hooks.c b/src/hooks.c index 4efa9d1c..1088b27a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -826,6 +826,8 @@ pathman_relcache_hook(Datum arg, Oid relid) if (relid == InvalidOid) { invalidate_pathman_status_info_cache(); + + /* FIXME: reset other caches as well */ } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ From 13878ea0966f2f920ab2f0bc4e548ad889c52ee7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 16:11:13 +0300 Subject: [PATCH 0881/1124] Fix endless loop in partition_filter.c --- src/partition_filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 87facbc0..65107759 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1215,7 +1215,7 @@ fetch_estate_mod_data(EState *estate) if (cb->func == pf_memcxt_callback) return (estate_mod_data *) cb->arg; - cb = estate_mcxt->reset_cbs->next; + cb = cb->next; } /* Have to create a new one */ From dde913b2203716acfddebd29f3d51884a602db47 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 13:31:44 +0300 Subject: [PATCH 0882/1124] bump lib version to 1.4.13 --- META.json | 15 ++++++++------- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/META.json b/META.json index 0cfa8dc2..a198d696 100644 --- a/META.json +++ b/META.json @@ -1,10 +1,9 @@ { "name": "pg_pathman", - "abstract": "Partitioning tool", - "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage 
partitions.", - "version": "1.4.12", + "abstract": "Fast partitioning tool for PostgreSQL", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", + "version": "1.4.13", "maintainer": [ - "Ildar Musin ", "Dmitry Ivanov ", "Ildus Kurbangaliev " ], @@ -19,12 +18,12 @@ "type": "git" } }, - "generated_by": "Ildar Musin", + "generated_by": "pgpro", "provides": { "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.12", + "version": "1.4.13", "abstract": "Partitioning tool" } }, @@ -35,6 +34,8 @@ "tags": [ "partitioning", "partition", - "optimization" + "optimization", + "range", + "hash" ] } diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0b2434d4..5cda7bc5 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.12 + 1.4.13 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8069f192..6bdccc2e 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010412 +#define CURRENT_LIB_VERSION 0x010413 void *pathman_cache_search_relid(HTAB *cache_table, From c871c0b2c96be1be62781a6cbac28bc88ebbb992 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:19:56 +0300 Subject: [PATCH 0883/1124] add pathman_rebuild_deletes test suite --- Makefile | 1 + expected/pathman_rebuild_deletes.out | 99 ++++++++++++++++++++++++++++ sql/pathman_rebuild_deletes.sql | 57 ++++++++++++++++ 3 files changed, 157 insertions(+) create mode 100644 expected/pathman_rebuild_deletes.out create mode 100644 sql/pathman_rebuild_deletes.sql diff --git a/Makefile b/Makefile index 
d810185c..8fdc0cde 100644 --- a/Makefile +++ b/Makefile @@ -50,6 +50,7 @@ REGRESS = pathman_array_qual \ pathman_only \ pathman_param_upd_del \ pathman_permissions \ + pathman_rebuild_deletes \ pathman_rebuild_updates \ pathman_rowmarks \ pathman_runtime_nodes \ diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out new file mode 100644 index 00000000..98e43862 --- /dev/null +++ b/expected/pathman_rebuild_deletes.out @@ -0,0 +1,99 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | 
tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP SCHEMA test_deletes CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql new file mode 100644 index 00000000..f14bce5a --- /dev/null +++ b/sql/pathman_rebuild_deletes.sql @@ -0,0 +1,57 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; + + +/* + * Test DELETEs on a partition with different TupleDescriptor. 
+ */ + +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); + + +VACUUM ANALYZE; + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + +CREATE TABLE test_deletes.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +DROP TABLE test_deletes.test_dummy; + + + +DROP SCHEMA test_deletes CASCADE; +DROP EXTENSION pg_pathman; From 05f4bbb2a73c4651d0f35ef3f5f5a661a14bbe57 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:41:33 +0300 Subject: [PATCH 0884/1124] extend pathman_rebuild_updates test suite --- expected/pathman_rebuild_updates.out | 48 +++++++++++++++++++++++++++- sql/pathman_rebuild_updates.sql | 15 +++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index f7d59718..79a186ae 100644 --- a/expected/pathman_rebuild_updates.out 
+++ b/expected/pathman_rebuild_updates.out @@ -103,6 +103,52 @@ RETURNING test; (1 row) DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 
95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index 41d168df..3144a416 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -57,6 +57,21 @@ RETURNING test; DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; + +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + + DROP SCHEMA test_updates CASCADE; DROP EXTENSION pg_pathman; From 54a589c4f1b876bbcbbbc92eae4442445dfc786a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 14:57:44 +0300 Subject: [PATCH 0885/1124] remove obsolete test runner --- travis/pg-travis-test.sh | 141 --------------------------------------- 1 file changed, 141 deletions(-) delete mode 100755 travis/pg-travis-test.sh diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh deleted file mode 100755 index 97fa5ea9..00000000 --- a/travis/pg-travis-test.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash - -set -eux - -sudo apt-get update - - -# required packages -apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev 
build-essential" -pip_packages="testgres" - -# exit code -status=0 - -# pg_config path -pg_ctl_path=/usr/lib/postgresql/$PG_VER/bin/pg_ctl -initdb_path=/usr/lib/postgresql/$PG_VER/bin/initdb -config_path=/usr/lib/postgresql/$PG_VER/bin/pg_config - - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# ... and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -# install required packages -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages - - -# perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$CC" = "clang" ]; then - sudo apt-get -y install -qq clang-$LLVM_VER - - scan-build-$LLVM_VER --status-bugs make USE_PGXS=1 PG_CONFIG=$config_path || status=$? - exit $status - - elif [ "$CC" = "gcc" ]; then - sudo apt-get -y install -qq cppcheck - - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - - -# create cluster 'test' -CLUSTER_PATH=$(pwd)/test_cluster -$initdb_path -D $CLUSTER_PATH -U $USER -A trust - -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# check build -status=$? 
-if [ $status -ne 0 ]; then exit $status; fi - -# set permission to write postgres locks -sudo chown $USER /var/run/postgresql/ - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf -echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w - -# run regression tests -PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - - -set +u - -# create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman --python=python3 -source /tmp/envs/pg_pathman/bin/activate -type python -type pip - -# install pip packages -pip install $pip_packages - -# run python tests -make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? - -# deactivate virtual environment -deactivate - -set -u - - -# install cmake for cmocka -sudo apt-get -y install -qq cmake - -# build & install cmocka -CMOCKA_VER=1.1.1 -cd tests/cmocka -tar xf cmocka-$CMOCKA_VER.tar.xz -cd cmocka-$CMOCKA_VER -mkdir build && cd build -cmake .. -make && sudo make install -cd ../../../.. - -# export path to libcmocka.so -LD_LIBRARY_PATH=/usr/local/lib -export LD_LIBRARY_PATH - -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? 
- -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda - -#generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h - - -exit $status From dff70cc7e71ace4d2b1019c016267fc516c12867 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 16:09:54 +0300 Subject: [PATCH 0886/1124] move get_pathman_schema() to its siblings --- src/include/pathman.h | 1 + src/include/utils.h | 1 - src/pg_pathman.c | 51 +++++++++++++++++++++++++++++++++++++++++++ src/relation_info.c | 2 -- src/utils.c | 50 +----------------------------------------- 5 files changed, 53 insertions(+), 52 deletions(-) diff --git a/src/include/pathman.h b/src/include/pathman.h index d1ebb583..b5f9a156 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -100,6 +100,7 @@ extern Oid pathman_config_params_relid; */ Oid get_pathman_config_relid(bool invalid_is_ok); Oid get_pathman_config_params_relid(bool invalid_is_ok); +Oid get_pathman_schema(void); /* diff --git a/src/include/utils.h b/src/include/utils.h index 0697b923..1e0b87a4 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -28,7 +28,6 @@ bool match_expr_to_operand(const Node *expr, const Node *operand); /* * Misc. 
*/ -Oid get_pathman_schema(void); List *list_reverse(List *l); /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index a3ff2c7f..b9e4a6a4 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -22,8 +22,13 @@ #include "runtime_merge_append.h" #include "postgres.h" +#include "access/htup_details.h" #include "access/sysattr.h" +#include "access/xact.h" +#include "catalog/indexing.h" #include "catalog/pg_type.h" +#include "catalog/pg_extension.h" +#include "commands/extension.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -31,6 +36,7 @@ #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" +#include "utils/fmgroids.h" #include "utils/rel.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -354,6 +360,51 @@ get_pathman_config_params_relid(bool invalid_is_ok) return pathman_config_params_relid; } +/* + * Return pg_pathman schema's Oid or InvalidOid if that's not possible. + */ +Oid +get_pathman_schema(void) +{ + Oid result; + Relation rel; + SysScanDesc scandesc; + HeapTuple tuple; + ScanKeyData entry[1]; + Oid ext_oid; + + /* It's impossible to fetch pg_pathman's schema now */ + if (!IsTransactionState()) + return InvalidOid; + + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) + return InvalidOid; /* exit if pg_pathman does not exist */ + + ScanKeyInit(&entry[0], + ObjectIdAttributeNumber, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ext_oid)); + + rel = heap_open(ExtensionRelationId, AccessShareLock); + scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, + NULL, 1, entry); + + tuple = systable_getnext(scandesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(tuple)) + result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; + else + result = InvalidOid; + + systable_endscan(scandesc); + + heap_close(rel, AccessShareLock); + + return result; +} + /* diff --git a/src/relation_info.c 
b/src/relation_info.c index 449636a7..999608ec 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1571,7 +1571,5 @@ finish_delayed_invalidation(void) return; } } - - } } diff --git a/src/utils.c b/src/utils.c index 05f68acf..cbec24c8 100644 --- a/src/utils.c +++ b/src/utils.c @@ -16,18 +16,15 @@ #include "access/htup_details.h" #include "access/nbtree.h" #include "access/sysattr.h" -#include "access/xact.h" -#include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" -#include "catalog/pg_extension.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" -#include "commands/extension.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_coerce.h" #include "parser/parse_oper.h" +#include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" @@ -136,51 +133,6 @@ match_expr_to_operand(const Node *expr, const Node *operand) } -/* - * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
- */ -Oid -get_pathman_schema(void) -{ - Oid result; - Relation rel; - SysScanDesc scandesc; - HeapTuple tuple; - ScanKeyData entry[1]; - Oid ext_oid; - - /* It's impossible to fetch pg_pathman's schema now */ - if (!IsTransactionState()) - return InvalidOid; - - ext_oid = get_extension_oid("pg_pathman", true); - if (ext_oid == InvalidOid) - return InvalidOid; /* exit if pg_pathman does not exist */ - - ScanKeyInit(&entry[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_oid)); - - rel = heap_open(ExtensionRelationId, AccessShareLock); - scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, - NULL, 1, entry); - - tuple = systable_getnext(scandesc); - - /* We assume that there can be at most one matching tuple */ - if (HeapTupleIsValid(tuple)) - result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; - else - result = InvalidOid; - - systable_endscan(scandesc); - - heap_close(rel, AccessShareLock); - - return result; -} - List * list_reverse(List *l) { From b3eac64837654bec0fd52b71c776a54a67cfcb8d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 18:24:05 +0300 Subject: [PATCH 0887/1124] fix builds on 9.5 and 9.6 --- src/utils.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utils.c b/src/utils.c index cbec24c8..ddf10bae 100644 --- a/src/utils.c +++ b/src/utils.c @@ -26,6 +26,7 @@ #include "parser/parse_oper.h" #include "utils/array.h" #include "utils/builtins.h" +#include "utils/datetime.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" From b609c5f1be07c6fac93cf1f6d9243f3d9232bba6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 10 Jul 2018 18:33:06 +0300 Subject: [PATCH 0888/1124] Fix nasty bug in select_partition_for_insert(). Many thanks to @arssher. 
--- src/include/partition_filter.h | 2 +- src/partition_filter.c | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0940a59f..3a3e848e 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -167,7 +167,7 @@ void fini_result_parts_storage(ResultPartsStorage *parts_storage); ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); /* Refresh PartRelationInfo in storage */ -void refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); diff --git a/src/partition_filter.c b/src/partition_filter.c index 65107759..4d588914 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -381,7 +381,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) } /* Refresh PartRelationInfo for the partition in storage */ -void +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) { if (partid == PrelParentRelid(parts_storage->prel)) @@ -389,6 +389,8 @@ refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) close_pathman_relation_info(parts_storage->prel); parts_storage->prel = get_pathman_relation_info(partid); shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + + return parts_storage->prel; } else { @@ -398,12 +400,14 @@ refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) (const void *) &partid, HASH_FIND, NULL); - if (rri_holder && rri_holder->prel) - { - close_pathman_relation_info(rri_holder->prel); - rri_holder->prel = get_pathman_relation_info(partid); - shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); - } + /* We must have entry (since we got 'prel' from it) */ + 
Assert(rri_holder && rri_holder->prel); + + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + + return rri_holder->prel; } } @@ -543,7 +547,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) { /* Try building a new 'prel' for this relation */ - refresh_result_parts_storage(parts_storage, parent_relid); + prel = refresh_result_parts_storage(parts_storage, parent_relid); } /* This partition is a parent itself */ @@ -557,6 +561,8 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, /* Repeat with a new dispatch */ result = NULL; } + + Assert(prel); } /* Loop until we get some result */ while (result == NULL); From d0d128d79b5cb499a679f5a212ef99cbb75e83ee Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 11 Jul 2018 18:02:39 +0300 Subject: [PATCH 0889/1124] change update test suite --- src/partition_filter.c | 1 + ...thman_objects => dump_pathman_objects.sql} | 17 +------- tests/update/get_sql_diff | 39 +++++++++++++++++++ 3 files changed, 41 insertions(+), 16 deletions(-) rename tests/update/{dump_pathman_objects => dump_pathman_objects.sql} (68%) mode change 100755 => 100644 create mode 100755 tests/update/get_sql_diff diff --git a/src/partition_filter.c b/src/partition_filter.c index 4d588914..1e5c2db1 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1173,6 +1173,7 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) { ResultRelInfo *rri_array = estate->es_result_relations; + /* HACK: we can't repalloc or free previous array (there might be users) */ result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; estate->es_result_relations = palloc(result_rels_allocated * sizeof(ResultRelInfo)); diff --git a/tests/update/dump_pathman_objects b/tests/update/dump_pathman_objects.sql old mode 100755 new mode 100644 similarity index 
68% rename from tests/update/dump_pathman_objects rename to tests/update/dump_pathman_objects.sql index fff1ed17..e1a632ca --- a/tests/update/dump_pathman_objects +++ b/tests/update/dump_pathman_objects.sql @@ -1,17 +1,4 @@ -#!/usr/bin/bash - - -rndstr=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo '') -bindir=$($PG_CONFIG --bindir) -dbname=$1 -flname=pathman_objects_$rndstr.txt - -# show file name -echo $flname - -$bindir/psql $dbname << EOF - -\o $flname +CREATE EXTENSION IF NOT EXISTS pg_pathman; SELECT pg_get_functiondef(objid) FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid @@ -27,5 +14,3 @@ ORDER BY objid::regprocedure::TEXT ASC; \d+ pathman_partition_list \d+ pathman_cache_stats \d+ pathman_concurrent_part_tasks - -EOF diff --git a/tests/update/get_sql_diff b/tests/update/get_sql_diff new file mode 100755 index 00000000..876717a8 --- /dev/null +++ b/tests/update/get_sql_diff @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +PG_VER=$1 +WORK_DIR=/tmp/pg_pathman +BRANCH_1=$2 +BRANCH_2=$3 + + +if [ -z "$PG_VER" ]; then + PG_VER=10 +fi + +if [ -z "$BRANCH_1" ]; then + BRANCH_1=master +fi + +if [ -z "$BRANCH_1" ]; then + BRANCH_2=$(git tag | sort -V | tail -1) +fi + + +printf "PG:\\t$PG_VER\\n" +printf "BRANCH_1:\\t$BRANCH_1\\n" +printf "BRANCH_2:\\t$BRANCH_2\\n" + + +cp -R "$(dirname $0)" "$WORK_DIR" + +git checkout "$BRANCH_1" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_1 + +git checkout "$BRANCH_2" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_2 + +diff -u "$WORK_DIR"/dump_1 "$WORK_DIR"/dump_2 > "$WORK_DIR"/diff From 0dc040b6df6adefdaeed93f9233d217e445a9494 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Jul 2018 13:41:26 +0300 Subject: [PATCH 0890/1124] fix evaluation of expressions with PARAMs in handle_modification_query() --- 
expected/pathman_param_upd_del.out | 58 ++++++++++++++++++++++++++++++ sql/pathman_param_upd_del.sql | 11 ++++++ src/planner_tree_modification.c | 13 +++---- 3 files changed, 76 insertions(+), 6 deletions(-) diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out index 7419ad29..ad935579 100644 --- a/expected/pathman_param_upd_del.out +++ b/expected/pathman_param_upd_del.out @@ -68,6 +68,64 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11); Filter: (key = 11) (3 rows) +DEALLOCATE upd; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(6); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 18) +(3 rows) + DEALLOCATE upd; PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; EXPLAIN (COSTS OFF) EXECUTE del(10); diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql index 98be1179..f4e42a41 100644 --- a/sql/pathman_param_upd_del.sql +++ b/sql/pathman_param_upd_del.sql @@ 
-23,6 +23,17 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11); DEALLOCATE upd; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(6); +DEALLOCATE upd; + + PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; EXPLAIN (COSTS OFF) EXECUTE del(10); EXPLAIN (COSTS OFF) EXECUTE del(10); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index e5dcfe2c..35473a75 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -374,8 +374,8 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context) { RangeTblEntry *rte; - Expr *quals; Oid child; + Node *quals = parse->jointree->quals; Index result_rti = parse->resultRelation; ParamListInfo params = context->query_params; @@ -390,14 +390,15 @@ handle_modification_query(Query *parse, transform_query_cxt *context) if (!rte->inh) return; - quals = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - /* Check if we can replace PARAMs with CONSTs */ - if (params && clause_contains_params((Node *) quals)) - quals = (Expr *) eval_extern_params_mutator((Node *) quals, params); + if (params && clause_contains_params(quals)) + quals = eval_extern_params_mutator(quals, params); + + /* Evaluate constaint expressions */ + quals = eval_const_expressions(NULL, quals); /* Parse syntax tree and extract deepest partition if possible */ - child = find_deepest_partition(rte->relid, result_rti, quals); + child = find_deepest_partition(rte->relid, result_rti, (Expr *) quals); /* Substitute parent table with partition */ if (OidIsValid(child)) From adfaa72a1cf91e61056006f00ce599cf37be730a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Jul 2018 14:15:08 
+0300 Subject: [PATCH 0891/1124] attempt to fix tests (also disable Valgrind) --- .travis.yml | 2 -- Dockerfile.tmpl | 2 +- tests/python/partitioning_test.py | 1 - 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5b1732a6..db2eebc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,10 +18,8 @@ notifications: on_failure: always env: - - PG_VERSION=10 LEVEL=nightmare - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 - - PG_VERSION=9.6 LEVEL=nightmare - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 - PG_VERSION=9.5 LEVEL=hardcore diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 021a2850..85b159cf 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -5,7 +5,7 @@ RUN apk add --no-cache \ openssl curl \ cmocka-dev \ perl perl-ipc-run \ - python3 python3-dev py-virtualenv \ + python3 python3-dev py3-virtualenv \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 12475b9e..3b889405 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -19,7 +19,6 @@ from distutils.version import LooseVersion from testgres import get_new_node, get_pg_version -from testgres.utils import pg_version_ge # set setup base logging config, it can be turned on by `use_logging` # parameter on node setup From 389c8076b0c403ac1d0a027f3823dac559136525 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 5 Jul 2018 17:41:16 +0300 Subject: [PATCH 0892/1124] Adapt pg_pathman for PG 11. I kinda lost interest to exorcise a couple of tests further in attempts to make them pass on all supported versions and just added copies. These are * pathman_expressions now differs because planner converts ROW(Const, Const) to just Const of record type. * Same with pathman_rebuild_updates. I have removed inclusion of partition_filter.h in pg_compat.h in 9.5 as it created circular dependency hell. 
I think it is not worthwhile to fight with it since the only thing actually needed was error message, which is used in this single place. Small typo fix in partitioning_test.py: con2.begin instead of con1.begin. Finally, run python tests with --failfast and --verbose options. --- expected/pathman_expressions.out | 36 +- expected/pathman_expressions_1.out | 436 +++++++++++++++++++++++++ expected/pathman_permissions.out | 32 +- expected/pathman_rebuild_updates.out | 12 +- expected/pathman_rebuild_updates_1.out | 114 +++++++ sql/pathman_expressions.sql | 7 + sql/pathman_permissions.sql | 28 +- sql/pathman_rebuild_updates.sql | 7 + src/compat/pg_compat.c | 9 + src/hooks.c | 15 +- src/include/compat/pg_compat.h | 110 +++++-- src/include/hooks.h | 2 +- src/include/partition_filter.h | 2 +- src/include/relation_info.h | 4 +- src/init.c | 2 + src/nodes_common.c | 6 +- src/partition_filter.c | 8 +- src/partition_router.c | 12 +- src/pathman_workers.c | 8 +- src/pl_funcs.c | 2 + src/pl_range_funcs.c | 2 + src/planner_tree_modification.c | 15 +- src/relation_info.c | 11 +- src/utility_stmt_hooking.c | 4 +- tests/python/Makefile | 2 +- tests/python/partitioning_test.py | 2 +- 26 files changed, 799 insertions(+), 89 deletions(-) create mode 100644 expected/pathman_expressions_1.out create mode 100644 expected/pathman_rebuild_updates_1.out diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 685ca2d3..66f931e3 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -160,42 +166,38 @@ SELECT *, tableoid::REGCLASS FROM test_exprs.composite; (3 rows) EXPLAIN (COSTS OFF) 
SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------ Append -> Seq Scan on composite_1 -> Seq Scan on composite_2 -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_2 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_3 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_4 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_2 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) - -> Seq Scan on composite_4 - 
Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -(9 rows) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) DROP TABLE test_exprs.composite CASCADE; NOTICE: drop cascades to 5 other objects diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out new file mode 100644 index 00000000..893bcd21 --- /dev/null +++ b/expected/pathman_expressions_1.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition 
+--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. 
+ */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq 
Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_2 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_4 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) +(9 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY 
default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze 
partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, 
md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions 
+------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +DROP SCHEMA test_exprs CASCADE; +NOTICE: drop cascades to 24 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index e329a9ec..a9e68be4 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -12,15 +12,23 @@ CREATE TABLE permissions.user1_table(id serial, a int); INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: permission denied for relation user1_table +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Grant SELECT to user2 */ SET ROLE user1; GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" +DO $$ +BEGIN + SELECT 
create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Should be ok */ SET ROLE user1; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); @@ -58,8 +66,12 @@ WHERE partrel = 'permissions.user1_table'::regclass; WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" /* No rights to insert, should fail */ SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); -ERROR: permission denied for relation user1_table +DO $$ +BEGIN + INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* No rights to create partitions (need INSERT privilege) */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); @@ -116,8 +128,12 @@ ORDER BY relname; /* we also check ACL for "user1_table_2" */ (3 rows) /* Try to drop partition, should fail */ -SELECT drop_range_partition('permissions.user1_table_4'); -ERROR: must be owner of relation user1_table_4 +DO $$ +BEGIN + SELECT drop_range_partition('permissions.user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Disable automatic partition creation */ SET ROLE user1; SELECT set_auto('permissions.user1_table', false); diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index 79a186ae..eb078303 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -86,11 +92,11 @@ RETURNING t1.*, t1.tableoid::REGCLASS; EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 
101 AND test >= (100, 8) RETURNING *, tableoid::REGCLASS; - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------- Update on test_11 -> Seq Scan on test_11 - Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) + Filter: (((test_11.*)::test_updates.test >= '(100,8)'::record) AND (val = 101)) (3 rows) /* execute this one */ diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out new file mode 100644 index 00000000..cf0fc1dc --- /dev/null +++ b/expected/pathman_rebuild_updates_1.out @@ -0,0 +1,114 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; +/* + * Test UPDATEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_updates.test'); + append_range_partition +------------------------ + test_updates.test_11 +(1 row) + +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; + QUERY PLAN +--------------------------- + Update on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 0 | test_updates.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; + QUERY PLAN +----------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+---------------------- + 101 | 0 | test_updates.test_11 +(1 row) + +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = 
t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + +DROP TABLE test_updates.test_dummy; +DROP SCHEMA test_updates CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 46bceafb..6149a0c2 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -1,3 +1,10 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 2dd22fc0..5f66a84f 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -18,7 +18,12 @@ INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g /* Should fail (can't SELECT) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Grant SELECT to user2 */ SET ROLE user1; @@ 
-26,7 +31,12 @@ GRANT SELECT ON permissions.user1_table TO user2; /* Should fail (don't own parent) */ SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Should be ok */ SET ROLE user1; @@ -49,7 +59,12 @@ WHERE partrel = 'permissions.user1_table'::regclass; /* No rights to insert, should fail */ SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +DO $$ +BEGIN + INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* No rights to create partitions (need INSERT privilege) */ SET ROLE user2; @@ -81,7 +96,12 @@ WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list ORDER BY relname; /* we also check ACL for "user1_table_2" */ /* Try to drop partition, should fail */ -SELECT drop_range_partition('permissions.user1_table_4'); +DO $$ +BEGIN + SELECT drop_range_partition('permissions.user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN +END$$; /* Disable automatic partition creation */ SET ROLE user1; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index 3144a416..f4229d09 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -1,3 +1,10 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 602102c4..0fb510ed 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -48,7 +48,12 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { int parallel_workers; +#if PG_VERSION_NUM >= 
110000 + parallel_workers = compute_parallel_worker(rel, rel->pages, -1, + max_parallel_workers_per_gather); +#else parallel_workers = compute_parallel_worker(rel, rel->pages, -1); +#endif /* If any limit was set to zero, the user doesn't want a parallel scan. */ if (parallel_workers <= 0) @@ -240,7 +245,11 @@ McxtStatsInternal(MemoryContext context, int level, AssertArg(MemoryContextIsValid(context)); /* Examine the context itself */ +#if PG_VERSION_NUM >= 110000 + (*context->methods->stats) (context, NULL, NULL, totals); +#else (*context->methods->stats) (context, level, false, totals); +#endif memset(&local_totals, 0, sizeof(local_totals)); diff --git a/src/hooks.c b/src/hooks.c index 1088b27a..2693cd91 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -514,13 +514,20 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rel->partial_pathlist = NIL; #endif +/* Convert list to array for faster lookups */ +#if PG_VERSION_NUM >= 110000 + setup_append_rel_array(root); +#endif + /* Generate new paths using the rels we've just added */ set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti); -#if PG_VERSION_NUM >= 90600 - /* consider gathering partial paths for the parent appendrel */ - generate_gather_paths(root, rel); + /* consider gathering partial paths for the parent appendrel */ +#if PG_VERSION_NUM >= 110000 + generate_gather_paths(root, rel, false); +#elif PG_VERSION_NUM >= 90600 + generate_gather_paths(root, rel); #endif /* Skip if both custom nodes are disabled */ @@ -925,7 +932,7 @@ pathman_process_utility_hook(Node *first_arg, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot change type of column \"%s\"" " of table \"%s\" partitioned by HASH", - get_attname(relation_oid, attr_number), + get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); /* Don't forget to invalidate parsed partitioning expression */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 
9eeca190..2cc80731 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -41,32 +41,33 @@ */ /* - * calc_nestloop_required_outer() + * get_attname() */ +#if PG_VERSION_NUM >= 110000 +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum), false) +#else +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum)) +#endif + +/* + * calc_nestloop_required_outer + */ #if PG_VERSION_NUM >= 110000 -static inline Relids -calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) -{ - RelOptInfo *innerrel = inner_path->parent; - RelOptInfo *outerrel = outer_path->parent; - Relids innerrelids = innerrel->relids; - Relids outerrelids = outerrel->relids; - Relids inner_paramrels = PATH_REQ_OUTER(inner_path); - Relids outer_paramrels = PATH_REQ_OUTER(outer_path); - - return calc_nestloop_required_outer(outerrelids, outer_paramrels, - innerrelids, inner_paramrels); -} +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + (inner)->parent->relids, PATH_REQ_OUTER(inner)) #else -#define calc_nestloop_required_outer_compat(outer_path, inner_path) \ - (calc_nestloop_required_outer((outer_path), (inner_path))) +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer), (inner)) #endif + /* * adjust_appendrel_attrs() */ - #if PG_VERSION_NUM >= 110000 #define adjust_appendrel_attrs_compat(root, node, appinfo) \ adjust_appendrel_attrs((root), \ @@ -93,17 +94,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltarget->exprs = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltarget->exprs, \ - (appinfo)); \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ } while (0) #elif PG_VERSION_NUM >= 90500 
#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltargetlist = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltargetlist, \ - (appinfo)); \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ } while (0) #endif @@ -231,7 +232,17 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) /* * create_append_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 110000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO */ +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 100000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -240,7 +251,6 @@ calc_nestloop_required_outer_compat(Path *outer_path, Path *inner_path) #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ false, NIL) - #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90600 @@ -360,14 +370,16 @@ extern void create_plain_partial_paths(PlannerInfo *root, #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 -#include "partition_filter.h" /* Variables for ExecEvalExprCompat() */ extern Datum exprResult; extern ExprDoneCond isDone; /* Error handlers */ -static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); } +static inline void mult_result_handler() +{ + elog(ERROR, "partitioning expression should return single value"); +} #define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ ( \ @@ -727,11 +739,53 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * RegisterCustomScanMethods() */ 
-#if PG_VERSION_NUM < 96000 +#if PG_VERSION_NUM < 90600 #define RegisterCustomScanMethods(methods) #endif +/* + * MakeTupleTableSlot() + */ +#if PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat() \ + MakeTupleTableSlot(NULL) +#else +#define MakeTupleTableSlotCompat() \ + MakeTupleTableSlot() +#endif + +/* + * ExecInitExtraTupleSlot() + */ +#if PG_VERSION_NUM >= 110000 +#define ExecInitExtraTupleSlotCompat(estate) \ + ExecInitExtraTupleSlot((estate), NULL) +#else +#define ExecInitExtraTupleSlotCompat(estate) \ + ExecInitExtraTupleSlot(estate) +#endif +/* + * BackgroundWorkerInitializeConnectionByOid() + */ +#if PG_VERSION_NUM >= 110000 +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid), 0) +#else +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid)) +#endif + +/* + * heap_delete() + */ +#if PG_VERSION_NUM >= 110000 +#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ + heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd), false) +#else +#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ + heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) +#endif /* * ------------- diff --git a/src/include/hooks.h b/src/include/hooks.h index 14542bc0..adf96d37 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -42,7 +42,7 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte); -void pathman_enable_assign_hook(char newval, void *extra); +void pathman_enable_assign_hook(bool newval, void *extra); PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 3a3e848e..69cdb8c8 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -29,7 +29,7 @@ #define ERR_PART_ATTR_NULL 
"partitioning expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" +#define ERR_PART_ATTR_MULTIPLE_RESULTS #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 6c1d5435..2f37406c 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -11,10 +11,10 @@ #ifndef RELATION_INFO_H #define RELATION_INFO_H +#include "compat/pg_compat.h" #include "utils.h" -#include "postgres.h" #include "access/attnum.h" #include "access/sysattr.h" #include "fmgr.h" @@ -279,7 +279,7 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(PrelParentRelid(prel), attnum); + char *attname = get_attname_compat(PrelParentRelid(prel), attnum); columns = lappend(columns, makeString(attname)); } diff --git a/src/init.c b/src/init.c index 2994aaf8..eb3b6feb 100644 --- a/src/init.c +++ b/src/init.c @@ -25,7 +25,9 @@ #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" +#if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" +#endif #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" diff --git a/src/nodes_common.c b/src/nodes_common.c index 09f1b07e..5c484cdb 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -568,8 +568,12 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, forboth (lc1, rpath->cpath.custom_paths, lc2, custom_plans) { Plan *child_plan = (Plan *) lfirst(lc2); - RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; +#if 
PG_VERSION_NUM >= 110000 + AppendRelInfo *appinfo = root->append_rel_array[child_rel->relid]; +#else AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); +#endif /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) diff --git a/src/partition_filter.c b/src/partition_filter.c index 1e5c2db1..f96eb970 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -332,8 +332,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); CopyToResultRelInfo(ri_projectReturning); +#if PG_VERSION_NUM >= 110000 + CopyToResultRelInfo(ri_onConflict); +#else CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); +#endif if (parts_storage->command_type != CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); @@ -776,7 +780,7 @@ partition_filter_exec(CustomScanState *node) /* Allocate new slot if needed */ if (!state->tup_convert_slot) - state->tup_convert_slot = MakeTupleTableSlot(); + state->tup_convert_slot = MakeTupleTableSlotCompat(); ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); @@ -1055,7 +1059,9 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; mtstate.resultRelInfo = rri; +#if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; +#endif /* Plan fake query in for FDW access to be planned as well */ elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); diff --git a/src/partition_router.c b/src/partition_router.c index a87b514f..a354fd87 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -160,7 +160,7 @@ partition_router_exec(CustomScanState *node) state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, old_rri->ri_RelationDesc->rd_att->tdhasoid, - 
ExecInitExtraTupleSlot(estate)); + ExecInitExtraTupleSlotCompat(estate)); state->junkfilter->jf_junkAttNo = ExecFindJunkAttribute(state->junkfilter, "ctid"); @@ -277,11 +277,11 @@ ExecDeleteInternal(ItemPointer tupleid, { /* delete the tuple */ ldelete: - result = heap_delete(resultRelationDesc, tupleid, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); + result = heap_delete_compat(resultRelationDesc, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd); switch (result) { case HeapTupleSelfUpdated: diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 86416b36..532420f3 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -384,7 +384,7 @@ bgw_main_spawn_partitions(Datum main_arg) #endif /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); + BackgroundWorkerInitializeConnectionByOidCompat(args->dbid, args->userid); /* Start new transaction (syscache access etc.) 
*/ StartTransactionCommand(); @@ -469,7 +469,7 @@ bgw_main_concurrent_part(Datum main_arg) SetAutoPartitionEnabled(false); /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(part_slot->dbid, part_slot->userid); + BackgroundWorkerInitializeConnectionByOidCompat(part_slot->dbid, part_slot->userid); /* Initialize pg_pathman's local config */ StartTransactionCommand(); @@ -483,7 +483,7 @@ bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool nulls[2] = { false, false }; + char nulls[2] = { false, false }; bool rel_locked = false; @@ -568,7 +568,7 @@ bgw_main_concurrent_part(Datum main_arg) /* Extract number of processed rows */ rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); - Assert(tupdesc->attrs[0]->atttypid == INT8OID); /* check type */ + Assert(TupleDescAttr(tupdesc, 0)->atttypid == INT8OID); /* check type */ Assert(!isnull); /* ... and ofc it must not be NULL */ } /* Else raise generic error */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index fb457df1..22e6b83f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -23,7 +23,9 @@ #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" +#if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" +#endif #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 7d17d407..89e8536d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -18,7 +18,9 @@ #include "access/xact.h" #include "catalog/heap.h" #include "catalog/namespace.h" +#if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" +#endif #include "catalog/pg_type.h" #include "commands/tablecmds.h" #include "executor/spi.h" diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 35473a75..233b8773 100644 --- 
a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -851,7 +851,20 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm = &params->params[param->paramid - 1]; + ParamExternData *prm; + +#if PG_VERSION_NUM >= 110000 + ParamExternData prmdata; + if (params->paramFetch != NULL) + prm = params->paramFetch(params, param->paramid, false, &prmdata); + else + prm = &params->params[param->paramid - 1]; +#else + prm = &params->params[param->paramid - 1]; + if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) + (*params->paramFetch) (params, param->paramid); +#endif + if (OidIsValid(prm->ptype)) { diff --git a/src/relation_info.c b/src/relation_info.c index 999608ec..a4c91bbe 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -20,6 +20,9 @@ #include "catalog/catalog.h" #include "catalog/indexing.h" #include "catalog/pg_constraint.h" +#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600 +#include "catalog/pg_constraint_fn.h" +#endif #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" @@ -45,7 +48,7 @@ #include "optimizer/planmain.h" #endif -#if PG_VERSION_NUM >= 90600 +#if PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 110000 #include "catalog/pg_constraint_fn.h" #endif @@ -402,7 +405,7 @@ build_pathman_relation_info(Oid relid, Datum *values) /* Create a new memory context to store expression tree etc */ prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext, - __FUNCTION__, + "build_pathman_relation_info", ALLOCSET_SMALL_SIZES); /* Create a new PartRelationInfo */ @@ -897,7 +900,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); + char *attname = get_attname_compat(parent_relid, attnum); int j; Assert(attnum <= expr_natts); @@ -1435,7
+1438,7 @@ cook_partitioning_expression(const Oid relid, if (nullable) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" should be marked NOT NULL", - get_attname(relid, attnum)))); + get_attname_compat(relid, attnum)))); } } diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index bd65e50f..553f7c8e 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -514,10 +514,10 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, RPS_RRI_CB(finish_rri_for_copy, NULL)); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate); + myslot = ExecInitExtraTupleSlotCompat(estate); ExecSetSlotDescriptor(myslot, tupDesc); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); diff --git a/tests/python/Makefile b/tests/python/Makefile index bb548928..ee650ea4 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,2 @@ partitioning_tests: - python -m unittest partitioning_test.py + python -m unittest --verbose --failfast partitioning_test.py diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 3b889405..41390d4a 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -650,7 +650,7 @@ def test_conc_part_drop_runtime_append(self): # Thread for connection #2 (it has to wait) def con2_thread(): - con1.begin() + con2.begin() con2.execute('set enable_hashjoin = f') con2.execute('set enable_mergejoin = f') From 6a60886a6b82a607baf4218dbda01174f1a3faf6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 19:36:19 +0300 Subject: [PATCH 0893/1124] Fix handling of append_rel_array. Also a bunch of other stuff noted by @funbringer. 
--- src/compat/pg_compat.c | 8 ++----- src/hooks.c | 26 ++++++++++++----------- src/include/compat/pg_compat.h | 39 ++++++++++++++++++++++++++++++++++ src/init.c | 6 +++--- src/nodes_common.c | 8 +++---- src/pathman_workers.c | 3 +-- src/pg_pathman.c | 4 ++++ 7 files changed, 66 insertions(+), 28 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 0fb510ed..5547231e 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -48,12 +48,8 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { int parallel_workers; -#if PG_VERSION_NUM >= 110000 - parallel_workers = compute_parallel_worker(rel, rel->pages, -1, - max_parallel_workers_per_gather); -#else - parallel_workers = compute_parallel_worker(rel, rel->pages, -1); -#endif + /* no more than max_parallel_workers_per_gather since 11 */ + parallel_workers = compute_parallel_worker_compat(rel, rel->pages, -1); /* If any limit was set to zero, the user doesn't want a parallel scan. */ if (parallel_workers <= 0) diff --git a/src/hooks.c b/src/hooks.c index 2693cd91..26adad63 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -477,6 +477,17 @@ pathman_rel_pathlist_hook(PlannerInfo *root, memset((void *) &root->simple_rte_array[current_len], 0, irange_len * sizeof(RangeTblEntry *)); +#if PG_VERSION_NUM >= 110000 + /* Make sure append_rel_array is wide enough */ + if (root->append_rel_array == NULL) + root->append_rel_array = (AppendRelInfo **) palloc0(0); + root->append_rel_array = (AppendRelInfo **) + repalloc(root->append_rel_array, + new_len * sizeof(AppendRelInfo *)); + memset((void *) &root->append_rel_array[current_len], 0, + irange_len * sizeof(AppendRelInfo *)); +#endif + /* Don't forget to update array size! 
*/ root->simple_rel_array_size = new_len; } @@ -485,7 +496,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rel = heap_open(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - + /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, @@ -514,21 +525,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, rel->partial_pathlist = NIL; #endif -/* Convert list to array for faster lookups */ -#if PG_VERSION_NUM >= 110000 - setup_append_rel_array(root); -#endif - /* Generate new paths using the rels we've just added */ set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); set_append_rel_size_compat(root, rel, rti); - /* consider gathering partial paths for the parent appendrel */ -#if PG_VERSION_NUM >= 110000 - generate_gather_paths(root, rel, false); -#elif PG_VERSION_NUM >= 90600 - generate_gather_paths(root, rel); -#endif + /* consider gathering partial paths for the parent appendrel */ + generate_gather_paths_compat(root, rel); /* Skip if both custom nodes are disabled */ if (!(pg_pathman_enable_runtimeappend || diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2cc80731..699b152d 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -787,6 +787,45 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) #endif +/* + * compute_parallel_worker + */ +#if PG_VERSION_NUM >= 110000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages), \ + max_parallel_workers_per_gather) +#elif PG_VERSION_NUM >= 100000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages)) +#endif + + +/* + * generate_gather_paths + */ +#if PG_VERSION_NUM >= 110000 +#define generate_gather_paths_compat(root, rel) \ + 
generate_gather_paths((root), (rel), false) +#elif PG_VERSION_NUM >= 90600 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((rel), (heap_pages), false) +#else +#define generate_gather_paths_compat(root, rel) +#endif + + +/* + * handling appendrelinfo array + */ +#if PG_VERSION_NUM >= 110000 +#define find_childrel_appendrelinfo_compat(root, rel) \ + ((root)->append_rel_array[(rel)->relid]) +#else +#define find_childrel_appendrelinfo_compat(root, rel) \ + find_childrel_appendrelinfo((root), (rel)) +#endif + + /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index eb3b6feb..327ca027 100644 --- a/src/init.c +++ b/src/init.c @@ -25,9 +25,6 @@ #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -39,6 +36,9 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif #include diff --git a/src/nodes_common.c b/src/nodes_common.c index 5c484cdb..f9f394ec 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -569,11 +569,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, { Plan *child_plan = (Plan *) lfirst(lc2); RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; -#if PG_VERSION_NUM >= 110000 - AppendRelInfo *appinfo = root->append_rel_array[child_rel->relid]; -#else - AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); -#endif + AppendRelInfo *appinfo; + + appinfo = find_childrel_appendrelinfo_compat(root, child_rel); /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 532420f3..69f5db3b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -483,7 +483,6 @@ 
bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - char nulls[2] = { false, false }; bool rel_locked = false; @@ -557,7 +556,7 @@ bgw_main_concurrent_part(Datum main_arg) } /* Call concurrent partitioning function */ - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); + ret = SPI_execute_with_args(sql, 2, types, vals, NULL, false, 0); if (ret == SPI_OK_SELECT) { TupleDesc tupdesc = SPI_tuptable->tupdesc; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b9e4a6a4..588f5417 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -535,6 +535,10 @@ append_child_relation(PlannerInfo *root, /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 110000 + root->append_rel_array[child_rti] = appinfo; +#endif /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) From 0137d1903ae01d339ecf2d0954d560d08a666952 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 9 Jul 2018 19:46:56 +0300 Subject: [PATCH 0894/1124] REL10 typo fix --- src/include/compat/pg_compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 699b152d..b4fcba7c 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -808,7 +808,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, generate_gather_paths((root), (rel), false) #elif PG_VERSION_NUM >= 90600 #define generate_gather_paths_compat(root, rel) \ - generate_gather_paths((rel), (heap_pages), false) + generate_gather_paths((root), (rel)) #else #define generate_gather_paths_compat(root, rel) #endif From 4e842566bad2b7cde040cf706dea576394b2c9e2 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 10 Jul 2018 18:21:32 +0300 Subject: [PATCH 0895/1124] 
Raise notice 'insufficient privileges', fix pg_constraint_fn inclusion. --- expected/pathman_permissions.out | 8 ++++++++ sql/pathman_permissions.sql | 4 ++++ src/relation_info.c | 6 +----- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index a9e68be4..388fc2bc 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -17,7 +17,9 @@ BEGIN SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; +NOTICE: Insufficient priviliges /* Grant SELECT to user2 */ SET ROLE user1; GRANT SELECT ON permissions.user1_table TO user2; @@ -28,7 +30,9 @@ BEGIN SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; +NOTICE: Insufficient priviliges /* Should be ok */ SET ROLE user1; SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); @@ -71,7 +75,9 @@ BEGIN INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; +NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ SET ROLE user2; SELECT prepend_range_partition('permissions.user1_table'); @@ -133,7 +139,9 @@ BEGIN SELECT drop_range_partition('permissions.user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; +NOTICE: Insufficient priviliges /* Disable automatic partition creation */ SET ROLE user1; SELECT set_auto('permissions.user1_table', false); diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 5f66a84f..3a234676 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -23,6 +23,7 @@ BEGIN SELECT create_range_partitions('permissions.user1_table', 
'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; /* Grant SELECT to user2 */ @@ -36,6 +37,7 @@ BEGIN SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; /* Should be ok */ @@ -64,6 +66,7 @@ BEGIN INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; /* No rights to create partitions (need INSERT privilege) */ @@ -101,6 +104,7 @@ BEGIN SELECT drop_range_partition('permissions.user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; END$$; /* Disable automatic partition creation */ diff --git a/src/relation_info.c b/src/relation_info.c index a4c91bbe..1f8a1ba1 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -20,9 +20,6 @@ #include "catalog/catalog.h" #include "catalog/indexing.h" #include "catalog/pg_constraint.h" -#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600 -#include "catalog/pg_constraint_fn.h" -#endif #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" @@ -47,8 +44,7 @@ #if PG_VERSION_NUM < 90600 #include "optimizer/planmain.h" #endif - -#if PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 11000 +#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600 #include "catalog/pg_constraint_fn.h" #endif From 6418761aa976726b6f74fede8854f0b0484a2c15 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 11 Jul 2018 19:18:34 +0300 Subject: [PATCH 0896/1124] Fix pathman_rebuild_deletes test for 11. 
--- expected/pathman_rebuild_deletes.out | 12 ++- expected/pathman_rebuild_deletes_1.out | 105 +++++++++++++++++++++++++ sql/pathman_rebuild_deletes.sql | 7 ++ 3 files changed, 121 insertions(+), 3 deletions(-) create mode 100644 expected/pathman_rebuild_deletes_1.out diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out index 98e43862..b19d700a 100644 --- a/expected/pathman_rebuild_deletes.out +++ b/expected/pathman_rebuild_deletes.out @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -86,11 +92,11 @@ RETURNING t1.*, t1.tableoid::REGCLASS; EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101 AND test >= (100, 8) RETURNING *, tableoid::REGCLASS; - QUERY PLAN ------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------- Delete on test_11 -> Seq Scan on test_11 - Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) + Filter: (((test_11.*)::test_deletes.test >= '(100,8)'::record) AND (val = 101)) (3 rows) DROP TABLE test_deletes.test_dummy; diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out new file mode 100644 index 00000000..d1c4b69e --- /dev/null +++ b/expected/pathman_rebuild_deletes_1.out @@ -0,0 +1,105 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION 
pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test 
t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP SCHEMA test_deletes CASCADE; +NOTICE: drop cascades to 13 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql index f14bce5a..28a09916 100644 --- a/sql/pathman_rebuild_deletes.sql +++ b/sql/pathman_rebuild_deletes.sql @@ -1,3 +1,10 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 'public'; From 50bfb926ca32ef6fbef04c18e58de2100135aa58 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 11 Jul 2018 19:25:07 +0300 Subject: [PATCH 0897/1124] Fix pathman_rebuild_updates test. 
--- expected/pathman_rebuild_updates_1.out | 48 +++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index cf0fc1dc..10ec256e 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -109,6 +109,52 @@ RETURNING test; (1 row) DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid 
+-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; From 44a3f678da529c70b15054cf3c2c2bdcf9808cb3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 12 Jul 2018 16:30:59 +0300 Subject: [PATCH 0898/1124] Purge spurious whitespace --- src/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 26adad63..28e52f47 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -487,7 +487,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, memset((void *) &root->append_rel_array[current_len], 0, irange_len * sizeof(AppendRelInfo *)); #endif - + /* Don't forget to update array size! 
*/ root->simple_rel_array_size = new_len; } @@ -496,7 +496,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, parent_rel = heap_open(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - + /* Add parent if asked to */ if (prel->enable_parent) append_child_relation(root, parent_rel, parent_rowmark, From 0c38735df27e4e78496fa7eaa1c1d2dd1905ffb6 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 12 Jul 2018 17:39:38 +0300 Subject: [PATCH 0899/1124] minor style fixes & refactoring --- src/include/compat/pg_compat.h | 29 ++++++++++++++++++++++++++--- src/include/partition_filter.h | 1 - src/init.c | 1 + src/pl_funcs.c | 7 ++++--- src/pl_range_funcs.c | 7 ++++--- src/planner_tree_modification.c | 15 +-------------- 6 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index b4fcba7c..08298dd8 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -788,7 +788,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif /* - * compute_parallel_worker + * compute_parallel_worker() */ #if PG_VERSION_NUM >= 110000 #define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ @@ -801,7 +801,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* - * generate_gather_paths + * generate_gather_paths() */ #if PG_VERSION_NUM >= 110000 #define generate_gather_paths_compat(root, rel) \ @@ -815,7 +815,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* - * handling appendrelinfo array + * find_childrel_appendrelinfo() */ #if PG_VERSION_NUM >= 110000 #define find_childrel_appendrelinfo_compat(root, rel) \ @@ -832,6 +832,29 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * ------------- */ +/* See ExecEvalParamExtern() */ +static inline ParamExternData * +CustomEvalParamExternCompat(Param *param, ParamListInfo params) +{ + ParamExternData *prm; + +#if PG_VERSION_NUM >= 110000 + 
ParamExternData prmdata; + + if (params->paramFetch != NULL) + prm = params->paramFetch(params, param->paramid, false, &prmdata); + else + prm = ¶ms->params[param->paramid - 1]; +#else + prm = ¶ms->params[param->paramid - 1]; + + if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) + params->paramFetch(params, param->paramid); +#endif + + return prm; +} + void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 69cdb8c8..ef091e0b 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -29,7 +29,6 @@ #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" #define ERR_PART_DESC_CONVERT "could not convert row type for partition" diff --git a/src/init.c b/src/init.c index 327ca027..a25d5956 100644 --- a/src/init.c +++ b/src/init.c @@ -36,6 +36,7 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/typcache.h" + #if PG_VERSION_NUM < 110000 #include "catalog/pg_inherits_fn.h" #endif diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 22e6b83f..b90619e0 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -23,9 +23,6 @@ #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" @@ -41,6 +38,10 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + /* Function declarations */ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 89e8536d..f8f52e9d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -18,9 +18,6 
@@ #include "access/xact.h" #include "catalog/heap.h" #include "catalog/namespace.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/pg_inherits_fn.h" -#endif #include "catalog/pg_type.h" #include "commands/tablecmds.h" #include "executor/spi.h" @@ -35,6 +32,10 @@ #include "utils/syscache.h" #include "utils/snapmgr.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + #if PG_VERSION_NUM >= 100000 #include "utils/regproc.h" #include "utils/varlena.h" diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 233b8773..9d3ffb15 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -851,20 +851,7 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm; - -#if PG_VERSION_NUM >= 110000 - ParamExternData prmdata; - if (params->paramFetch != NULL) - prm = params->paramFetch(params, param->paramid, false, &prmdata); - else - prm = ¶ms->params[param->paramid - 1]; -#else - prm = ¶ms->params[param->paramid - 1]; - if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) - (*params->paramFetch) (params, param->paramid); -#endif - + ParamExternData *prm = CustomEvalParamExternCompat(param, params); if (OidIsValid(prm->ptype)) { From 087abe75abd95a6afb5366ff212e5845f61dbd6d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 13 Jul 2018 15:09:55 +0300 Subject: [PATCH 0900/1124] resolve fixme regarding flushing all caches --- src/hooks.c | 8 +++--- src/include/relation_info.h | 16 +++++++++-- src/relation_info.c | 57 ++++++++++++++++++++++++++++++------- 3 files changed, 64 insertions(+), 17 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 28e52f47..cbed54f4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -834,9 +834,9 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidation event for whole cache */ if (relid == InvalidOid) { - invalidate_pathman_status_info_cache(); - - /* FIXME: reset 
other caches as well */ + invalidate_bounds_cache(); + invalidate_parents_cache(); + invalidate_status_cache(); } /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ @@ -855,7 +855,7 @@ pathman_relcache_hook(Datum arg, Oid relid) forget_parent_of_partition(relid); /* Invalidate PartStatusInfo entry if needed */ - invalidate_pathman_status_info(relid); + forget_status_of_relation(relid); } } diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 2f37406c..d2a3d053 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -184,6 +184,16 @@ typedef struct PartBoundInfo uint32 part_idx; } PartBoundInfo; +static inline void +FreePartBoundInfo(PartBoundInfo *pbin) +{ + if (pbin->parttype == PT_RANGE) + { + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); + } +} + /* * PartRelationInfo * Per-relation partitioning information. @@ -341,8 +351,8 @@ PartTypeToCString(PartType parttype) /* Status chache */ -void invalidate_pathman_status_info(Oid relid); -void invalidate_pathman_status_info_cache(void); +void forget_status_of_relation(Oid relid); +void invalidate_status_cache(void); /* Dispatch cache */ bool has_pathman_relation_info(Oid relid); @@ -359,11 +369,13 @@ void shout_if_prel_is_invalid(const Oid parent_oid, /* Bounds cache */ void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +void invalidate_bounds_cache(void); /* Parents cache */ void cache_parent_of_partition(Oid partition, Oid parent); void forget_parent_of_partition(Oid partition); Oid get_parent_of_partition(Oid partition); +void invalidate_parents_cache(void); /* Partitioning expression routines */ Node *parse_partitioning_expression(const Oid relid, diff --git a/src/relation_info.c b/src/relation_info.c index 1f8a1ba1..a18ceeec 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -187,7 +187,7 @@ 
init_relation_info_static_data(void) /* Invalidate PartStatusInfo for 'relid' */ void -invalidate_pathman_status_info(Oid relid) +forget_status_of_relation(Oid relid) { PartStatusInfo *psin; PartParentInfo *ppar; @@ -225,7 +225,7 @@ invalidate_pathman_status_info(Oid relid) /* Invalidate all PartStatusInfo entries */ void -invalidate_pathman_status_info_cache(void) +invalidate_status_cache(void) { invalidate_psin_entries_using_relid(InvalidOid); } @@ -241,14 +241,14 @@ invalidate_psin_entries_using_relid(Oid relid) while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) { - if (relid == InvalidOid || + if (!OidIsValid(relid) || psin->relid == relid || (psin->prel && PrelHasPartition(psin->prel, relid))) { /* Perform invalidation */ invalidate_psin_entry(psin); - /* Exit if found */ + /* Exit if exact match */ if (OidIsValid(relid)) { hash_seq_term(&status); @@ -952,15 +952,10 @@ forget_bounds_of_partition(Oid partition) NULL) : NULL; /* don't even bother */ - /* Free this entry */ if (pbin) { - /* Call pfree() if it's RANGE bounds */ - if (pbin->parttype == PT_RANGE) - { - FreeBound(&pbin->range_min, pbin->byval); - FreeBound(&pbin->range_max, pbin->byval); - } + /* Free this entry */ + FreePartBoundInfo(pbin); /* Finally remove this entry from cache */ pathman_cache_search_relid(bounds_cache, @@ -1027,6 +1022,26 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) return pbin; } +void +invalidate_bounds_cache(void) +{ + HASH_SEQ_STATUS status; + PartBoundInfo *pbin; + + Assert(offsetof(PartBoundInfo, child_relid) == 0); + + hash_seq_init(&status, bounds_cache); + + while ((pbin = hash_seq_search(&status)) != NULL) + { + FreePartBoundInfo(pbin); + + pathman_cache_search_relid(bounds_cache, + pbin->child_relid, + HASH_REMOVE, NULL); + } +} + /* * Get constraint expression tree of a partition. 
* @@ -1258,6 +1273,26 @@ get_parent_of_partition(Oid partition) } } +void +invalidate_parents_cache(void) +{ + HASH_SEQ_STATUS status; + PartParentInfo *ppar; + + Assert(offsetof(PartParentInfo, child_relid) == 0); + + hash_seq_init(&status, parents_cache); + + while ((ppar = hash_seq_search(&status)) != NULL) + { + /* This is a plain structure, no need to pfree() */ + + pathman_cache_search_relid(parents_cache, + ppar->child_relid, + HASH_REMOVE, NULL); + } +} + /* * Partitioning expression routines. From a69b12264469541b3e6c90ce4fead76460687e68 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 19 Jul 2018 07:37:06 +0300 Subject: [PATCH 0901/1124] Fix shardman's COPY FROM as it got a bit rotten in the 'next' branch --- src/utility_stmt_hooking.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 553f7c8e..fcd6a1dc 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -630,9 +630,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Handle foreign tables */ else { - child_result_rel->ri_FdwRoutine->ForeignNextCopyFrom(estate, - child_rri, - cstate); + child_rri->ri_FdwRoutine->ForeignNextCopyFrom(estate, + child_rri, + cstate); } #endif @@ -706,7 +706,7 @@ prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, FdwCopyFromIsSupported(fdw_routine)) { CopyState cstate = (CopyState) rps_storage->init_rri_holder_cb_arg; - ResultRelInfo *parent_rri = rps_storage->saved_rel_info; + ResultRelInfo *parent_rri = rps_storage->base_rri; EState *estate = rps_storage->estate; fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri); From f8d5a5a45539b1551c1e5a8a29196bb266d17a46 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Jul 2018 13:58:40 +0300 Subject: [PATCH 0902/1124] Postgres Pro Enterprise compatibility fixes by Victor Wagner (@vbwagner) --- src/include/compat/pg_compat.h | 12 ++++++++++++ src/init.c | 2 +- 2 files changed, 13 
insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 8632578e..4228d264 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -626,6 +626,18 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * HeapTupleGetXmin() + * Vanilla PostgreSQL has HeaptTupleHeaderGetXmin, but for 64-bit xid + * we need access to entire tuple, not just its header. + */ +#ifdef XID_IS_64BIT +# define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) +#else +# define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) +#endif + + /* * ------------- * Common code diff --git a/src/init.c b/src/init.c index 569a4c2f..3435fdc8 100644 --- a/src/init.c +++ b/src/init.c @@ -675,7 +675,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set xmin if necessary */ if (xmin) - *xmin = HeapTupleHeaderGetXmin(htup->t_data); + *xmin = HeapTupleGetXminCompat(htup); /* Set ItemPointer if necessary */ if (iptr) From 91005d048f07c87a850da82af3a6df561fea3bb2 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 23 Jul 2018 14:07:44 +0300 Subject: [PATCH 0903/1124] fix python tests --- tests/python/partitioning_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 2c290f8d..6a435c89 100755 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -19,7 +19,6 @@ from distutils.version import LooseVersion from testgres import get_new_node, get_pg_version -from testgres.utils import pg_version_ge # set setup base logging config, it can be turned on by `use_logging` # parameter on node setup From b23712b640673dc81d7940bc01900ae832979e4c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Jul 2018 17:19:25 +0300 Subject: [PATCH 0904/1124] refresh README.md --- README.md | 77 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 
49 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index c89d99de..1b4dcf1b 100644 --- a/README.md +++ b/README.md @@ -63,13 +63,13 @@ More interesting features are yet to come. Stay tuned! * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; + * `PartitionRouter` for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); * Improved `COPY FROM` statement that is able to insert rows directly into partitions; - * UPDATE triggers generation out of the box (will be replaced with custom nodes too); * User-defined callbacks for partition creation event handling; * Non-blocking concurrent table partitioning; * FDW support (foreign partitions); - * Various GUC toggles and configurable settings. + * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. ## Installation guide To install `pg_pathman`, execute this in the module's directory: @@ -121,8 +121,8 @@ Although it's possible to get major and minor version numbers using `\dx pg_path ### Partition creation ```plpgsql -create_hash_partitions(relation REGCLASS, - expr TEXT, +create_hash_partitions(parent_relid REGCLASS, + expression TEXT, partitions_count INTEGER, partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, @@ -131,21 +131,21 @@ create_hash_partitions(relation REGCLASS, Performs HASH partitioning for `relation` by partitioning expression `expr`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. 
See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, bounds ANYARRAY, partition_names TEXT[] DEFAULT NULL, @@ -181,10 +181,12 @@ stop_concurrent_part_task(relation REGCLASS) Stops a background worker performing a concurrent partitioning task. Note: worker will exit after it finishes relocating a current batch. ### Triggers -```plpgsql -create_update_triggers(parent REGCLASS) -``` -Creates a for-each-row trigger to enable cross-partition UPDATE on a table partitioned by HASH/RANGE. The trigger is not created automatically because of the overhead caused by its function. You don't have to use this feature unless partitioning key might change during an UPDATE. + +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*. + +Each inserted row results in execution of BEFORE/AFTER INSERT trigger functions of a **corresponding partition**. +Each updated row results in execution of BEFORE/AFTER UPDATE trigger functions of a **corresponding partition**. +Each moved row (cross-partition update) results in execution of BEFORE UPDATE + BEFORE/AFTER DELETE + BEFORE/AFTER INSERT trigger functions of **corresponding partitions**. 
### Post-creation partition management ```plpgsql @@ -196,9 +198,10 @@ Replaces specified partition of HASH-partitioned table with another table. The ` ```plpgsql -split_range_partition(partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) +split_range_partition(partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. @@ -208,21 +211,21 @@ merge_range_partitions(variadic partitions REGCLASS[]) Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition. ```plpgsql -append_range_partition(parent REGCLASS, +append_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Append new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -prepend_range_partition(parent REGCLASS, +prepend_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Prepend new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -add_range_partition(relation REGCLASS, +add_range_partition(parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, @@ -236,26 +239,26 @@ drop_range_partition(partition TEXT, delete_data BOOLEAN DEFAULT TRUE) Drop RANGE partition and all of its data if `delete_data` is true. ```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) +attach_range_partition(parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) ``` Attach partition to the existing RANGE-partitioned relation. 
The attached table must have exactly the same structure as the parent table, including the dropped columns. Partition creation callback is invoked if set (see `pathman_config_params`). ```plpgsql -detach_range_partition(partition REGCLASS) +detach_range_partition(partition_relid REGCLASS) ``` Detach partition from the existing RANGE-partitioned relation. ```plpgsql -disable_pathman_for(relation TEXT) +disable_pathman_for(parent_relid REGCLASS) ``` Permanently disable `pg_pathman` partitioning mechanism for the specified parent table and remove the insert trigger if it exists. All partitions and data remain unchanged. ```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) +drop_partitions(parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) ``` Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`. @@ -347,7 +350,7 @@ CREATE TABLE IF NOT EXISTS pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT TRUE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0, + init_callback TEXT DEFAULT NULL, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); ``` This table stores optional parameters which override standard behavior. @@ -414,6 +417,7 @@ Shows memory consumption of various caches. - `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: @@ -430,6 +434,22 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` +`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. 
when you update any column of a partitioning key). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. + +```plpgsql +EXPLAIN (COSTS OFF) +UPDATE partitioned_table +SET value = value + 1 WHERE value = 2; + QUERY PLAN +--------------------------------------------------- + Update on partitioned_table_0 + -> Custom Scan (PartitionRouter) + -> Custom Scan (PartitionFilter) + -> Seq Scan on partitioned_table_0 + Filter: (value = 2) +(5 rows) +``` + `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: ``` VARIABLE OP PARAM @@ -580,7 +600,7 @@ NOTICE: 100 rows copied from part_test_2 (3 rows) ``` -- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` GUC variable. +- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` [GUC](#disabling-pg_pathman) variable. ### HASH partitioning Consider an example of HASH partitioning. 
First create a table with some integer column: @@ -710,7 +730,8 @@ There are several user-accessible [GUC](https://www.postgresql.org/docs/9.5/stat - `pg_pathman.enable` --- disable (or enable) `pg_pathman` **completely** - `pg_pathman.enable_runtimeappend` --- toggle `RuntimeAppend` custom node on\off - `pg_pathman.enable_runtimemergeappend` --- toggle `RuntimeMergeAppend` custom node on\off - - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off + - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off (for INSERTs) + - `pg_pathman.enable_partitionrouter` --- toggle `PartitionRouter` custom node on\off (for cross-partition UPDATEs) - `pg_pathman.enable_auto_partition` --- toggle automatic partition creation on\off (per session) - `pg_pathman.enable_bounds_cache` --- toggle bounds cache on\off (faster updates of partitioning scheme) - `pg_pathman.insert_into_fdw` --- allow INSERTs into various FDWs `(disabled | postgres | any_fdw)` From 784170a7edbdfec4323a48ca8f7d24ea0cc68429 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 25 Jul 2018 17:23:21 +0300 Subject: [PATCH 0905/1124] more fixes to README.md --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 1b4dcf1b..2a2796d7 100644 --- a/README.md +++ b/README.md @@ -58,16 +58,16 @@ More interesting features are yet to come. Stay tuned! 
* HASH and RANGE partitioning schemes; * Partitioning by expression and composite key; - * Both automatic and manual partition management; + * Both automatic and manual [partition management](#post-creation-partition-management); * Support for integer, floating point, date and other types, including domains; * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; - * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; - * `PartitionRouter` for cross-partition UPDATE queries (instead of triggers); + * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; + * [`PartitionRouter`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); * Improved `COPY FROM` statement that is able to insert rows directly into partitions; - * User-defined callbacks for partition creation event handling; - * Non-blocking concurrent table partitioning; + * [User-defined callbacks](#additional-parameters) for partition creation event handling; + * Non-blocking [concurrent table partitioning](#data-migration); * FDW support (foreign partitions); * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. @@ -182,17 +182,17 @@ Stops a background worker performing a concurrent partitioning task. Note: worke ### Triggers -Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*. +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*: -Each inserted row results in execution of BEFORE/AFTER INSERT trigger functions of a **corresponding partition**. 
-Each updated row results in execution of BEFORE/AFTER UPDATE trigger functions of a **corresponding partition**. -Each moved row (cross-partition update) results in execution of BEFORE UPDATE + BEFORE/AFTER DELETE + BEFORE/AFTER INSERT trigger functions of **corresponding partitions**. +* Each **inserted row** results in execution of `BEFORE/AFTER INSERT` trigger functions of a *corresponding partition*. +* Each **updated row** results in execution of `BEFORE/AFTER UPDATE` trigger functions of a *corresponding partition*. +* Each **moved row** (cross-partition update) results in execution of `BEFORE UPDATE` + `BEFORE/AFTER DELETE` + `BEFORE/AFTER INSERT` trigger functions of *corresponding partitions*. ### Post-creation partition management ```plpgsql replace_hash_partition(old_partition REGCLASS, new_partition REGCLASS, - lock_parent BOOL DEFAULT TRUE) + lock_parent BOOLEAN DEFAULT TRUE) ``` Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. @@ -201,7 +201,7 @@ Replaces specified partition of HASH-partitioned table with another table. The ` split_range_partition(partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL) + tablespace TEXT DEFAULT NULL) ``` Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. @@ -434,7 +434,7 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` -`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when you update any column of a partitioning key). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. 
+`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when update of partitioning key requires that we move row to another partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. ```plpgsql EXPLAIN (COSTS OFF) From d1255a5394d2bcebc328fdcda4a24f329d0f162a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 22 Aug 2018 17:47:40 +0300 Subject: [PATCH 0906/1124] WIP various changes due to EPQ --- src/include/partition_router.h | 3 ++ src/partition_filter.c | 60 ++++++++++-------------- src/partition_router.c | 82 ++++++++++++++++++++------------- src/planner_tree_modification.c | 12 ++--- 4 files changed, 83 insertions(+), 74 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index e90893ba..e21940bb 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -32,6 +32,8 @@ typedef struct PartitionRouterState Oid partitioned_table; JunkFilter *junkfilter; + EPQState epqstate; + int epqparam; Plan *subplan; /* proxy variable to store subplan */ } PartitionRouterState; @@ -64,6 +66,7 @@ void init_partition_router_static_data(void); Plan *make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, + int epq_param, List *returning_list); diff --git a/src/partition_filter.c b/src/partition_filter.c index f96eb970..f2d06848 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -87,7 +87,7 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List * pfilter_build_tlist(Relation parent_rel, Plan *subplan); +static List *pfilter_build_tlist(Plan 
*subplan); static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -637,7 +637,6 @@ make_partition_filter(Plan *subplan, CmdType command_type) { CustomScan *cscan = makeNode(CustomScan); - Relation parent_rel; /* Currently we don't support ON CONFLICT clauses */ if (conflict_action != ONCONFLICT_NONE) @@ -655,14 +654,12 @@ make_partition_filter(Plan *subplan, cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); - /* Build an appropriate target list using a cached Relation entry */ - parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan); - RelationClose(parent_rel); - /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + /* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); @@ -830,44 +827,37 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e * Build partition filter's target list pointing to subplan tuple's elements. */ static List * -pfilter_build_tlist(Relation parent_rel, Plan *subplan) +pfilter_build_tlist(Plan *subplan) { List *result_tlist = NIL; ListCell *lc; foreach (lc, subplan->targetlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc), - *newtle = NULL; + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; if (IsA(tle->expr, Const)) - newtle = makeTargetEntry(copyObject(tle->expr), tle->resno, tle->resname, - tle->resjunk); - + { + /* TODO: maybe we should use copyObject(tle->expr)? 
*/ + newtle = makeTargetEntry(tle->expr, + tle->resno, + tle->resname, + tle->resjunk); + } else { - if (tle->expr != NULL && IsA(tle->expr, Var)) - { - Var *var = (Var *) palloc(sizeof(Var)); - *var = *((Var *)(tle->expr)); - var->varno = INDEX_VAR; - var->varattno = tle->resno; - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } - else - { - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - tle->resno, - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, - tle->resjunk); - } + Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, + tle->resno, + tle->resname, + tle->resjunk); } result_tlist = lappend(result_tlist, newtle); diff --git a/src/partition_router.c b/src/partition_router.c index a354fd87..94dbae05 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,9 +28,9 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate); +static bool ExecDeleteInternal(ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -65,6 +65,7 @@ Plan * make_partition_router(Plan *subplan, Oid parent_relid, Index parent_rti, + int epq_param, List *returning_list) { @@ -85,16 +86,17 @@ make_partition_router(Plan *subplan, cscan->scan.plan.plan_rows = subplan->plan_rows; cscan->scan.plan.plan_width = subplan->plan_width; - /* Setup methods and child plan */ + /* Setup methods, child plan and param number for EPQ */ cscan->methods = &partition_router_plan_methods; 
cscan->custom_plans = list_make1(pfilter); - - /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter->targetlist; + cscan->custom_private = list_make1(makeInteger(epq_param)); /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter->targetlist; + /* FIXME: should we use the same tlist? */ cscan->custom_scan_tlist = subplan->targetlist; @@ -113,7 +115,9 @@ partition_router_create_scan_state(CustomScan *node) state->css.methods = &partition_router_exec_methods; /* Extract necessary variables */ + state->epqparam = intVal(linitial(node->custom_private)); state->subplan = (Plan *) linitial(node->custom_plans); + return (Node *) state; } @@ -122,6 +126,10 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; + EvalPlanQualInit(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); + /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); } @@ -134,6 +142,7 @@ partition_router_exec(CustomScanState *node) TupleTableSlot *slot; PartitionRouterState *state = (PartitionRouterState *) node; +take_next_tuple: /* execute PartitionFilter child node */ slot = ExecProcNode(child_ps); @@ -141,7 +150,6 @@ partition_router_exec(CustomScanState *node) { ResultRelInfo *new_rri, /* new tuple owner */ *old_rri; /* previous tuple owner */ - EPQState epqstate; PartitionFilterState *child_state; char relkind; ItemPointerData ctid; @@ -203,8 +211,12 @@ partition_router_exec(CustomScanState *node) /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - EvalPlanQualSetSlot(&epqstate, child_state->subplan_slot); - ExecDeleteInternal(&ctid, &epqstate, estate); + EvalPlanQualSetSlot(&state->epqstate, child_state->subplan_slot); + if (!ExecDeleteInternal(&ctid, &state->epqstate, estate)) 
+ { + elog(INFO, "oops, deleted, taking next tuple!"); + goto take_next_tuple; + } /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ estate->es_result_relation_info = new_rri; @@ -244,40 +256,42 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e * ---------------------------------------------------------------- */ -static TupleTableSlot * +static bool ExecDeleteInternal(ItemPointer tupleid, EPQState *epqstate, EState *estate) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; + ResultRelInfo *rri; + Relation rel; HTSU_Result result; HeapUpdateFailureData hufd; /* * get information on the (current) result relation */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; + rri = estate->es_result_relation_info; + rel = rri->ri_RelationDesc; - /* BEFORE ROW DELETE Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_delete_before_row) + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) { - bool dodelete; - - dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tupleid, NULL); + elog(INFO, "kek!"); + } - if (!dodelete) - elog(ERROR, "the old row always should be deleted from child table"); + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggers(estate, epqstate, rri, tupleid, NULL)) + return false; } if (tupleid != NULL) { /* delete the tuple */ ldelete: - result = heap_delete_compat(resultRelationDesc, tupleid, + result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, true /* wait for commit */ , @@ -292,7 +306,7 @@ ExecDeleteInternal(ItemPointer tupleid, errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); /* Else, already deleted by self; nothing to do */ - return NULL; + return 
false; case HeapTupleMayBeUpdated: break; @@ -302,17 +316,19 @@ ExecDeleteInternal(ItemPointer tupleid, ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { TupleTableSlot *epqslot; epqslot = EvalPlanQual(estate, epqstate, - resultRelationDesc, - resultRelInfo->ri_RangeTableIndex, + rel, + rri->ri_RangeTableIndex, LockTupleExclusive, &hufd.ctid, hufd.xmax); + if (!TupIsNull(epqslot)) { Assert(tupleid != NULL); @@ -320,19 +336,19 @@ ExecDeleteInternal(ItemPointer tupleid, goto ldelete; } } + /* tuple already deleted; nothing to do */ - return NULL; + return false; default: elog(ERROR, "unrecognized heap_delete status: %u", result); - return NULL; } } else elog(ERROR, "tupleid should be specified for deletion"); - /* AFTER ROW DELETE Triggers */ - ExecARDeleteTriggersCompat(estate, resultRelInfo, tupleid, NULL, NULL); + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); - return NULL; + return true; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 9d3ffb15..22af4a73 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -666,15 +666,14 @@ partition_router_visitor(Plan *plan, void *context) if (modifytable_contains_fdw(rtable, modify_table)) { - ereport(NOTICE, - (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - errmsg("discovered mix of local and foreign tables, " - UPDATE_NODE_NAME " will be disabled"))); - return; + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); } lc3 = list_head(modify_table->returningLists); - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable), @@ -698,6 +697,7 @@ 
partition_router_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, + modify_table->epqParam, returning_list); } } From c0e3513e7ba99d2a4d449a47ebe6bd91a61a30e9 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 24 Aug 2018 18:03:23 +0300 Subject: [PATCH 0907/1124] WIP more fixes in EPQ handling --- src/hooks.c | 5 +- src/include/partition_filter.h | 9 ++-- src/include/partition_router.h | 7 ++- src/partition_filter.c | 30 ++++------- src/partition_router.c | 93 +++++++++++++++------------------ src/planner_tree_modification.c | 27 +++++++--- 6 files changed, 83 insertions(+), 88 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index cbed54f4..25a2ec5c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -987,8 +987,9 @@ pathman_executor_hook(QueryDesc *queryDesc, { CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - /* Check if this is a PartitionRouter node */ - if (IsPartitionRouterState(pr_state)) + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pr_state) && + IsPartitionRouterState(linitial(pr_state->custom_ps))) { ResultRelInfo *rri = &mt_state->resultRelInfo[i]; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index ef091e0b..b3ecffeb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -108,9 +108,6 @@ typedef struct ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ CmdType command_type; - bool warning_triggered; /* warning message counter */ - - TupleTableSlot *subplan_slot; /* slot that was returned from subplan */ TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ } PartitionFilterState; @@ -170,6 +167,8 @@ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storag TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); +List * pfilter_build_tlist(Plan *subplan); + /* 
Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, @@ -183,8 +182,8 @@ Plan * make_partition_filter(Plan *subplan, Oid parent_relid, Index parent_rti, OnConflictAction conflict_action, - List *returning_list, - CmdType command_type); + CmdType command_type, + List *returning_list); Node * partition_filter_create_scan_state(CustomScan *node); diff --git a/src/include/partition_router.h b/src/include/partition_router.h index e21940bb..a0ebf3dd 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -31,10 +31,13 @@ typedef struct PartitionRouterState CustomScanState css; Oid partitioned_table; - JunkFilter *junkfilter; + Plan *subplan; /* proxy variable to store subplan */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + EPQState epqstate; int epqparam; - Plan *subplan; /* proxy variable to store subplan */ + + ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index f2d06848..57f153c2 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -16,6 +16,7 @@ #include "partition_filter.h" #include "utils.h" +#include "access/htup_details.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" @@ -87,8 +88,6 @@ static Node *fix_returning_list_mutator(Node *node, void *state); static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List *pfilter_build_tlist(Plan *subplan); - static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -633,8 +632,8 @@ make_partition_filter(Plan *subplan, Oid parent_relid, Index parent_rti, OnConflictAction conflict_action, - List *returning_list, - CmdType command_type) + CmdType command_type, + List *returning_list) { CustomScan *cscan = makeNode(CustomScan); @@ -723,9 +722,6 @@ 
partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), RPS_RRI_CB(NULL, NULL)); - - /* No warnings yet */ - state->warning_triggered = false; } TupleTableSlot * @@ -739,16 +735,12 @@ partition_filter_exec(CustomScanState *node) TupleTableSlot *slot; slot = ExecProcNode(child_ps); - state->subplan_slot = slot; - - if (state->tup_convert_slot) - ExecClearTuple(state->tup_convert_slot); if (!TupIsNull(slot)) { MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; - ResultRelInfo *resultRelInfo; + ResultRelInfo *rri; /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -760,30 +752,28 @@ partition_filter_exec(CustomScanState *node) MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); - resultRelInfo = rri_holder->result_rel_info; + rri = rri_holder->result_rel_info; /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = resultRelInfo; + estate->es_result_relation_info = rri; /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { HeapTuple htup_old, htup_new; - Relation child_rel = resultRelInfo->ri_RelationDesc; + Relation child_rel = rri->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + ExecClearTuple(slot); /* Allocate new slot if needed */ if (!state->tup_convert_slot) state->tup_convert_slot = MakeTupleTableSlotCompat(); ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); - ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); - - /* Now replace the original slot */ - slot = state->tup_convert_slot; + slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); } return slot; @@ -826,7 +816,7 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * Build partition filter's 
target list pointing to subplan tuple's elements. */ -static List * +List * pfilter_build_tlist(Plan *subplan) { List *result_tlist = NIL; diff --git a/src/partition_router.c b/src/partition_router.c index 94dbae05..22c6435a 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,9 +28,10 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static bool ExecDeleteInternal(ItemPointer tupleid, - EPQState *epqstate, - EState *estate); +static TupleTableSlot *ExecDeleteInternal(TupleTableSlot *slot, + ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -70,15 +71,6 @@ make_partition_router(Plan *subplan, { CustomScan *cscan = makeNode(CustomScan); - Plan *pfilter; - - /* Create child PartitionFilter node */ - pfilter = make_partition_filter(subplan, - parent_relid, - parent_rti, - ONCONFLICT_NONE, - returning_list, - CMD_UPDATE); /* Copy costs etc */ cscan->scan.plan.startup_cost = subplan->startup_cost; @@ -88,14 +80,14 @@ make_partition_router(Plan *subplan, /* Setup methods, child plan and param number for EPQ */ cscan->methods = &partition_router_plan_methods; - cscan->custom_plans = list_make1(pfilter); + cscan->custom_plans = list_make1(subplan); cscan->custom_private = list_make1(makeInteger(epq_param)); /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter->targetlist; + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); /* FIXME: should we use the same tlist? 
*/ cscan->custom_scan_tlist = subplan->targetlist; @@ -126,6 +118,9 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) { PartitionRouterState *state = (PartitionRouterState *) node; + /* Remember current relation we're going to delete from */ + state->current_rri = estate->es_result_relation_info; + EvalPlanQualInit(&state->epqstate, estate, state->subplan, NIL, state->epqparam); @@ -148,26 +143,18 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *new_rri, /* new tuple owner */ - *old_rri; /* previous tuple owner */ - PartitionFilterState *child_state; - char relkind; - ItemPointerData ctid; + ResultRelInfo *current_rri = state->current_rri; + char relkind; + ItemPointerData ctid; ItemPointerSetInvalid(&ctid); - child_state = (PartitionFilterState *) child_ps; - Assert(child_state->command_type == CMD_UPDATE); - - old_rri = child_state->result_parts.base_rri; - new_rri = estate->es_result_relation_info; - /* Build new junkfilter if we have to */ if (state->junkfilter == NULL) { state->junkfilter = ExecInitJunkFilter(state->subplan->targetlist, - old_rri->ri_RelationDesc->rd_att->tdhasoid, + current_rri->ri_RelationDesc->rd_att->tdhasoid, ExecInitExtraTupleSlotCompat(estate)); state->junkfilter->jf_junkAttNo = @@ -177,13 +164,14 @@ partition_router_exec(CustomScanState *node) elog(ERROR, "could not find junk ctid column"); } - relkind = old_rri->ri_RelationDesc->rd_rel->relkind; + /* Additional checks based on 'relkind' */ + relkind = current_rri->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION) { Datum ctid_datum; bool ctid_isnull; - ctid_datum = ExecGetJunkAttribute(child_state->subplan_slot, + ctid_datum = ExecGetJunkAttribute(slot, state->junkfilter->jf_junkAttNo, &ctid_isnull); @@ -199,30 +187,26 @@ partition_router_exec(CustomScanState *node) else elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); - /* - * Clean from junk attributes before INSERT, - * but only 
if slot wasn't transformed in PartitionFilter. - */ - if (TupIsNull(child_state->tup_convert_slot)) - slot = ExecFilterJunk(state->junkfilter, slot); + elog(INFO, "deleting (%d, %d) from table: %s", + ItemPointerGetBlockNumber(&ctid), + ItemPointerGetOffsetNumber(&ctid), + get_rel_name(RelationGetRelid(current_rri->ri_RelationDesc))); - /* Magic: replace current ResultRelInfo with parent's one (DELETE) */ - estate->es_result_relation_info = old_rri; + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - EvalPlanQualSetSlot(&state->epqstate, child_state->subplan_slot); - if (!ExecDeleteInternal(&ctid, &state->epqstate, estate)) + slot = ExecDeleteInternal(slot, &ctid, &state->epqstate, estate); + + if (TupIsNull(slot)) { elog(INFO, "oops, deleted, taking next tuple!"); goto take_next_tuple; } - /* Magic: replace parent's ResultRelInfo with child's one (INSERT) */ - estate->es_result_relation_info = new_rri; - /* Tuple will be inserted by ModifyTable */ - return slot; + return ExecFilterJunk(state->junkfilter, slot); } return NULL; @@ -231,7 +215,10 @@ partition_router_exec(CustomScanState *node) void partition_router_end(CustomScanState *node) { + PartitionRouterState *state = (PartitionRouterState *) node; + Assert(list_length(node->custom_ps) == 1); + EvalPlanQualEnd(&state->epqstate); ExecEndNode((PlanState *) linitial(node->custom_ps)); } @@ -256,8 +243,9 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e * ---------------------------------------------------------------- */ -static bool -ExecDeleteInternal(ItemPointer tupleid, +static TupleTableSlot * +ExecDeleteInternal(TupleTableSlot *slot, + ItemPointer tupleid, EPQState *epqstate, EState *estate) { @@ -284,13 +272,15 @@ ExecDeleteInternal(ItemPointer tupleid, rri->ri_TrigDesc->trig_delete_before_row) { if (!ExecBRDeleteTriggers(estate, epqstate, rri, 
tupleid, NULL)) - return false; + return NULL; } if (tupleid != NULL) { - /* delete the tuple */ + EvalPlanQualSetSlot(epqstate, slot); + ldelete: + /* delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, @@ -305,8 +295,8 @@ ExecDeleteInternal(ItemPointer tupleid, errmsg("tuple to be updated was already modified by an operation triggered by the current command"), errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - /* Else, already deleted by self; nothing to do */ - return false; + /* Already deleted by self; nothing to do */ + return NULL; case HeapTupleMayBeUpdated: break; @@ -333,12 +323,13 @@ ExecDeleteInternal(ItemPointer tupleid, { Assert(tupleid != NULL); *tupleid = hufd.ctid; + slot = epqslot; goto ldelete; } } - /* tuple already deleted; nothing to do */ - return false; + /* Tuple already deleted; nothing to do */ + return NULL; default: elog(ERROR, "unrecognized heap_delete status: %u", result); @@ -350,5 +341,5 @@ ExecDeleteInternal(ItemPointer tupleid, /* AFTER ROW DELETE triggers */ ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); - return true; + return slot; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 22af4a73..95706a7e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -618,7 +618,8 @@ partition_filter_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); lc3 = list_head(modify_table->returningLists); - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); @@ -638,8 +639,8 @@ partition_filter_visitor(Plan *plan, void *context) lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, modify_table->onConflictAction, - 
returning_list, - CMD_INSERT); + modify_table->operation, + returning_list); } } } @@ -686,7 +687,9 @@ partition_router_visitor(Plan *plan, void *context) /* Check that table is partitioned */ if (has_pathman_relation_info(relid)) { - List *returning_list = NIL; + List *returning_list = NIL; + Plan *prouter, + *pfilter; /* Extract returning list if possible */ if (lc3) @@ -695,10 +698,18 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - lfirst(lc1) = make_partition_router((Plan *) lfirst(lc1), relid, - modify_table->nominalRelation, - modify_table->epqParam, - returning_list); + prouter = make_partition_router((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, + modify_table->epqParam, + returning_list); + + pfilter = make_partition_filter((Plan *) prouter, relid, + modify_table->nominalRelation, + ONCONFLICT_NONE, + CMD_UPDATE, + returning_list); + + lfirst(lc1) = pfilter; } } } From c907d6ed150a9d9810394259ebc63c6121f21e3f Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Aug 2018 14:26:22 +0300 Subject: [PATCH 0908/1124] all tests pass --- expected/pathman_update_node.out | 8 ++++---- src/partition_router.c | 15 ++++----------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 125eedd4..e68bb9ae 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -18,8 +18,8 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 1 QUERY PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRouter) - -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) -> Bitmap Index Scan on test_range_2_val_idx @@ -31,8 +31,8 @@ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = QUERY 
PLAN ------------------------------------------------------------------- Update on test_range_2 - -> Custom Scan (PartitionRouter) - -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) -> Bitmap Heap Scan on test_range_2 Recheck Cond: (val = '15'::numeric) -> Bitmap Index Scan on test_range_2_val_idx diff --git a/src/partition_router.c b/src/partition_router.c index 22c6435a..f16b7564 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -28,7 +28,7 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *ExecDeleteInternal(TupleTableSlot *slot, +static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, ItemPointer tupleid, EPQState *epqstate, EState *estate); @@ -187,23 +187,16 @@ partition_router_exec(CustomScanState *node) else elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); - elog(INFO, "deleting (%d, %d) from table: %s", - ItemPointerGetBlockNumber(&ctid), - ItemPointerGetOffsetNumber(&ctid), - get_rel_name(RelationGetRelid(current_rri->ri_RelationDesc))); - /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = ExecDeleteInternal(slot, &ctid, &state->epqstate, estate); + slot = router_delete_tuple(slot, &ctid, &state->epqstate, estate); + /* We require a tuple */ if (TupIsNull(slot)) - { - elog(INFO, "oops, deleted, taking next tuple!"); goto take_next_tuple; - } /* Tuple will be inserted by ModifyTable */ return ExecFilterJunk(state->junkfilter, slot); @@ -244,7 +237,7 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e */ static TupleTableSlot * -ExecDeleteInternal(TupleTableSlot *slot, +router_delete_tuple(TupleTableSlot *slot, ItemPointer tupleid, EPQState *epqstate, EState *estate) From 
ee0b8272598beb90889bbc3183cd708cfd106b91 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 27 Aug 2018 22:37:45 +0300 Subject: [PATCH 0909/1124] PartitionRouter: call before row update triggers --- src/partition_router.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index f16b7564..22560109 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -29,9 +29,9 @@ CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate); + ItemPointer tupleid, + EPQState *epqstate, + EState *estate); void init_partition_router_static_data(void) @@ -232,24 +232,24 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * ---------------------------------------------------------------- * ExecDeleteInternal - * Basicly is a copy of ExecDelete from executor/nodeModifyTable.c + * This is a modified copy of ExecDelete from executor/nodeModifyTable.c * ---------------------------------------------------------------- */ static TupleTableSlot * router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate) + ItemPointer tupleid, + EPQState *epqstate, + EState *estate) { ResultRelInfo *rri; Relation rel; HTSU_Result result; HeapUpdateFailureData hufd; - /* - * get information on the (current) result relation - */ + EvalPlanQualSetSlot(epqstate, slot); + + /* Get information on the (current) result relation */ rri = estate->es_result_relation_info; rel = rri->ri_RelationDesc; @@ -257,7 +257,9 @@ router_delete_tuple(TupleTableSlot *slot, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_update_before_row) { - elog(INFO, "kek!"); + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; } /* 
BEFORE ROW DELETE triggers */ @@ -270,10 +272,8 @@ router_delete_tuple(TupleTableSlot *slot, if (tupleid != NULL) { - EvalPlanQualSetSlot(epqstate, slot); - ldelete: - /* delete the tuple */ + /* Delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, @@ -328,8 +328,7 @@ router_delete_tuple(TupleTableSlot *slot, elog(ERROR, "unrecognized heap_delete status: %u", result); } } - else - elog(ERROR, "tupleid should be specified for deletion"); + else elog(ERROR, "tupleid should be specified for deletion"); /* AFTER ROW DELETE triggers */ ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); From c7dbc90e95687fdee89af7606c6d01e73d1e4ef7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 13:56:17 +0300 Subject: [PATCH 0910/1124] fix python tests --- tests/python/partitioning_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 41390d4a..e234f7ff 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1044,12 +1044,12 @@ def test_update_node_plan1(self): ], "Node Type": "Custom Scan", "Parent Relationship": "child", - "Custom Plan Provider": "PartitionFilter" + "Custom Plan Provider": "PartitionRouter" } ], "Node Type": "Custom Scan", "Parent Relationship": "Member", - "Custom Plan Provider": "PartitionRouter" + "Custom Plan Provider": "PartitionFilter" } ''' From 4abee5cbc116f56200f382dc2373d990fa6694aa Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 14:53:31 +0300 Subject: [PATCH 0911/1124] restore compatibility with PG 11 --- src/include/compat/pg_compat.h | 16 ++++++++++++++++ src/partition_router.c | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index ed152fe3..f2d5ba63 100644 --- a/src/include/compat/pg_compat.h +++ 
b/src/include/compat/pg_compat.h @@ -685,6 +685,22 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #endif +/* + * ExecBRDeleteTriggers() + */ +#if PG_VERSION_NUM >= 110000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot)) +#else +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple)) +#endif + + /* * ExecARDeleteTriggers() */ diff --git a/src/partition_router.c b/src/partition_router.c index 22560109..9c0a041e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -266,7 +266,7 @@ router_delete_tuple(TupleTableSlot *slot, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_delete_before_row) { - if (!ExecBRDeleteTriggers(estate, epqstate, rri, tupleid, NULL)) + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) return NULL; } From a26243f614e93886b05692603944e92b4ec86d2c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 18:35:49 +0300 Subject: [PATCH 0912/1124] remove obsolete tests --- expected/pathman_update_trigger.out | 289 ---------------------------- sql/pathman_update_trigger.sql | 164 ---------------- 2 files changed, 453 deletions(-) delete mode 100644 expected/pathman_update_trigger.out delete mode 100644 sql/pathman_update_trigger.sql diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out deleted file mode 100644 index fdc5438a..00000000 --- a/expected/pathman_update_trigger.out +++ /dev/null @@ -1,289 +0,0 @@ -\set VERBOSITY terse -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO 
test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); - create_range_partitions -------------------------- - 10 -(1 row) - -SELECT create_update_triggers('test_update_trigger.test_range'); - create_update_triggers ------------------------- - -(1 row) - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_1 | 5 | 1 - test_update_trigger.test_range_1 | 5 | 10 - test_update_trigger.test_range_1 | 5 | 2 - test_update_trigger.test_range_1 | 5 | 3 - test_update_trigger.test_range_1 | 5 | 4 - test_update_trigger.test_range_1 | 5 | 5 - test_update_trigger.test_range_1 | 5 | 6 - test_update_trigger.test_range_1 | 5 | 7 - test_update_trigger.test_range_1 | 5 | 8 - test_update_trigger.test_range_1 | 5 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_3 | 21 | 11 - test_update_trigger.test_range_3 | 22 | 12 - test_update_trigger.test_range_3 | 23 | 13 - test_update_trigger.test_range_3 | 24 | 14 - test_update_trigger.test_range_3 | 25 | 15 - test_update_trigger.test_range_3 | 26 | 16 - test_update_trigger.test_range_3 | 27 | 17 - test_update_trigger.test_range_3 | 28 | 18 - test_update_trigger.test_range_3 | 29 | 19 - 
test_update_trigger.test_range_3 | 30 | 20 - test_update_trigger.test_range_3 | 21 | 21 - test_update_trigger.test_range_3 | 22 | 22 - test_update_trigger.test_range_3 | 23 | 23 - test_update_trigger.test_range_3 | 24 | 24 - test_update_trigger.test_range_3 | 25 | 25 - test_update_trigger.test_range_3 | 26 | 26 - test_update_trigger.test_range_3 | 27 | 27 - test_update_trigger.test_range_3 | 28 | 28 - test_update_trigger.test_range_3 | 29 | 29 - test_update_trigger.test_range_3 | 30 | 30 -(20 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_9 | 90 | 80 - test_update_trigger.test_range_9 | 90 | 90 -(2 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_11 | -1 | 50 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_10 | 100 | test! 
-(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; -ERROR: cannot spawn a partition -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 70 | 70 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 65 | 65 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); - attach_range_partition ------------------------------------- - test_update_trigger.test_range_inv -(1 row) - -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - tableoid | val | comment -------------------------------------+-----+--------- - test_update_trigger.test_range_inv | 105 | 60 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment 
CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); - append_range_partition ------------------------------------ - test_update_trigger.test_range_12 -(1 row) - -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; -/* Check values #9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - tableoid | val ------------------------------------+----- - test_update_trigger.test_range_12 | 115 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); - create_hash_partitions ------------------------- - 3 -(1 row) - -SELECT create_update_triggers('test_update_trigger.test_hash'); - create_update_triggers ------------------------- - -(1 row) - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - tableoid | val | comment ----------------------------------+-----+--------- - test_update_trigger.test_hash_2 | 1 | 1 - test_update_trigger.test_hash_2 | 1 | 10 - test_update_trigger.test_hash_2 | 1 | 2 - test_update_trigger.test_hash_2 | 1 | 3 - test_update_trigger.test_hash_2 | 1 | 4 - test_update_trigger.test_hash_2 | 1 | 5 - test_update_trigger.test_hash_2 | 1 | 6 - test_update_trigger.test_hash_2 | 1 | 7 - test_update_trigger.test_hash_2 | 1 | 8 - test_update_trigger.test_hash_2 | 1 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM 
test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - tableoid | val | comment -----------+-----+--------- -(0 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 18 other objects -DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_trigger.sql b/sql/pathman_update_trigger.sql deleted file mode 100644 index a5f5b10e..00000000 --- a/sql/pathman_update_trigger.sql +++ /dev/null @@ -1,164 +0,0 @@ -\set VERBOSITY terse - -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; - - - -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); -SELECT create_update_triggers('test_update_trigger.test_range'); - - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; - -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row (create new 
partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; - -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; - -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; - -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; - -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; - -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; - -/* Check values 
#9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - -SELECT count(*) FROM test_update_trigger.test_range; - - - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); -SELECT create_update_triggers('test_update_trigger.test_hash'); - - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - - -DROP SCHEMA test_update_trigger CASCADE; -DROP EXTENSION pg_pathman; From 59ce6526e1eda45cd6037ad030692ce6ef38561e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 29 Aug 2018 18:57:27 +0300 Subject: [PATCH 0913/1124] more tests for PartitionRouter, add FIXME --- Makefile | 1 + expected/pathman_update_triggers.out | 110 +++++++++++++++++++++++++++ sql/pathman_update_triggers.sql | 70 +++++++++++++++++ src/hooks.c | 1 + 4 files changed, 182 insertions(+) create mode 100644 expected/pathman_update_triggers.out create mode 100644 sql/pathman_update_triggers.sql diff --git a/Makefile b/Makefile index 44f80b79..f9567f94 100644 --- a/Makefile +++ b/Makefile @@ -55,6 +55,7 @@ REGRESS = pathman_array_qual \ pathman_runtime_nodes \ pathman_subpartitions \ pathman_update_node \ + pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ pathman_views diff --git a/expected/pathman_update_triggers.out 
b/expected/pathman_update_triggers.out new file mode 100644 index 00000000..5c1092f2 --- /dev/null +++ b/expected/pathman_update_triggers.out @@ -0,0 +1,110 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON 
test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +set pg_pathman.enable_partitionrouter = t; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_2) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT ROW (test_2) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER 
INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +DROP SCHEMA test_update_triggers CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql new file mode 100644 index 00000000..c289d12c --- /dev/null +++ b/sql/pathman_update_triggers.sql @@ -0,0 +1,70 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; + + + +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; + + +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create 
trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + + +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + + +insert into test_update_triggers.test values (1); + +set pg_pathman.enable_partitionrouter = t; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; + + + +DROP SCHEMA test_update_triggers CASCADE; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 25a2ec5c..ac0e595e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -978,6 +978,7 @@ pathman_executor_hook(QueryDesc *queryDesc, PlanState *state = (PlanState *) queryDesc->planstate; + /* FIXME: we should modify ALL ModifyTable nodes! They might be hidden deeper. 
*/ if (IsA(state, ModifyTableState)) { ModifyTableState *mt_state = (ModifyTableState *) state; From dde5eb2f9332011f4956637269391839599699c7 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Thu, 30 Aug 2018 14:54:48 +0300 Subject: [PATCH 0914/1124] pass correct plan to BeginForeignModify (based on patch #171 by @arssher) --- src/partition_filter.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 57f153c2..80d6ecff 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -976,13 +976,18 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, if (fdw_routine->PlanForeignModify) { RangeTblEntry *rte; - ModifyTableState mtstate; - List *fdw_private; Query query; + PlanState pstate, + *pstate_ptr; + ModifyTableState mtstate; PlannedStmt *plan; + + /* This is the value we'd like to get */ + List *fdw_private; + TupleDesc tupdesc; - int i, - target_attr; + int target_attr, + i; /* Fetch RangeTblEntry for partition */ rte = rt_fetch(rri->ri_RangeTableIndex, estate->es_range_table); @@ -1033,26 +1038,33 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, target_attr++; } - /* Create fake ModifyTableState */ - memset((void *) &mtstate, 0, sizeof(ModifyTableState)); + /* HACK: plan a fake query for FDW access to be planned as well */ + elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); + plan = standard_planner(&query, 0, NULL); + + /* HACK: create a fake PlanState */ + memset(&pstate, 0, sizeof(PlanState)); + pstate.plan = plan->planTree; + pstate_ptr = &pstate; + + /* HACK: create a fake ModifyTableState */ + memset(&mtstate, 0, sizeof(ModifyTableState)); NodeSetTag(&mtstate, T_ModifyTableState); mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; + mtstate.mt_plans = &pstate_ptr; + mtstate.mt_nplans = 1; + mtstate.mt_whichplan = 0; mtstate.resultRelInfo = rri; #if PG_VERSION_NUM < 110000 mtstate.mt_onconflict 
= ONCONFLICT_NONE; #endif - /* Plan fake query in for FDW access to be planned as well */ - elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); - plan = standard_planner(&query, 0, NULL); - /* Extract fdw_private from useless plan */ elog(DEBUG1, "FDW(%u): extract fdw_private", partid); - fdw_private = (List *) - linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); + fdw_private = linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); - /* call BeginForeignModify on 'rri' */ + /* HACK: call BeginForeignModify on 'rri' */ elog(DEBUG1, "FDW(%u): call BeginForeignModify on a fake INSERT node", partid); fdw_routine->BeginForeignModify(&mtstate, rri, fdw_private, 0, 0); From 0feb47b94292437e32314db4e2f8b51fa2536331 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 3 Sep 2018 14:47:41 +0300 Subject: [PATCH 0915/1124] implement state_tree_visitor() --- src/hooks.c | 33 +----- src/include/partition_router.h | 2 + src/include/planner_tree_modification.h | 11 +- src/partition_router.c | 32 ++++++ src/planner_tree_modification.c | 143 ++++++++++++++++++++---- 5 files changed, 170 insertions(+), 51 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index ac0e595e..1ebb726b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -976,35 +976,10 @@ pathman_executor_hook(QueryDesc *queryDesc, #define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) #endif - PlanState *state = (PlanState *) queryDesc->planstate; - - /* FIXME: we should modify ALL ModifyTable nodes! They might be hidden deeper. 
*/ - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pr_state) && - IsPartitionRouterState(linitial(pr_state->custom_ps))) - { - ResultRelInfo *rri = &mt_state->resultRelInfo[i]; - - /* - * HACK: We unset junkfilter to disable - * junk cleaning in ExecModifyTable. - */ - rri->ri_junkFilter = NULL; - - /* HACK: change UPDATE operation to INSERT */ - mt_state->operation = CMD_INSERT; - } - } - } + /* Prepare ModifyTable nodes for PartitionRouter hackery */ + state_tree_visitor((PlanState *) queryDesc->planstate, + prepare_modify_table_for_partition_router, + NULL); /* Call hooks set by other extensions if needed */ if (EXECUTOR_HOOK) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index a0ebf3dd..7c36641a 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -72,6 +72,8 @@ Plan *make_partition_router(Plan *subplan, int epq_param, List *returning_list); +void prepare_modify_table_for_partition_router(PlanState *state, void *context); + Node *partition_router_create_scan_state(CustomScan *node); diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 71fcf25d..b93224ba 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -25,9 +25,14 @@ void assign_query_id(Query *query); void reset_query_id_generator(void); /* Plan tree rewriting utility */ -void plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context); +void plan_tree_visitor(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context); + +/* PlanState tree rewriting utility */ +void state_tree_visitor(PlanState *state, + void (*visitor) 
(PlanState *state, void *context), + void *context); /* Query tree rewriting utility */ void pathman_transform_query(Query *parse, ParamListInfo params); diff --git a/src/partition_router.c b/src/partition_router.c index 9c0a041e..27ce88d8 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -95,6 +95,38 @@ make_partition_router(Plan *subplan, return &cscan->scan.plan; } +void +prepare_modify_table_for_partition_router(PlanState *state, void *context) +{ + if (IsA(state, ModifyTableState)) + { + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pr_state) && + IsPartitionRouterState(linitial(pr_state->custom_ps))) + { + ResultRelInfo *rri = &mt_state->resultRelInfo[i]; + + /* + * HACK: We unset junkfilter to disable + * junk cleaning in ExecModifyTable. 
+ */ + rri->ri_junkFilter = NULL; + + /* HACK: change UPDATE operation to INSERT */ + mt_state->operation = CMD_INSERT; + } + } + } +} + + Node * partition_router_create_scan_state(CustomScan *node) { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 95706a7e..58c92bd3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -113,6 +113,9 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static void partition_filter_visitor(Plan *plan, void *context); static void partition_router_visitor(Plan *plan, void *context); +static void state_visit_subplans(List *plans, void (*visitor) (), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); + static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); @@ -152,9 +155,9 @@ reset_query_id_generator(void) * 'visitor' is applied right before return. 
*/ void -plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context) +plan_tree_visitor(Plan *plan, + void (*visitor) (Plan *plan, void *context), + void *context) { ListCell *l; @@ -167,50 +170,152 @@ plan_tree_walker(Plan *plan, switch (nodeTag(plan)) { case T_SubqueryScan: - plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); + plan_tree_visitor(((SubqueryScan *) plan)->subplan, visitor, context); break; case T_CustomScan: - foreach(l, ((CustomScan *) plan)->custom_plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((CustomScan *) plan)->custom_plans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_ModifyTable: foreach (l, ((ModifyTable *) plan)->plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_Append: - foreach(l, ((Append *) plan)->appendplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((Append *) plan)->appendplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_MergeAppend: - foreach(l, ((MergeAppend *) plan)->mergeplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((MergeAppend *) plan)->mergeplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_BitmapAnd: - foreach(l, ((BitmapAnd *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((BitmapAnd *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; case T_BitmapOr: - foreach(l, ((BitmapOr *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((BitmapOr *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; default: break; } - plan_tree_walker(plan->lefttree, visitor, context); - plan_tree_walker(plan->righttree, visitor, context); + 
plan_tree_visitor(plan->lefttree, visitor, context); + plan_tree_visitor(plan->righttree, visitor, context); /* Apply visitor to the current node */ visitor(plan, context); } +void +state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *plan, void *context), + void *context) +{ + Plan *plan; + ListCell *lc; + + if (state == NULL) + return; + + plan = state->plan; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + state_tree_visitor(((SubqueryScanState *) state)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach (lc, ((CustomScanState *) state)->custom_ps) + state_tree_visitor((PlanState *) lfirst(lc),visitor, context); + break; + + case T_ModifyTable: + state_visit_members(((ModifyTableState *) state)->mt_plans, + ((ModifyTableState *) state)->mt_nplans, + visitor, context); + break; + + case T_Append: + state_visit_members(((AppendState *) state)->appendplans, + ((AppendState *) state)->as_nplans, + visitor, context); + break; + + case T_MergeAppend: + state_visit_members(((MergeAppendState *) state)->mergeplans, + ((MergeAppendState *) state)->ms_nplans, + visitor, context); + break; + + case T_BitmapAnd: + state_visit_members(((BitmapAndState *) state)->bitmapplans, + ((BitmapAndState *) state)->nplans, + visitor, context); + break; + + case T_BitmapOr: + state_visit_members(((BitmapOrState *) state)->bitmapplans, + ((BitmapOrState *) state)->nplans, + visitor, context); + break; + + default: + break; + } + + state_visit_subplans(state->initPlan, visitor, context); + state_visit_subplans(state->subPlan, visitor, context); + + state_tree_visitor(state->lefttree, visitor, context); + state_tree_visitor(state->righttree, visitor, context); + + /* Apply visitor to the current node */ + visitor(state, context); +} + +/* + * Walk a list of SubPlans (or initPlans, which also use SubPlan nodes). 
+ */ +static void +state_visit_subplans(List *plans, + void (*visitor) (), + void *context) +{ + ListCell *lc; + + foreach (lc, plans) + { + SubPlanState *sps = lfirst_node(SubPlanState, lc); + visitor(sps->planstate, context); + } +} + +/* + * Walk the constituent plans of a ModifyTable, Append, MergeAppend, + * BitmapAnd, or BitmapOr node. + */ +static void +state_visit_members(PlanState **planstates, int nplans, + void (*visitor) (), void *context) +{ + int i; + + for (i = 0; i < nplans; i++) + visitor(planstates[i], context); +} + /* * ------------------------------- @@ -586,7 +691,7 @@ void add_partition_filters(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_filter) - plan_tree_walker(plan, partition_filter_visitor, rtable); + plan_tree_visitor(plan, partition_filter_visitor, rtable); } /* Add PartitionRouter nodes to the plan tree */ @@ -594,7 +699,7 @@ void add_partition_routers(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_router) - plan_tree_walker(plan, partition_router_visitor, rtable); + plan_tree_visitor(plan, partition_router_visitor, rtable); } /* From 47633ba69ddc44be9266c0638f59bcf289de9c4a Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 15:52:08 +0300 Subject: [PATCH 0916/1124] get rid of ugly ExecEvalExprCompat() macro --- src/compat/pg_compat.c | 11 ----------- src/include/compat/pg_compat.h | 24 +++++++++--------------- src/partition_filter.c | 3 +-- src/pg_pathman.c | 3 +-- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 5547231e..4bc021fd 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -118,17 +118,6 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #endif -/* - * ExecEvalExpr - * - * global variables for macro wrapper evaluation - */ -#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 -Datum exprResult; -ExprDoneCond isDone; -#endif - - /* * get_all_actual_clauses */ diff --git 
a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f2d5ba63..5f1d59d4 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -367,26 +367,20 @@ extern void create_plain_partial_paths(PlannerInfo *root, * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. */ #if PG_VERSION_NUM >= 100000 -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ +#define ExecEvalExprCompat(expr, econtext, isNull) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 +static inline Datum +ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) +{ + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); -/* Variables for ExecEvalExprCompat() */ -extern Datum exprResult; -extern ExprDoneCond isDone; + if (isdone != ExprSingleResult) + elog(ERROR, "expression should return single value"); -/* Error handlers */ -static inline void mult_result_handler() -{ - elog(ERROR, "partitioning expression should return single value"); + return result; } - -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ -( \ - exprResult = ExecEvalExpr((expr), (econtext), (isNull), &isDone), \ - (isDone != ExprSingleResult) ? 
(errHandler)() : (0), \ - exprResult \ -) #endif diff --git a/src/partition_filter.c b/src/partition_filter.c index 80d6ecff..f51f7896 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -519,8 +519,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, expr_context->ecxt_scantuple = slot; /* Execute expression */ - value = ExecEvalExprCompat(expr_state, expr_context, - &isnull, mult_result_handler); + value = ExecEvalExprCompat(expr_state, expr_context, &isnull); if (isnull) elog(ERROR, ERR_PART_ATTR_NULL); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 588f5417..69497f92 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -202,8 +202,7 @@ ExtractConst(Node *node, const WalkerContext *context) /* Evaluate expression */ estate = ExecInitExpr((Expr *) node, NULL); - value = ExecEvalExprCompat(estate, econtext, &isnull, - mult_result_handler); + value = ExecEvalExprCompat(estate, econtext, &isnull); #if PG_VERSION_NUM >= 100000 /* Free temp econtext if needed */ From d946697879864cbd940552e131560a95d2b8a046 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 16:10:59 +0300 Subject: [PATCH 0917/1124] WIP: now PartitionRouter is able to use both UPDATE & DELETE + INSERT --- src/include/compat/pg_compat.h | 40 +++- src/include/partition_router.h | 8 +- src/include/relation_info.h | 1 + src/partition_router.c | 343 +++++++++++++++++++------------- src/planner_tree_modification.c | 2 +- src/relation_info.c | 4 +- 6 files changed, 250 insertions(+), 148 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 5f1d59d4..978279d2 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -364,7 +364,6 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* * ExecEvalExpr() - * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. 
*/ #if PG_VERSION_NUM >= 100000 #define ExecEvalExprCompat(expr, econtext, isNull) \ @@ -384,6 +383,33 @@ ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) #endif +/* + * ExecCheck() + */ +#if PG_VERSION_NUM < 100000 +static inline bool +ExecCheck(ExprState *state, ExprContext *econtext) +{ + Datum ret; + bool isnull; + MemoryContext old_mcxt; + + /* short-circuit (here and in ExecInitCheck) for empty restriction list */ + if (state == NULL) + return true; + + old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + ret = ExecEvalExprCompat(state, econtext, &isnull); + MemoryContextSwitchTo(old_mcxt); + + if (isnull) + return true; + + return DatumGetBool(ret); +} +#endif + + /* * extract_actual_join_clauses() */ @@ -790,11 +816,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * heap_delete() */ #if PG_VERSION_NUM >= 110000 -#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ - heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd), false) +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd), (changing_part)) #else -#define heap_delete_compat(relation, tid, cid, crosscheck, wait, hufd) \ - heap_delete((relation), (tid), (cid), (crosscheck), (wait), (hufd)) +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd)) #endif /* diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 7c36641a..f1526335 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -30,13 +30,15 @@ typedef struct PartitionRouterState { CustomScanState css; - Oid partitioned_table; - Plan *subplan; /* proxy variable to store subplan */ - JunkFilter *junkfilter; /* 'ctid' extraction facility */ + Plan *subplan; /* proxy variable to store 
subplan */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + ExprState *constraint; /* should tuple remain in partition? */ EPQState epqstate; int epqparam; + ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ + ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/include/relation_info.h b/src/include/relation_info.h index d2a3d053..3a5f0fa8 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,6 +367,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ +Expr *get_partition_constraint_expr(Oid partition); void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); void invalidate_bounds_cache(void); diff --git a/src/partition_router.c b/src/partition_router.c index 27ce88d8..fc8b50ba 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -20,6 +20,7 @@ #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#include "storage/bufmgr.h" #include "utils/guc.h" #include "utils/rel.h" @@ -28,10 +29,18 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; -static TupleTableSlot *router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate); + +static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); +static void router_lazy_init_constraint(PartitionRouterState *state); + +static ItemPointerData router_extract_ctid(PartitionRouterState *state, + TupleTableSlot *slot); + +static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted, + EState *estate); void init_partition_router_static_data(void) @@ -105,22 +114,15 @@ prepare_modify_table_for_partition_router(PlanState 
*state, void *context) for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *pr_state = (CustomScanState *) mt_state->mt_plans[i]; + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pr_state) && - IsPartitionRouterState(linitial(pr_state->custom_ps))) + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) { - ResultRelInfo *rri = &mt_state->resultRelInfo[i]; - - /* - * HACK: We unset junkfilter to disable - * junk cleaning in ExecModifyTable. - */ - rri->ri_junkFilter = NULL; - - /* HACK: change UPDATE operation to INSERT */ - mt_state->operation = CMD_INSERT; + /* HACK: PartitionRouter might change ModifyTable's state */ + pr_state->mt_state = mt_state; } } } @@ -166,8 +168,8 @@ partition_router_exec(CustomScanState *node) { EState *estate = node->ss.ps.state; PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - TupleTableSlot *slot; PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; take_next_tuple: /* execute PartitionFilter child node */ @@ -175,63 +177,36 @@ partition_router_exec(CustomScanState *node) if (!TupIsNull(slot)) { - ResultRelInfo *current_rri = state->current_rri; - char relkind; + bool deleted; ItemPointerData ctid; ItemPointerSetInvalid(&ctid); - /* Build new junkfilter if we have to */ - if (state->junkfilter == NULL) - { - state->junkfilter = - ExecInitJunkFilter(state->subplan->targetlist, - current_rri->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlotCompat(estate)); - - state->junkfilter->jf_junkAttNo = - ExecFindJunkAttribute(state->junkfilter, "ctid"); - - if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } - - /* Additional checks based on 'relkind' */ - relkind = current_rri->ri_RelationDesc->rd_rel->relkind; 
- if (relkind == RELKIND_RELATION) - { - Datum ctid_datum; - bool ctid_isnull; + /* Build new junkfilter lazily */ + router_lazy_init_junkfilter(state, estate); - ctid_datum = ExecGetJunkAttribute(slot, - state->junkfilter->jf_junkAttNo, - &ctid_isnull); + /* Build recheck constraint state lazily */ + router_lazy_init_constraint(state); - /* shouldn't ever get a null result... */ - if (ctid_isnull) - elog(ERROR, "ctid is NULL"); - - /* Get item pointer to tuple */ - ctid = *(ItemPointer) DatumGetPointer(ctid_datum); - } - else if (relkind == RELKIND_FOREIGN_TABLE) - elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); - else - elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); + /* Extract item pointer from current tuple */ + ctid = router_extract_ctid(state, slot); /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = current_rri; + estate->es_result_relation_info = state->current_rri; /* Delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = router_delete_tuple(slot, &ctid, &state->epqstate, estate); + slot = router_lock_or_delete_tuple(state, slot, &ctid, + &deleted, estate); /* We require a tuple */ if (TupIsNull(slot)) goto take_next_tuple; - /* Tuple will be inserted by ModifyTable */ - return ExecFilterJunk(state->junkfilter, slot); + /* HACK: change command type in ModifyTable */ + state->mt_state->operation = deleted ? 
CMD_INSERT : CMD_UPDATE; + + return slot; } return NULL; @@ -261,109 +236,205 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e } -/* - * ---------------------------------------------------------------- - * ExecDeleteInternal - * This is a modified copy of ExecDelete from executor/nodeModifyTable.c - * ---------------------------------------------------------------- - */ +static void +router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) +{ + Relation rel = state->current_rri->ri_RelationDesc; + + if (state->junkfilter == NULL) + { + state->junkfilter = + ExecInitJunkFilter(state->subplan->targetlist, + RelationGetDescr(rel)->tdhasoid, + ExecInitExtraTupleSlotCompat(estate)); + + state->junkfilter->jf_junkAttNo = + ExecFindJunkAttribute(state->junkfilter, "ctid"); + + if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } +} + +static void +router_lazy_init_constraint(PartitionRouterState *state) +{ + Relation rel = state->current_rri->ri_RelationDesc; + + if (state->constraint == NULL) + { + Expr *expr = get_partition_constraint_expr(RelationGetRelid(rel)); + state->constraint = ExecInitExpr(expr, NULL); + } +} + +/* Extract ItemPointer from tuple using JunkFilter */ +static ItemPointerData +router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) +{ + Relation rel = state->current_rri->ri_RelationDesc; + char relkind = RelationGetForm(rel)->relkind; + + if (relkind == RELKIND_RELATION) + { + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(slot, + state->junkfilter->jf_junkAttNo, + &ctid_isnull); + + /* shouldn't ever get a null result... 
*/ + if (ctid_isnull) + elog(ERROR, "ctid is NULL"); + + /* Get item pointer to tuple */ + return *(ItemPointer) DatumGetPointer(ctid_datum); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); + else + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); +} +/* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */ static TupleTableSlot * -router_delete_tuple(TupleTableSlot *slot, - ItemPointer tupleid, - EPQState *epqstate, - EState *estate) +router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted, /* return value #1 */ + EState *estate) { - ResultRelInfo *rri; - Relation rel; - HTSU_Result result; - HeapUpdateFailureData hufd; + ResultRelInfo *rri; + Relation rel; + + ExprContext *econtext = GetPerTupleExprContext(estate); + ExprState *constraint = state->constraint; + + HeapUpdateFailureData hufd; + HTSU_Result result; + EPQState *epqstate = &state->epqstate; + + LOCKMODE lockmode; + bool try_delete; + + *deleted = false; EvalPlanQualSetSlot(epqstate, slot); /* Get information on the (current) result relation */ rri = estate->es_result_relation_info; rel = rri->ri_RelationDesc; + lockmode = ExecUpdateLockMode(estate, rri); - /* BEFORE ROW UPDATE triggers */ - if (rri->ri_TrigDesc && - rri->ri_TrigDesc->trig_update_before_row) - { - slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); - if (TupIsNull(slot)) - return NULL; - } +recheck: + /* Does tuple still belong to current partition? 
*/ + econtext->ecxt_scantuple = slot; + try_delete = !ExecCheck(constraint, econtext); - /* BEFORE ROW DELETE triggers */ - if (rri->ri_TrigDesc && - rri->ri_TrigDesc->trig_delete_before_row) + /* Lock or delete tuple */ + if (try_delete) { - if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) - return NULL; - } + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) + { + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; + } + + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) + return NULL; + } - if (tupleid != NULL) - { -ldelete: /* Delete the tuple */ result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, - true /* wait for commit */ , - &hufd); - switch (result) - { - case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) - ereport(ERROR, - (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), - errmsg("tuple to be updated was already modified by an operation triggered by the current command"), - errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); - - /* Already deleted by self; nothing to do */ - return NULL; + true /* wait for commit */, &hufd, + true /* changing partition */); + } + else + { + HeapTupleData tuple; + Buffer buffer; + + tuple.t_self = *tupleid; + result = heap_lock_tuple(rel, &tuple, + estate->es_output_cid, + lockmode, LockWaitBlock, + false, &buffer, &hufd); + + ReleaseBuffer(buffer); + } - case HeapTupleMayBeUpdated: - break; + /* Check lock/delete status */ + switch (result) + { + case HeapTupleSelfUpdated: + if (hufd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already 
modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Already deleted by self; nothing to do */ + return NULL; - case HeapTupleUpdated: - if (IsolationUsesXactSnapshot()) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); + case HeapTupleMayBeUpdated: + break; - if (!ItemPointerEquals(tupleid, &hufd.ctid)) + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + if (!ItemPointerEquals(tupleid, &hufd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + rel, + rri->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + + if (!TupIsNull(epqslot)) { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - epqstate, - rel, - rri->ri_RangeTableIndex, - LockTupleExclusive, - &hufd.ctid, - hufd.xmax); - - if (!TupIsNull(epqslot)) - { - Assert(tupleid != NULL); - *tupleid = hufd.ctid; - slot = epqslot; - goto ldelete; - } + Assert(tupleid != NULL); + *tupleid = hufd.ctid; + slot = epqslot; + goto recheck; } + } - /* Tuple already deleted; nothing to do */ - return NULL; + /* Tuple already deleted; nothing to do */ + return NULL; - default: - elog(ERROR, "unrecognized heap_delete status: %u", result); - } + case HeapTupleInvisible: + elog(ERROR, "attempted to lock invisible tuple"); + break; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + break; } - else elog(ERROR, "tupleid should be specified for deletion"); - /* AFTER ROW DELETE triggers */ - ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); + /* Additional work for delete s*/ + if (try_delete) + { + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, 
NULL); + } + *deleted = try_delete; return slot; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 58c92bd3..b4baabfa 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -238,7 +238,7 @@ state_tree_visitor(PlanState *state, case T_CustomScan: foreach (lc, ((CustomScanState *) state)->custom_ps) - state_tree_visitor((PlanState *) lfirst(lc),visitor, context); + state_tree_visitor((PlanState *) lfirst(lc), visitor, context); break; case T_ModifyTable: diff --git a/src/relation_info.c b/src/relation_info.c index a18ceeec..25b86d31 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -150,8 +150,6 @@ static void resonwner_prel_callback(ResourceReleasePhase phase, bool isTopLevel, void *arg); -static Expr *get_partition_constraint_expr(Oid partition); - static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, const uint32 parts_count); @@ -1047,7 +1045,7 @@ invalidate_bounds_cache(void) * * build_check_constraint_name_internal() is used to build conname. 
*/ -static Expr * +Expr * get_partition_constraint_expr(Oid partition) { Oid conid; /* constraint Oid */ From 65033edd831e028e56eb0e5402871c259b997d4e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 4 Sep 2018 17:01:02 +0300 Subject: [PATCH 0918/1124] Support create_append_path in PGPROEE11 --- src/include/compat/pg_compat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f2d5ba63..f33d41cc 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -239,7 +239,9 @@ create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ (parallel_workers), false, NIL, -1) #else -/* TODO */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 100000 From ffa6335b507069f15e8aec6416abcd82c3c3edf0 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 4 Sep 2018 22:42:06 +0300 Subject: [PATCH 0919/1124] WIP router_run_modify_table() hackery works in simple cases --- src/include/partition_router.h | 1 + src/partition_filter.c | 1 + src/partition_router.c | 142 ++++++++++++++++++++++++++++++--- 3 files changed, 131 insertions(+), 13 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index f1526335..ac1b3ea4 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -38,6 +38,7 @@ typedef struct PartitionRouterState int epqparam; ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ + TupleTableSlot *saved_slot; ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index f51f7896..9850dde1 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -771,6 +771,7 @@ 
partition_filter_exec(CustomScanState *node) if (!state->tup_convert_slot) state->tup_convert_slot = MakeTupleTableSlotCompat(); + /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); } diff --git a/src/partition_router.c b/src/partition_router.c index fc8b50ba..3106e487 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -24,12 +24,28 @@ #include "utils/guc.h" #include "utils/rel.h" + +#define MTHackField(mt_state, field) ( (mt_state)->field ) + + bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; +/* FIXME: replace this magic with a CustomScan */ +static ExecProcNodeMtd mt_method = NULL; + + +static TupleTableSlot *router_run_modify_table(PlanState *state); + +static TupleTableSlot *router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation); +static TupleTableSlot *router_get_slot(PartitionRouterState *state, + bool *should_process); + static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); static void router_lazy_init_constraint(PartitionRouterState *state); @@ -110,6 +126,7 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (IsA(state, ModifyTableState)) { ModifyTableState *mt_state = (ModifyTableState *) state; + bool changed_method = false; int i; for (i = 0; i < mt_state->mt_nplans; i++) @@ -121,8 +138,19 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (IsPartitionFilterState(pf_state) && IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) { - /* HACK: PartitionRouter might change ModifyTable's state */ + /* HACK: point to ModifyTable in PartitionRouter */ pr_state->mt_state = mt_state; + + if (!changed_method) + { + if (!mt_method) + mt_method = 
state->ExecProcNodeReal; + + /* HACK: replace ModifyTable's execution method */ + ExecSetExecProcNode(state, router_run_modify_table); + + changed_method = true; + } } } } @@ -166,17 +194,18 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) TupleTableSlot * partition_router_exec(CustomScanState *node) { - EState *estate = node->ss.ps.state; - PlanState *child_ps = (PlanState *) linitial(node->custom_ps); - PartitionRouterState *state = (PartitionRouterState *) node; - TupleTableSlot *slot; + EState *estate = node->ss.ps.state; + PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; + bool should_process; take_next_tuple: - /* execute PartitionFilter child node */ - slot = ExecProcNode(child_ps); + /* Get next tuple for processing */ + slot = router_get_slot(state, &should_process); - if (!TupIsNull(slot)) + if (should_process) { + CmdType new_cmd; bool deleted; ItemPointerData ctid; @@ -203,13 +232,14 @@ partition_router_exec(CustomScanState *node) if (TupIsNull(slot)) goto take_next_tuple; - /* HACK: change command type in ModifyTable */ - state->mt_state->operation = deleted ? CMD_INSERT : CMD_UPDATE; + /* Should we use UPDATE or DELETE + INSERT? */ + new_cmd = deleted ? 
CMD_INSERT : CMD_UPDATE; - return slot; + /* Alter ModifyTable's state and return */ + return router_set_slot(state, slot, new_cmd); } - return NULL; + return slot; } void @@ -218,15 +248,20 @@ partition_router_end(CustomScanState *node) PartitionRouterState *state = (PartitionRouterState *) node; Assert(list_length(node->custom_ps) == 1); - EvalPlanQualEnd(&state->epqstate); ExecEndNode((PlanState *) linitial(node->custom_ps)); + + EvalPlanQualEnd(&state->epqstate); } void partition_router_rescan(CustomScanState *node) { + PartitionRouterState *state = (PartitionRouterState *) node; + Assert(list_length(node->custom_ps) == 1); ExecReScan((PlanState *) linitial(node->custom_ps)); + + state->saved_slot = NULL; } void @@ -236,6 +271,87 @@ partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *e } +static TupleTableSlot * +router_run_modify_table(PlanState *state) +{ + ModifyTableState *mt_state; + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + mt_state = (ModifyTableState *) state; + + mt_plans_old = MTHackField(mt_state, mt_nplans); + + /* Fetch next tuple */ + slot = mt_method(state); + + mt_plans_new = MTHackField(mt_state, mt_nplans); + + /* PartitionRouter asked us to restart */ + if (mt_plans_new != mt_plans_old) + { + int state_idx = mt_state->mt_whichplan - 1; + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; + + /* Restart ModifyTable */ + return mt_method(state); + } + + return slot; +} + +static TupleTableSlot * +router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation) +{ + ModifyTableState *mt_state = state->mt_state; + + Assert(!TupIsNull(slot)); + + if (mt_state->operation == operation) + return slot; + + /* HACK: alter ModifyTable's state */ + MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; + MTHackField(mt_state, operation) 
= operation; + + /* Set saved_slot and yield */ + state->saved_slot = slot; + return NULL; +} + +static TupleTableSlot * +router_get_slot(PartitionRouterState *state, + bool *should_process) +{ + TupleTableSlot *slot; + + if (!TupIsNull(state->saved_slot)) + { + /* Reset saved_slot */ + slot = state->saved_slot; + state->saved_slot = NULL; + + /* We shouldn't process preserved slot... */ + *should_process = false; + } + else + { + slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + + /* But we have to process non-empty slot */ + *should_process = !TupIsNull(slot); + } + + return slot; +} + static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) { From 8e92ca1ba42b025089a6f613e6712404ac297727 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 14:35:57 +0300 Subject: [PATCH 0920/1124] WIP conditionally disable junk filter --- src/include/partition_router.h | 5 ++--- src/partition_router.c | 24 +++++++++--------------- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/src/include/partition_router.h b/src/include/partition_router.h index ac1b3ea4..79ae71a3 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -31,16 +31,15 @@ typedef struct PartitionRouterState CustomScanState css; Plan *subplan; /* proxy variable to store subplan */ - JunkFilter *junkfilter; /* 'ctid' extraction facility */ ExprState *constraint; /* should tuple remain in partition? 
*/ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ + ResultRelInfo *current_rri; EPQState epqstate; int epqparam; ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ TupleTableSlot *saved_slot; - - ResultRelInfo *current_rri; } PartitionRouterState; diff --git a/src/partition_router.c b/src/partition_router.c index 3106e487..968d5a3e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -291,7 +291,7 @@ router_run_modify_table(PlanState *state) /* PartitionRouter asked us to restart */ if (mt_plans_new != mt_plans_old) { - int state_idx = mt_state->mt_whichplan - 1; + int state_idx = -mt_plans_new; /* HACK: partially restore ModifyTable's state */ MTHackField(mt_state, mt_done) = false; @@ -312,7 +312,9 @@ router_set_slot(PartitionRouterState *state, { ModifyTableState *mt_state = state->mt_state; + /* Check invariants */ Assert(!TupIsNull(slot)); + Assert(state->junkfilter); if (mt_state->operation == operation) return slot; @@ -321,6 +323,11 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? 
+ state->junkfilter : + NULL; + /* Set saved_slot and yield */ state->saved_slot = slot; return NULL; @@ -355,21 +362,8 @@ router_get_slot(PartitionRouterState *state, static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) { - Relation rel = state->current_rri->ri_RelationDesc; - if (state->junkfilter == NULL) - { - state->junkfilter = - ExecInitJunkFilter(state->subplan->targetlist, - RelationGetDescr(rel)->tdhasoid, - ExecInitExtraTupleSlotCompat(estate)); - - state->junkfilter->jf_junkAttNo = - ExecFindJunkAttribute(state->junkfilter, "ctid"); - - if (!AttributeNumberIsValid(state->junkfilter->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } + state->junkfilter = state->current_rri->ri_junkFilter; } static void From 2385cdd6192c60e3acd33c82a8e8af01842c0167 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 14:36:21 +0300 Subject: [PATCH 0921/1124] add approved tests --- expected/pathman_update_triggers.out | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index 5c1092f2..6b366fb3 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -52,10 +52,7 @@ NOTICE: AFTER INSERT ROW (test_1) set pg_pathman.enable_partitionrouter = t; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_1) -NOTICE: BEFORE DELETE ROW (test_1) -NOTICE: BEFORE INSERT ROW (test_1) -NOTICE: AFTER DELETE ROW (test_1) -NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) val | tableoid -----+----------------------------- 2 | test_update_triggers.test_1 @@ -74,10 +71,7 @@ NOTICE: AFTER INSERT ROW (test_2) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_2) -NOTICE: BEFORE DELETE ROW (test_2) -NOTICE: BEFORE INSERT ROW (test_2) -NOTICE: AFTER 
DELETE ROW (test_2) -NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) val | tableoid -----+----------------------------- 4 | test_update_triggers.test_2 @@ -96,10 +90,7 @@ NOTICE: AFTER INSERT ROW (test_1) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; NOTICE: BEFORE UPDATE ROW (test_1) -NOTICE: BEFORE DELETE ROW (test_1) -NOTICE: BEFORE INSERT ROW (test_1) -NOTICE: AFTER DELETE ROW (test_1) -NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) val | tableoid -----+----------------------------- 6 | test_update_triggers.test_1 From 42e2c10cf73ce5d38673b788d716fc3f40727adb Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 15:04:29 +0300 Subject: [PATCH 0922/1124] WIP fix router_run_modify_table(), more tests --- expected/pathman_update_node.out | 26 ++++++++++++++++++++++++++ sql/pathman_update_node.sql | 15 +++++++++++++++ src/partition_router.c | 8 ++++++-- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index e68bb9ae..4f379e05 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -374,6 +374,32 @@ SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); 3 (1 row) +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + tableoid | val | comment +------------------------------+-----+--------- + 
test_update_node.test_hash_2 | 10 | 1 + test_update_node.test_hash_1 | 11 | 2 + test_update_node.test_hash_1 | 12 | 3 + test_update_node.test_hash_2 | 13 | 4 + test_update_node.test_hash_1 | 14 | 5 + test_update_node.test_hash_1 | 15 | 6 + test_update_node.test_hash_2 | 16 | 7 + test_update_node.test_hash_0 | 17 | 8 + test_update_node.test_hash_1 | 18 | 9 + test_update_node.test_hash_0 | 19 | 10 +(10 rows) + /* Move all rows into single partition */ UPDATE test_update_node.test_hash SET val = 1; /* Check values #1 */ diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index aff7f8ec..2c7e97f7 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -174,6 +174,21 @@ INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; + +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + + /* Move all rows into single partition */ UPDATE test_update_node.test_hash SET val = 1; diff --git a/src/partition_router.c b/src/partition_router.c index 968d5a3e..3348adb7 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -281,16 +281,20 @@ router_run_modify_table(PlanState *state) mt_state = (ModifyTableState *) state; + /* Get initial signal */ mt_plans_old = MTHackField(mt_state, mt_nplans); +restart: /* Fetch next tuple */ slot = mt_method(state); + /* Get current signal */ 
mt_plans_new = MTHackField(mt_state, mt_nplans); - /* PartitionRouter asked us to restart */ + /* Did PartitionRouter ask us to restart? */ if (mt_plans_new != mt_plans_old) { + /* Signal points to current plan */ int state_idx = -mt_plans_new; /* HACK: partially restore ModifyTable's state */ @@ -299,7 +303,7 @@ router_run_modify_table(PlanState *state) MTHackField(mt_state, mt_whichplan) = state_idx; /* Restart ModifyTable */ - return mt_method(state); + goto restart; } return slot; From c3399f3d123e9a6bdddf4e39aa29558b638d3045 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 15:15:58 +0300 Subject: [PATCH 0923/1124] WIP add comments here and there --- src/partition_router.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index 3348adb7..56008743 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -223,12 +223,12 @@ partition_router_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = state->current_rri; - /* Delete tuple from old partition */ + /* Lock or delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); slot = router_lock_or_delete_tuple(state, slot, &ctid, &deleted, estate); - /* We require a tuple */ + /* We require a tuple (previous one has vanished) */ if (TupIsNull(slot)) goto take_next_tuple; @@ -265,12 +265,15 @@ partition_router_rescan(CustomScanState *node) } void -partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es) +partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) { /* Nothing to do here now */ } +/* Smart wrapper over ModifyTable */ static TupleTableSlot * router_run_modify_table(PlanState *state) { @@ -309,6 +312,7 @@ router_run_modify_table(PlanState *state) return slot; } +/* Return tuple OR stash it and change ModifyTable's operation */ static TupleTableSlot * 
router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, @@ -337,6 +341,7 @@ router_set_slot(PartitionRouterState *state, return NULL; } +/* Fetch next tuple (either fresh or stashed) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, bool *should_process) From 0927b9fc57eb040ca9a6aef089a9c7d85f35ee4e Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 5 Sep 2018 18:05:36 +0300 Subject: [PATCH 0924/1124] EPQ: fix multilevel (see router_lazy_init_constraint()) --- src/include/relation_info.h | 2 +- src/partition_router.c | 30 ++++++++++++++++++++++++++---- src/relation_info.c | 20 +++++++++++--------- 3 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 3a5f0fa8..f3faa3d3 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,9 +367,9 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ -Expr *get_partition_constraint_expr(Oid partition); void forget_bounds_of_partition(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Expr *get_partition_constraint_expr(Oid partition, bool raise_error); void invalidate_bounds_cache(void); /* Parents cache */ diff --git a/src/partition_router.c b/src/partition_router.c index 56008743..64feddd9 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -20,6 +20,7 @@ #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#include "optimizer/clauses.h" #include "storage/bufmgr.h" #include "utils/guc.h" #include "utils/rel.h" @@ -378,12 +379,33 @@ router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) static void router_lazy_init_constraint(PartitionRouterState *state) { - Relation rel = state->current_rri->ri_RelationDesc; - if (state->constraint == NULL) { - Expr *expr = get_partition_constraint_expr(RelationGetRelid(rel)); 
- state->constraint = ExecInitExpr(expr, NULL); + Relation rel = state->current_rri->ri_RelationDesc; + Oid relid = RelationGetRelid(rel); + List *clauses = NIL; + Expr *expr; + + while (OidIsValid(relid)) + { + /* It's probably OK if expression is NULL */ + expr = get_partition_constraint_expr(relid, false); + expr = expression_planner(expr); + + if (!expr) + break; + + /* Add this constraint to set */ + clauses = lappend(clauses, expr); + + /* Consider parent's check constraint as well */ + relid = get_parent_of_partition(relid); + } + + if (!clauses) + elog(ERROR, "no recheck constraint for relid %d", relid); + + state->constraint = ExecInitExpr(make_ands_explicit(clauses), NULL); } } diff --git a/src/relation_info.c b/src/relation_info.c index 25b86d31..386008d2 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1000,7 +1000,7 @@ get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) pbin_local.byval = prel->ev_byval; /* Try to build constraint's expression tree (may emit ERROR) */ - con_expr = get_partition_constraint_expr(partition); + con_expr = get_partition_constraint_expr(partition, true); /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ fill_pbin_with_bounds(&pbin_local, prel, con_expr); @@ -1046,7 +1046,7 @@ invalidate_bounds_cache(void) * build_check_constraint_name_internal() is used to build conname. 
*/ Expr * -get_partition_constraint_expr(Oid partition) +get_partition_constraint_expr(Oid partition, bool raise_error) { Oid conid; /* constraint Oid */ char *conname; /* constraint name */ @@ -1060,11 +1060,12 @@ get_partition_constraint_expr(Oid partition) if (!OidIsValid(conid)) { - DisablePathman(); /* disable pg_pathman since config is broken */ + if (!raise_error) + return NULL; + ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); + conname, get_rel_name_or_relid(partition)))); } con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); @@ -1073,11 +1074,12 @@ get_partition_constraint_expr(Oid partition) &conbin_isnull); if (conbin_isnull) { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, + if (!raise_error) + return NULL; + + ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); + conname, get_rel_name_or_relid(partition)))); pfree(conname); return NULL; /* could not parse */ From baf8fc2fd85d3754fd671ca45c0d773c92841642 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 7 Sep 2018 16:57:49 +0300 Subject: [PATCH 0925/1124] optimize find_deepest_partition() for single tables --- src/planner_tree_modification.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 58c92bd3..ab84b254 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -885,7 +885,8 @@ modifytable_contains_fdw(List *rtable, ModifyTable *node) /* * Find a single deepest subpartition using quals. - * Return InvalidOid if it's not possible. + * It's always better to narrow down the set of tables to be scanned. + * Return InvalidOid if it's not possible (e.g. table is not partitioned). 
*/ static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals) @@ -931,8 +932,13 @@ find_deepest_partition(Oid relid, Index rti, Expr *quals) Oid *children = PrelGetChildrenArray(prel), child = children[irange_lower(irange)]; + /* Scan this partition */ + result = child; + /* Try to go deeper and see if there are subpartitions */ - result = find_deepest_partition(child, rti, quals); + child = find_deepest_partition(child, rti, quals); + if (OidIsValid(child)) + result = child; } break; @@ -943,8 +949,6 @@ find_deepest_partition(Oid relid, Index rti, Expr *quals) /* Don't forget to close 'prel'! */ close_pathman_relation_info(prel); } - /* Otherwise, return this table */ - else result = relid; return result; } From 7a4aa43adebce49e0c3383dbb945d9cbaebff529 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Mon, 10 Sep 2018 20:36:27 +0300 Subject: [PATCH 0926/1124] sometimes break saves the day (issue #174) --- src/nodes_common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nodes_common.c b/src/nodes_common.c index f9f394ec..5f0c0c14 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -159,6 +159,7 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) { tlist_var->varattno = attnum; found_column = true; /* successful mapping */ + break; } } From 906dafbd86189dc4866268bf5159b39d7e2b4cce Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 16:56:32 +0300 Subject: [PATCH 0927/1124] fix CustomEvalParamExternCompat(), many thanks to Alexander Kuzmenkov --- src/include/compat/pg_compat.h | 8 ++++---- src/planner_tree_modification.c | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index f33d41cc..17f037cd 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -864,15 +864,15 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* See ExecEvalParamExtern() */ static inline ParamExternData * 
-CustomEvalParamExternCompat(Param *param, ParamListInfo params) +CustomEvalParamExternCompat(Param *param, + ParamListInfo params, + ParamExternData *prmdata) { ParamExternData *prm; #if PG_VERSION_NUM >= 110000 - ParamExternData prmdata; - if (params->paramFetch != NULL) - prm = params->paramFetch(params, param->paramid, false, &prmdata); + prm = params->paramFetch(params, param->paramid, false, prmdata); else prm = ¶ms->params[param->paramid - 1]; #else diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index ab84b254..ee05108a 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -971,7 +971,10 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm = CustomEvalParamExternCompat(param, params); + ParamExternData prmdata; /* storage for 'prm' (PG 11) */ + ParamExternData *prm = CustomEvalParamExternCompat(param, + params, + &prmdata); if (OidIsValid(prm->ptype)) { From 063114712b3426ea0f8fcb072b8537c452f6fd7c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:06:16 +0300 Subject: [PATCH 0928/1124] run 11-based builds (Travis CI) --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index db2eebc9..946eb606 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=11 LEVEL=hardcore + - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 - PG_VERSION=9.6 LEVEL=hardcore From b0eefc5e69fbac72e5176c8d013af7511386e382 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:30:12 +0300 Subject: [PATCH 0929/1124] add test variant for PG 10 and PG 11 --- expected/pathman_basic_1.out | 1845 ++++++++++++++++++++++++++++++++++ 1 file changed, 1845 insertions(+) create mode 100644 expected/pathman_basic_1.out diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out new 
file mode 100644 index 00000000..692de996 --- /dev/null +++ b/expected/pathman_basic_1.out @@ -0,0 +1,1845 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT 
pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function 
pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id 
= 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(3 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 
2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE 
test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; 
+EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on 
num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on range_rel_2 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 
2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN 
+---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN 
+---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on range_rel_2 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: 
(dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN 
+------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Join + */ +set enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on 
range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(4 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(3 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_6 +(2 rows) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_7 +(2 rows) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN 
+------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(3 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT 
pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition 
+------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES 
('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * 
FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we 
are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE UNLOGGED TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + 
+SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | u +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(3 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(3 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) 
VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- + test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +---------+------+----------+----------------+------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', 
'2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval | cooked_expr +--------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- + test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') 
as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 
+NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s 
WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + 
test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 28 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP 
SCHEMA pathman CASCADE; From 2ee5d316155b5445a2c67b01a28cd034f2593d1d Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 18:47:21 +0300 Subject: [PATCH 0930/1124] build FDW in hardcore mode --- run_tests.sh | 7 ++++++- tests/python/partitioning_test.py | 9 +++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index d0581e7f..82d1f9d3 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -45,13 +45,18 @@ if [ "$LEVEL" = "hardcore" ] || \ # enable additional options ./configure \ - CFLAGS='-O0 -ggdb3 -fno-omit-frame-pointer' \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ --enable-cassert \ --prefix=$CUSTOM_PG_BIN \ --quiet + # build & install PG time make -s -j$(nproc) && make -s install + # build & install FDW + time make -s -C contrib/postgres_fdw -j$(nproc) && \ + make -s -C contrib/postgres_fdw install + # override default PostgreSQL instance export PATH=$CUSTOM_PG_BIN/bin:$PATH export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index e234f7ff..f2b2ea51 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -76,10 +76,7 @@ def is_postgres_fdw_ready(): select count(*) from pg_available_extensions where name = 'postgres_fdw' """) - if result[0][0] > 0: - return True - - return False + return result[0][0] > 0 class Tests(unittest.TestCase): @@ -334,7 +331,7 @@ def check_tablespace(node, tablename, tablespace): self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - @unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') def test_foreign_table(self): """ Test foreign tables """ @@ -427,7 +424,7 @@ def test_foreign_table(self): b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') master.safe_psql("select drop_partitions('hash_test')") - 
@unittest.skipUnless(is_postgres_fdw_ready(), 'might be missing') + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') def test_parallel_nodes(self): """ Test parallel queries under partitions """ From 801f2ae4b9114af012a1ee2f59da60f42d44419c Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 11 Sep 2018 20:53:13 +0300 Subject: [PATCH 0931/1124] PartitionRouter supports AFTER STATEMENT triggers --- expected/pathman_update_triggers.out | 90 +++++++++++++++++++++- sql/pathman_update_triggers.sql | 76 ++++++++++++++++++- src/include/partition_router.h | 11 ++- src/partition_filter.c | 3 +- src/partition_router.c | 107 +++++++++++++++++++-------- 5 files changed, 249 insertions(+), 38 deletions(-) diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index 6b366fb3..d5c92b9f 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -22,6 +22,76 @@ begin return new; end if; end; $$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before 
delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER 
UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ create trigger bu before update ON test_update_triggers.test_1 for each row execute procedure test_update_triggers.test_trigger (); create trigger bd before delete ON test_update_triggers.test_1 @@ -46,56 +116,74 @@ create trigger ad after delete ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); create trigger ai after insert ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) NOTICE: BEFORE INSERT ROW (test_1) NOTICE: AFTER INSERT ROW (test_1) -set pg_pathman.enable_partitionrouter = t; +NOTICE: AFTER INSERT STATEMENT (test) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 2 | test_update_triggers.test_1 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: BEFORE DELETE ROW (test_1) NOTICE: BEFORE INSERT ROW (test_2) NOTICE: AFTER DELETE ROW (test_1) NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 3 | test_update_triggers.test_2 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_2) NOTICE: AFTER UPDATE ROW 
(test_2) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 4 | test_update_triggers.test_2 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_2) NOTICE: BEFORE DELETE ROW (test_2) NOTICE: BEFORE INSERT ROW (test_1) NOTICE: AFTER DELETE ROW (test_2) NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 5 | test_update_triggers.test_1 (1 row) update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) NOTICE: BEFORE UPDATE ROW (test_1) NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) val | tableoid -----+----------------------------- 6 | test_update_triggers.test_1 (1 row) +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + DROP SCHEMA test_update_triggers CASCADE; NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql index c289d12c..e8405acb 100644 --- a/sql/pathman_update_triggers.sql +++ b/sql/pathman_update_triggers.sql @@ -25,6 +25,79 @@ begin $$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; + + +/* + * Statement level triggers + */ + +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON 
test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + + +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); + +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; 
+update test_update_triggers.test set val = val + 1; + +select count(distinct val) from test_update_triggers.test; + + +truncate test_update_triggers.test; + + +/* + * Row level triggers + */ + create trigger bu before update ON test_update_triggers.test_1 for each row execute procedure test_update_triggers.test_trigger (); create trigger bd before delete ON test_update_triggers.test_1 @@ -55,15 +128,16 @@ create trigger ai after insert ON test_update_triggers.test_2 for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ insert into test_update_triggers.test values (1); -set pg_pathman.enable_partitionrouter = t; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +select count(distinct val) from test_update_triggers.test; DROP SCHEMA test_update_triggers CASCADE; diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 79ae71a3..683af938 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -35,11 +35,18 @@ typedef struct PartitionRouterState JunkFilter *junkfilter; /* 'ctid' extraction facility */ ResultRelInfo *current_rri; + /* Machinery required for EvalPlanQual */ EPQState epqstate; int epqparam; - ModifyTableState *mt_state; /* need this for a GREAT deal of hackery */ - TupleTableSlot *saved_slot; + /* Preserved slot from last call */ + bool yielded; + TupleTableSlot *yielded_slot; + + /* Need these for a GREAT deal of hackery */ + ModifyTableState *mt_state; + bool update_stmt_triggers, + insert_stmt_triggers; } PartitionRouterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 
9850dde1..098a72a5 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -801,8 +801,7 @@ partition_filter_end(CustomScanState *node) void partition_filter_rescan(CustomScanState *node) { - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); + elog(ERROR, "partition_filter_rescan is not implemented"); } void diff --git a/src/partition_router.c b/src/partition_router.c index 64feddd9..b746765e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -26,8 +26,38 @@ #include "utils/rel.h" +/* Highlight hacks with ModifyTable's fields */ #define MTHackField(mt_state, field) ( (mt_state)->field ) +/* Is current plan the last one? */ +#define MTIsLastPlan(mt_state) ( (mt_state)->mt_whichplan == (mt_state)->mt_nplans - 1 ) + + +#define MTDisableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + (pr_state)->insert_stmt_triggers |= triggers->trig_insert_after_statement; \ + (pr_state)->update_stmt_triggers |= triggers->trig_update_after_statement; \ + triggers->trig_insert_after_statement = false; \ + triggers->trig_update_after_statement = false; \ + } \ + } while (0) + +#define MTEnableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + triggers->trig_insert_after_statement = (pr_state)->insert_stmt_triggers; \ + triggers->trig_update_after_statement = (pr_state)->update_stmt_triggers; \ + } \ + } while (0) + + bool pg_pathman_enable_partition_router = true; @@ -47,7 +77,7 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, static TupleTableSlot *router_get_slot(PartitionRouterState *state, bool *should_process); -static void router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate); +static void router_lazy_init_junkfilter(PartitionRouterState *state); static void 
router_lazy_init_constraint(PartitionRouterState *state); static ItemPointerData router_extract_ctid(PartitionRouterState *state, @@ -56,8 +86,7 @@ static ItemPointerData router_extract_ctid(PartitionRouterState *state, static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, TupleTableSlot *slot, ItemPointer tupleid, - bool *deleted, - EState *estate); + bool *deleted); void init_partition_router_static_data(void) @@ -213,7 +242,7 @@ partition_router_exec(CustomScanState *node) ItemPointerSetInvalid(&ctid); /* Build new junkfilter lazily */ - router_lazy_init_junkfilter(state, estate); + router_lazy_init_junkfilter(state); /* Build recheck constraint state lazily */ router_lazy_init_constraint(state); @@ -226,8 +255,8 @@ partition_router_exec(CustomScanState *node) /* Lock or delete tuple from old partition */ Assert(ItemPointerIsValid(&ctid)); - slot = router_lock_or_delete_tuple(state, slot, &ctid, - &deleted, estate); + slot = router_lock_or_delete_tuple(state, slot, + &ctid, &deleted); /* We require a tuple (previous one has vanished) */ if (TupIsNull(slot)) @@ -257,12 +286,7 @@ partition_router_end(CustomScanState *node) void partition_router_rescan(CustomScanState *node) { - PartitionRouterState *state = (PartitionRouterState *) node; - - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); - - state->saved_slot = NULL; + elog(ERROR, "partition_router_rescan is not implemented"); } void @@ -313,18 +337,15 @@ router_run_modify_table(PlanState *state) return slot; } -/* Return tuple OR stash it and change ModifyTable's operation */ +/* Return tuple OR yield it and change ModifyTable's operation */ static TupleTableSlot * router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation) { - ModifyTableState *mt_state = state->mt_state; - - /* Check invariants */ - Assert(!TupIsNull(slot)); - Assert(state->junkfilter); + ModifyTableState *mt_state = state->mt_state; + /* Fast 
path for correct operation type */ if (mt_state->operation == operation) return slot; @@ -332,36 +353,58 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; - /* HACK: conditionally disable junk filter in result relation */ - state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? - state->junkfilter : - NULL; + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + if (!TupIsNull(slot)) + { + /* We should've cached junk filter already */ + Assert(state->junkfilter); + + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? + state->junkfilter : + NULL; - /* Set saved_slot and yield */ - state->saved_slot = slot; + /* Don't forget to set saved_slot! */ + state->yielded_slot = slot; + } + + /* Yield */ + state->yielded = true; return NULL; } -/* Fetch next tuple (either fresh or stashed) */ +/* Fetch next tuple (either fresh or yielded) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, bool *should_process) { TupleTableSlot *slot; - if (!TupIsNull(state->saved_slot)) + /* Do we have a preserved slot? */ + if (state->yielded) { - /* Reset saved_slot */ - slot = state->saved_slot; - state->saved_slot = NULL; + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(state->mt_state, state); + + /* Reset saved slot */ + slot = state->yielded_slot; + state->yielded_slot = NULL; + state->yielded = false; /* We shouldn't process preserved slot... 
*/ *should_process = false; } else { + /* Fetch next tuple */ slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + /* Restore operation type for AFTER STATEMENT triggers */ + if (TupIsNull(slot) && MTIsLastPlan(state->mt_state)) + slot = router_set_slot(state, NULL, CMD_UPDATE); + /* But we have to process non-empty slot */ *should_process = !TupIsNull(slot); } @@ -370,7 +413,7 @@ router_get_slot(PartitionRouterState *state, } static void -router_lazy_init_junkfilter(PartitionRouterState *state, EState *estate) +router_lazy_init_junkfilter(PartitionRouterState *state) { if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; @@ -443,12 +486,12 @@ static TupleTableSlot * router_lock_or_delete_tuple(PartitionRouterState *state, TupleTableSlot *slot, ItemPointer tupleid, - bool *deleted, /* return value #1 */ - EState *estate) + bool *deleted /* return value #1 */) { ResultRelInfo *rri; Relation rel; + EState *estate = state->css.ss.ps.state; ExprContext *econtext = GetPerTupleExprContext(estate); ExprState *constraint = state->constraint; From 45e040aacc8fc719292d746fd52b34e7728e4c10 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Sep 2018 13:29:42 +0300 Subject: [PATCH 0932/1124] fix memory issues found by Valgrind (reset state after each subplan) --- src/partition_router.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index b746765e..55331bff 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -29,9 +29,6 @@ /* Highlight hacks with ModifyTable's fields */ #define MTHackField(mt_state, field) ( (mt_state)->field ) -/* Is current plan the last one? 
*/ -#define MTIsLastPlan(mt_state) ( (mt_state)->mt_whichplan == (mt_state)->mt_nplans - 1 ) - #define MTDisableStmtTriggers(mt_state, pr_state) \ do { \ @@ -402,7 +399,7 @@ router_get_slot(PartitionRouterState *state, slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); /* Restore operation type for AFTER STATEMENT triggers */ - if (TupIsNull(slot) && MTIsLastPlan(state->mt_state)) + if (TupIsNull(slot)) slot = router_set_slot(state, NULL, CMD_UPDATE); /* But we have to process non-empty slot */ From 431b316a08c7747f2193ed058e432fdaa2731421 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Wed, 12 Sep 2018 13:52:45 +0300 Subject: [PATCH 0933/1124] PG 11: check moved rows in router_lock_or_delete_tuple() --- src/include/compat/pg_compat.h | 10 ++++++++++ src/partition_router.c | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 6f748eb1..1ae1b33b 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -529,6 +529,16 @@ char get_rel_persistence(Oid relid); #endif +/* + * ItemPointerIndicatesMovedPartitions() + * + * supported since v11, provide a stub for previous versions. 
+ */ +#if PG_VERSION_NUM < 110000 +#define ItemPointerIndicatesMovedPartitions(ctid) ( false ) +#endif + + /* * make_restrictinfo() */ diff --git a/src/partition_router.c b/src/partition_router.c index 55331bff..7459315a 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -575,6 +575,10 @@ router_lock_or_delete_tuple(PartitionRouterState *state, ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); if (!ItemPointerEquals(tupleid, &hufd.ctid)) { From 1817d26fd099dc6249a635ee315d38e1d0891507 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 12 Sep 2018 16:34:49 +0300 Subject: [PATCH 0934/1124] Add more tests on dropped columns (issue #174) --- expected/pathman_dropped_cols.out | 126 ++++++++++++++++++++++++++++++ sql/pathman_dropped_cols.sql | 61 +++++++++++++++ 2 files changed, 187 insertions(+) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 89585b52..7c9e2806 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -80,5 +80,131 @@ select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathm drop table test_hash cascade; NOTICE: drop cascades to 3 other objects +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, 
edit_num, edit_date, sign) VALUES + (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; +CREATE EXTENSION pg_pathman; +ERROR: extension "pg_pathman" already exists +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); + create_hash_partitions +------------------------ + 3 +(1 row) + +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + set_enable_parent +------------------- + +(1 row) + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id 
+----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +-- errors usually start here +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 + 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 +(2 rows) + +EXPLAIN EXECUTE getbyroot(2); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) (cost=4.17..11.28 rows=3 width=128) + Prune by: (root_dict.root_id = $1) + -> Bitmap Heap Scan on root_dict_0 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_0_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_1 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_1_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_2 root_dict (cost=4.17..11.28 rows=3 width=128) + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_2_root_id_idx (cost=0.00..4.17 rows=3 width=0) + Index Cond: (root_id = $1) +(14 rows) + +DROP TABLE root_dict CASCADE; +NOTICE: drop cascades to 3 other objects DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 32589c8c..6338d2f7 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -38,6 +38,67 @@ 
select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathm select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; drop table test_hash cascade; +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); + +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; + +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; + +CREATE EXTENSION pg_pathman; +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; + +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); + +-- errors usually start here +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXPLAIN EXECUTE getbyroot(2); + +DROP TABLE root_dict CASCADE; DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; From 8918bd37f8fca14588d8aff72cd9bdba42029df4 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 12 Sep 2018 16:47:23 +0300 Subject: [PATCH 0935/1124] Make few fixes in tests added by 1817d26f --- expected/pathman_dropped_cols.out | 3 +-- sql/pathman_dropped_cols.sql | 2 +- 2 files changed, 2 
insertions(+), 3 deletions(-) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 7c9e2806..4a4f3549 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -112,8 +112,6 @@ ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; ALTER TABLE root_dict DROP COLUMN dict_code, DROP COLUMN dict_name, DROP COLUMN sign; -CREATE EXTENSION pg_pathman; -ERROR: extension "pg_pathman" already exists SELECT create_hash_partitions('root_dict' :: REGCLASS, 'root_id', 3, @@ -204,6 +202,7 @@ EXPLAIN EXECUTE getbyroot(2); Index Cond: (root_id = $1) (14 rows) +DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; NOTICE: drop cascades to 3 other objects DROP SCHEMA dropped_cols CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 6338d2f7..a4d3c844 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -74,7 +74,6 @@ ALTER TABLE root_dict DROP COLUMN dict_code, DROP COLUMN dict_name, DROP COLUMN sign; -CREATE EXTENSION pg_pathman; SELECT create_hash_partitions('root_dict' :: REGCLASS, 'root_id', 3, @@ -99,6 +98,7 @@ EXECUTE getbyroot(2); EXECUTE getbyroot(2); EXPLAIN EXECUTE getbyroot(2); +DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; DROP SCHEMA dropped_cols CASCADE; DROP EXTENSION pg_pathman; From 4666195a96dbef63bc800736bf61943815c0b0dd Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 16:52:56 +0300 Subject: [PATCH 0936/1124] Fix compilation for v10 and fix dropped_cols test, there is still segfault on update nodes --- expected/pathman_dropped_cols.out | 30 +++++++++++++++--------------- sql/pathman_dropped_cols.sql | 2 +- src/include/compat/pg_compat.h | 2 +- src/partition_router.c | 11 ++++++++++- 4 files changed, 27 insertions(+), 18 deletions(-) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 4a4f3549..79e781b2 100644 --- a/expected/pathman_dropped_cols.out +++ 
b/expected/pathman_dropped_cols.out @@ -103,7 +103,7 @@ BEGIN LOOP FOR d IN 1..2 LOOP INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES - (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); END LOOP; END LOOP; END @@ -136,51 +136,51 @@ WHERE root_id = $1; EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 
3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) -- errors usually start here EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXECUTE getbyroot(2); id | root_id | start_date | num | main | edit_num | edit_date | dict_id ----+---------+------------+-------+------+----------+-----------+--------- - 3 | 2 | 09-12-2018 | num_1 | 2 | | | 3 - 4 | 2 | 09-12-2018 | num_2 | 1 | | | 3 + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) EXPLAIN EXECUTE getbyroot(2); diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index a4d3c844..0ae16c8a 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -63,7 +63,7 @@ BEGIN LOOP FOR d IN 1..2 LOOP INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES - (r.r, now(), 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); END LOOP; END LOOP; END diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 1ae1b33b..fdb421ce 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -415,7 +415,7 @@ ExecCheck(ExprState *state, ExprContext *econtext) /* * extract_actual_join_clauses() */ -#if (PG_VERSION_NUM >= 100004) || \ +#if (PG_VERSION_NUM >= 100003) || \ (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) #define 
extract_actual_join_clauses_compat(restrictinfo_list, \ diff --git a/src/partition_router.c b/src/partition_router.c index 7459315a..efd3a382 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -170,11 +170,20 @@ prepare_modify_table_for_partition_router(PlanState *state, void *context) if (!changed_method) { + /* HACK: replace ModifyTable's execution method */ +#if PG_VERSION_NUM >= 110000 if (!mt_method) mt_method = state->ExecProcNodeReal; - /* HACK: replace ModifyTable's execution method */ ExecSetExecProcNode(state, router_run_modify_table); +#elif PG_VERSION_NUM >= 100000 + if (!mt_method) + mt_method = state->ExecProcNode; + + state->ExecProcNode = router_run_modify_table; +#else +#error "doesn't supported yet" +#endif changed_method = true; } From 94f621b47d4fd3f697c82d871d2a36d8ba674c14 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 17:15:48 +0300 Subject: [PATCH 0937/1124] Read parents list before it could lead to segfault --- src/include/compat/pg_compat.h | 2 +- src/relation_info.c | 29 ++++------------------------- 2 files changed, 5 insertions(+), 26 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 4228d264..b3abfcd2 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -322,7 +322,7 @@ static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RE /* * extract_actual_join_clauses() */ -#if (PG_VERSION_NUM >= 100004) || \ +#if (PG_VERSION_NUM >= 100003) || \ (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) #define extract_actual_join_clauses_compat(restrictinfo_list, \ diff --git a/src/relation_info.c b/src/relation_info.c index 1d191f1a..eacc491b 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -991,7 +991,6 @@ finish_delayed_invalidation(void) { Oid *parents = NULL; int parents_count = 0; - bool parents_fetched = false; ListCell *lc; 
AcceptInvalidationMessages(); @@ -1017,26 +1016,19 @@ finish_delayed_invalidation(void) /* Disregard all remaining invalidation jobs */ delayed_invalidation_whole_cache = false; - free_invalidation_lists(); - /* No need to continue, exit */ - return; + goto end; } } + parents = read_parent_oids(&parents_count); + /* We might be asked to perform a complete cache invalidation */ if (delayed_invalidation_whole_cache) { /* Unset 'invalidation_whole_cache' flag */ delayed_invalidation_whole_cache = false; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - /* Invalidate live entries and remove dead ones */ invalidate_pathman_relation_info_cache(parents, parents_count); } @@ -1050,13 +1042,6 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(parent))) continue; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - /* Check if parent still exists */ if (bsearch_oid(parent, parents, parents_count)) /* get_pathman_relation_info() will refresh this entry */ @@ -1074,13 +1059,6 @@ finish_delayed_invalidation(void) if (IsToastNamespace(get_rel_namespace(vague_rel))) continue; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } - /* It might be a partitioned table or a partition */ if (!try_invalidate_parent(vague_rel, parents, parents_count)) { @@ -1117,6 +1095,7 @@ finish_delayed_invalidation(void) } } +end: /* Finally, free invalidation jobs lists */ free_invalidation_lists(); From 0c69df3454dd39e02a200dc31e53bd80336d3c05 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 18:08:55 +0300 Subject: [PATCH 0938/1124] Fix hash join test on 10.5 --- META.json | 2 +- Makefile | 1 + expected/pathman_basic.out | 35 ---------------- expected/pathman_calamity.out | 2 +- 
expected/pathman_hashjoin.out | 73 +++++++++++++++++++++++++++++++++ expected/pathman_hashjoin_1.out | 73 +++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 12 ------ sql/pathman_hashjoin.sql | 44 ++++++++++++++++++++ src/include/init.h | 2 +- 9 files changed, 194 insertions(+), 50 deletions(-) create mode 100644 expected/pathman_hashjoin.out create mode 100644 expected/pathman_hashjoin_1.out create mode 100644 sql/pathman_hashjoin.sql diff --git a/META.json b/META.json index a198d696..447629a4 100644 --- a/META.json +++ b/META.json @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.4.sql", "docfile": "README.md", - "version": "1.4.13", + "version": "1.4.14", "abstract": "Partitioning tool" } }, diff --git a/Makefile b/Makefile index 8fdc0cde..42456b07 100644 --- a/Makefile +++ b/Makefile @@ -45,6 +45,7 @@ REGRESS = pathman_array_qual \ pathman_interval \ pathman_join_clause \ pathman_lateral \ + pathman_hashjoin \ pathman_mergejoin \ pathman_multilevel \ pathman_only \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index fa946d72..c9bce988 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -810,41 +810,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
-> Index Scan using range_rel_2_dt_idx on range_rel_2 (4 rows) -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j3.id = j2.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Hash Join - Hash Cond: (j2.id = j1.id) - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(20 rows) - /* * Test inlined SQL functions */ diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5cda7bc5..e1e65af0 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT get_pathman_lib_version(); get_pathman_lib_version ------------------------- - 1.4.13 + 1.4.14 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out new file mode 100644 index 00000000..71ea1085 --- /dev/null +++ b/expected/pathman_hashjoin.out @@ -0,0 +1,73 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL 
PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using 
range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out new file mode 100644 index 00000000..8e0007d4 --- /dev/null +++ b/expected/pathman_hashjoin_1.out @@ -0,0 +1,73 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on 
num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Hash Join + Hash Cond: (j2.id = j1.id) + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index f24716c0..b7d460c4 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -215,18 +215,6 @@ SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - /* * Test inlined SQL functions */ diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql new file mode 100644 index 00000000..d3cc1b2b --- /dev/null +++ b/sql/pathman_hashjoin.sql @@ -0,0 +1,44 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, 
md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; + +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/src/include/init.h b/src/include/init.h index 6bdccc2e..2227533e 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simpify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT 0x010400 /* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010413 +#define CURRENT_LIB_VERSION 0x010414 void *pathman_cache_search_relid(HTAB *cache_table, From d34a77e061963f4e14a0e8ec9f89e35fc3eb1e3c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 13 Sep 2018 18:36:01 +0300 Subject: [PATCH 0939/1124] Bump version in META.json --- META.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/META.json b/META.json index 447629a4..a211fc36 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": 
"1.4.13", + "version": "1.4.14", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " From 7e76912ea3777aadb4851416d61102139b5ba81b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 18 Sep 2018 16:28:24 +0300 Subject: [PATCH 0940/1124] Add support of pg11 --- src/include/compat/pg_compat.h | 25 +++++++++++++------------ src/partition_router.c | 23 ++++++++++++++--------- src/utility_stmt_hooking.c | 5 ++--- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index fdb421ce..145b2113 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -802,17 +802,6 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, MakeTupleTableSlot() #endif -/* - * ExecInitExtraTupleSlot() - */ -#if PG_VERSION_NUM >= 110000 -#define ExecInitExtraTupleSlotCompat(estate) \ - ExecInitExtraTupleSlot((estate), NULL) -#else -#define ExecInitExtraTupleSlotCompat(estate) \ - ExecInitExtraTupleSlot(estate) -#endif - /* * BackgroundWorkerInitializeConnectionByOid() */ @@ -877,7 +866,6 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, find_childrel_appendrelinfo((root), (rel)) #endif - /* * HeapTupleGetXmin() * Vanilla PostgreSQL has HeaptTupleHeaderGetXmin, but for 64-bit xid @@ -895,6 +883,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * Common code * ------------- */ +static inline TupleTableSlot * +ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) +{ +#if PG_VERSION_NUM >= 110000 + return ExecInitExtraTupleSlot(s,t); +#else + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + if (t) + ExecSetSlotDescriptor(res, t); + + return res; +#endif +} /* See ExecEvalParamExtern() */ static inline ParamExternData * diff --git a/src/partition_router.c b/src/partition_router.c index efd3a382..6f3a143b 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -171,15 +171,12 @@ prepare_modify_table_for_partition_router(PlanState 
*state, void *context) if (!changed_method) { /* HACK: replace ModifyTable's execution method */ -#if PG_VERSION_NUM >= 110000 if (!mt_method) mt_method = state->ExecProcNodeReal; +#if PG_VERSION_NUM >= 110000 ExecSetExecProcNode(state, router_run_modify_table); #elif PG_VERSION_NUM >= 100000 - if (!mt_method) - mt_method = state->ExecProcNode; - state->ExecProcNode = router_run_modify_table; #else #error "doesn't supported yet" @@ -316,7 +313,7 @@ router_run_modify_table(PlanState *state) mt_state = (ModifyTableState *) state; /* Get initial signal */ - mt_plans_old = MTHackField(mt_state, mt_nplans); + mt_plans_old = mt_state->mt_nplans; restart: /* Fetch next tuple */ @@ -359,21 +356,29 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; - /* HACK: disable AFTER STATEMENT triggers */ - MTDisableStmtTriggers(mt_state, state); - if (!TupIsNull(slot)) { /* We should've cached junk filter already */ Assert(state->junkfilter); + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + /* HACK: conditionally disable junk filter in result relation */ state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? state->junkfilter : NULL; /* Don't forget to set saved_slot! 
*/ - state->yielded_slot = slot; + state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, + slot->tts_tupleDescriptor); + ExecCopySlot(state->yielded_slot, slot); + } + else + { + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(mt_state, state); } /* Yield */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index fcd6a1dc..c90a01da 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -514,10 +514,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, RPS_RRI_CB(finish_rri_for_copy, NULL)); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlotCompat(estate); - ExecSetSlotDescriptor(myslot, tupDesc); + myslot = ExecInitExtraTupleSlotCompat(estate, NULL); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); From 0207c4e64b99470ec1c639726ff593d172235cf1 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 18 Sep 2018 18:28:00 +0300 Subject: [PATCH 0941/1124] Add overseer node (not working yet --- Makefile | 2 +- src/hooks.c | 74 ++++++----------- src/include/partition_overseer.h | 54 ++++++++++++ src/include/partition_router.h | 8 +- src/include/planner_tree_modification.h | 8 +- src/partition_overseer.c | 105 ++++++++++++++++++++++++ src/partition_router.c | 64 ++------------- src/pg_pathman.c | 4 +- src/planner_tree_modification.c | 46 ++++++----- 9 files changed, 225 insertions(+), 140 deletions(-) create mode 100644 src/include/partition_overseer.h create mode 100644 src/partition_overseer.c diff --git a/Makefile b/Makefile index f9567f94..7ba97cbd 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ 
src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/src/hooks.c b/src/hooks.c index 1ebb726b..b8c7a194 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -68,7 +68,6 @@ planner_hook_type pathman_planner_hook_next = NULL; post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; -ExecutorRun_hook_type pathman_executor_run_hook_next = NULL; /* Take care of joins */ @@ -616,6 +615,29 @@ pathman_enable_assign_hook(bool newval, void *extra) newval ? "enabled" : "disabled"); } +static void +execute_for_plantree(PlannedStmt *planned_stmt, + Plan *(*proc) (List *rtable, Plan *plan)) +{ + List *subplans = NIL; + ListCell *lc; + Plan *resplan = proc(planned_stmt->rtable, planned_stmt->planTree); + + if (resplan) + planned_stmt->planTree = resplan; + + foreach (lc, planned_stmt->subplans) + { + Plan *subplan = lfirst(lc); + resplan = proc(planned_stmt->rtable, (Plan *) lfirst(lc)); + if (resplan) + subplans = lappend(subplans, resplan); + else + subplans = lappend(subplans, subplan); + } + planned_stmt->subplans = subplans; +} + /* * Planner hook. 
It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from @@ -624,14 +646,6 @@ pathman_enable_assign_hook(bool newval, void *extra) PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { -#define ExecuteForPlanTree(planned_stmt, proc) \ - do { \ - ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ - foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ - } while (0) - PlannedStmt *result; uint32 query_id = parse->queryId; @@ -658,10 +672,10 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + execute_for_plantree(result, add_partition_filters); /* Add PartitionRouter node for UPDATE queries */ - ExecuteForPlanTree(result, add_partition_routers); + execute_for_plantree(result, add_partition_routers); /* Decrement planner() calls count */ decr_planner_calls_count(); @@ -686,7 +700,6 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Finally return the Plan */ return result; -#undef ExecuteForPlanTree } /* @@ -950,40 +963,3 @@ pathman_process_utility_hook(Node *first_arg, context, params, queryEnv, dest, completionTag); } - -/* - * Executor hook (for PartitionRouter). 
- */ -#if PG_VERSION_NUM >= 100000 -void -pathman_executor_hook(QueryDesc *queryDesc, - ScanDirection direction, - ExecutorRun_CountArgType count, - bool execute_once) -#else -void -pathman_executor_hook(QueryDesc *queryDesc, - ScanDirection direction, - ExecutorRun_CountArgType count) -#endif -{ -#define EXECUTOR_HOOK pathman_executor_run_hook_next -#if PG_VERSION_NUM >= 100000 -#define EXECUTOR_HOOK_NEXT(q,d,c) EXECUTOR_HOOK((q),(d),(c), execute_once) -#define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c), execute_once) -#else -#define EXECUTOR_HOOK_NEXT(q,d,c) EXECUTOR_HOOK((q),(d),(c)) -#define EXECUTOR_RUN(q,d,c) standard_ExecutorRun((q),(d),(c)) -#endif - - /* Prepare ModifyTable nodes for PartitionRouter hackery */ - state_tree_visitor((PlanState *) queryDesc->planstate, - prepare_modify_table_for_partition_router, - NULL); - - /* Call hooks set by other extensions if needed */ - if (EXECUTOR_HOOK) - EXECUTOR_HOOK_NEXT(queryDesc, direction, count); - /* Else call internal implementation */ - else EXECUTOR_RUN(queryDesc, direction, count); -} diff --git a/src/include/partition_overseer.h b/src/include/partition_overseer.h new file mode 100644 index 00000000..ddf84c7a --- /dev/null +++ b/src/include/partition_overseer.h @@ -0,0 +1,54 @@ +/* ------------------------------------------------------------------------ + * + * partition_overseer.h + * Restart ModifyTable for unobvious reasons + * + * Copyright (c) 2018, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_OVERSEER_H +#define PARTITION_OVERSEER_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "access/tupconvert.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define OVERSEER_NODE_NAME "PartitionOverseer" + + +extern CustomScanMethods partition_overseer_plan_methods; +extern CustomExecMethods 
partition_overseer_exec_methods; + + +void init_partition_overseer_static_data(void); +Plan *make_partition_overseer(Plan *subplan); + +Node *partition_overseer_create_scan_state(CustomScan *node); + +void partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags); + +TupleTableSlot *partition_overseer_exec(CustomScanState *node); + +void partition_overseer_end(CustomScanState *node); + +void partition_overseer_rescan(CustomScanState *node); + +void partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_OVERSEER_H */ diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 683af938..a07bde60 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -74,12 +74,7 @@ extern CustomExecMethods partition_router_exec_methods; void init_partition_router_static_data(void); - -Plan *make_partition_router(Plan *subplan, - Oid parent_relid, - Index parent_rti, - int epq_param, - List *returning_list); +Plan *make_partition_router(Plan *subplan, int epq_param); void prepare_modify_table_for_partition_router(PlanState *state, void *context); @@ -98,5 +93,6 @@ void partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es); +TupleTableSlot *partition_router_run_modify_table(PlanState *state); #endif /* PARTITION_UPDATE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index b93224ba..43f7a24b 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -25,8 +25,8 @@ void assign_query_id(Query *query); void reset_query_id_generator(void); /* Plan tree rewriting utility */ -void plan_tree_visitor(Plan *plan, - void (*visitor) (Plan *plan, void *context), +Plan * plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), void *context); /* PlanState tree rewriting utility */ @@ -38,8 +38,8 @@ void 
state_tree_visitor(PlanState *state, void pathman_transform_query(Query *parse, ParamListInfo params); /* These functions scribble on Plan tree */ -void add_partition_filters(List *rtable, Plan *plan); -void add_partition_routers(List *rtable, Plan *plan); +Plan *add_partition_filters(List *rtable, Plan *plan); +Plan *add_partition_routers(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ diff --git a/src/partition_overseer.c b/src/partition_overseer.c new file mode 100644 index 00000000..52eea377 --- /dev/null +++ b/src/partition_overseer.c @@ -0,0 +1,105 @@ +#include "postgres.h" + +#include "partition_overseer.h" +#include "partition_filter.h" +#include "partition_router.h" + +CustomScanMethods partition_overseer_plan_methods; +CustomExecMethods partition_overseer_exec_methods; + +void +init_partition_overseer_static_data(void) +{ + partition_overseer_plan_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_plan_methods.CreateCustomScanState = partition_overseer_create_scan_state; + + partition_overseer_exec_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_exec_methods.BeginCustomScan = partition_overseer_begin; + partition_overseer_exec_methods.ExecCustomScan = partition_overseer_exec; + partition_overseer_exec_methods.EndCustomScan = partition_overseer_end; + partition_overseer_exec_methods.ReScanCustomScan = partition_overseer_rescan; + partition_overseer_exec_methods.MarkPosCustomScan = NULL; + partition_overseer_exec_methods.RestrPosCustomScan = NULL; + partition_overseer_exec_methods.ExplainCustomScan = partition_overseer_explain; + + RegisterCustomScanMethods(&partition_overseer_plan_methods); +} + +Plan * +make_partition_overseer(Plan *subplan) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = 
subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_overseer_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = NIL; + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + cscan->custom_scan_tlist = subplan->targetlist; + + return &cscan->scan.plan; +} + + +Node * +partition_overseer_create_scan_state(CustomScan *node) +{ + CustomScanState *state = palloc0(sizeof(CustomScanState)); + NodeSetTag(state, T_CustomScanState); + + state->flags = node->flags; + state->methods = &partition_overseer_exec_methods; + + return (Node *) state; +} + +void +partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags) +{ + CustomScan *css = (CustomScan *) node->ss.ps.plan; + Plan *plan = linitial(css->custom_plans); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); +} + +TupleTableSlot * +partition_overseer_exec(CustomScanState *node) +{ + PlanState *state = linitial(node->custom_ps); + return partition_router_run_modify_table(state); +} + +void +partition_overseer_end(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_overseer_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_overseer_rescan is not implemented"); +} + +void +partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* nothing to do */ +} diff --git a/src/partition_router.c b/src/partition_router.c index 6f3a143b..53349730 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -61,13 +61,6 @@ bool pg_pathman_enable_partition_router = true; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; - -/* FIXME: 
replace this magic with a CustomScan */ -static ExecProcNodeMtd mt_method = NULL; - - -static TupleTableSlot *router_run_modify_table(PlanState *state); - static TupleTableSlot *router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation); @@ -115,12 +108,7 @@ init_partition_router_static_data(void) } Plan * -make_partition_router(Plan *subplan, - Oid parent_relid, - Index parent_rti, - int epq_param, - List *returning_list) - +make_partition_router(Plan *subplan, int epq_param) { CustomScan *cscan = makeNode(CustomScan); @@ -147,49 +135,6 @@ make_partition_router(Plan *subplan, return &cscan->scan.plan; } -void -prepare_modify_table_for_partition_router(PlanState *state, void *context) -{ - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - bool changed_method = false; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) - { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; - - if (!changed_method) - { - /* HACK: replace ModifyTable's execution method */ - if (!mt_method) - mt_method = state->ExecProcNodeReal; - -#if PG_VERSION_NUM >= 110000 - ExecSetExecProcNode(state, router_run_modify_table); -#elif PG_VERSION_NUM >= 100000 - state->ExecProcNode = router_run_modify_table; -#else -#error "doesn't supported yet" -#endif - - changed_method = true; - } - } - } - } -} - - Node * partition_router_create_scan_state(CustomScan *node) { @@ -198,6 +143,7 @@ partition_router_create_scan_state(CustomScan *node) state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); + state = (PartitionRouterState *) 
makeNode(CustomScanState); state->css.flags = node->flags; state->css.methods = &partition_router_exec_methods; @@ -302,8 +248,8 @@ partition_router_explain(CustomScanState *node, /* Smart wrapper over ModifyTable */ -static TupleTableSlot * -router_run_modify_table(PlanState *state) +TupleTableSlot * +partition_router_run_modify_table(PlanState *state) { ModifyTableState *mt_state; TupleTableSlot *slot; @@ -317,7 +263,7 @@ router_run_modify_table(PlanState *state) restart: /* Fetch next tuple */ - slot = mt_method(state); + slot = ExecProcNode(state); /* Get current signal */ mt_plans_new = MTHackField(mt_state, mt_nplans); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 69497f92..1b65a832 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -17,6 +17,7 @@ #include "pathman.h" #include "partition_filter.h" #include "partition_router.h" +#include "partition_overseer.h" #include "planner_tree_modification.h" #include "runtime_append.h" #include "runtime_merge_append.h" @@ -317,8 +318,6 @@ _PG_init(void) planner_hook = pathman_planner_hook; pathman_process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; - pathman_executor_run_hook_next = ExecutorRun_hook; - ExecutorRun_hook = pathman_executor_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); @@ -327,6 +326,7 @@ _PG_init(void) init_runtime_merge_append_static_data(); init_partition_filter_static_data(); init_partition_router_static_data(); + init_partition_overseer_static_data(); } /* Get cached PATHMAN_CONFIG relation Oid */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 071c179f..6b453256 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -14,6 +14,7 @@ #include "partition_filter.h" #include "partition_router.h" +#include "partition_overseer.h" #include "planner_tree_modification.h" #include "relation_info.h" #include "rewrite/rewriteManip.h" @@ -110,8 
+111,8 @@ static bool pathman_transform_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); static void handle_modification_query(Query *parse, transform_query_cxt *context); -static void partition_filter_visitor(Plan *plan, void *context); -static void partition_router_visitor(Plan *plan, void *context); +static Plan *partition_filter_visitor(Plan *plan, void *context); +static Plan *partition_router_visitor(Plan *plan, void *context); static void state_visit_subplans(List *plans, void (*visitor) (), void *context); static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); @@ -154,15 +155,15 @@ reset_query_id_generator(void) * * 'visitor' is applied right before return. */ -void +Plan * plan_tree_visitor(Plan *plan, - void (*visitor) (Plan *plan, void *context), + Plan *(*visitor) (Plan *plan, void *context), void *context) { ListCell *l; if (plan == NULL) - return; + return NULL; check_stack_depth(); @@ -211,7 +212,7 @@ plan_tree_visitor(Plan *plan, plan_tree_visitor(plan->righttree, visitor, context); /* Apply visitor to the current node */ - visitor(plan, context); + return visitor(plan, context); } void @@ -687,19 +688,23 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) */ /* Add PartitionFilter nodes to the plan tree */ -void +Plan * add_partition_filters(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_filter) - plan_tree_visitor(plan, partition_filter_visitor, rtable); + return plan_tree_visitor(plan, partition_filter_visitor, rtable); + + return NULL; } /* Add PartitionRouter nodes to the plan tree */ -void +Plan * add_partition_routers(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_router) - plan_tree_visitor(plan, partition_router_visitor, rtable); + return plan_tree_visitor(plan, partition_router_visitor, rtable); + + return NULL; } /* @@ -707,7 +712,7 @@ add_partition_routers(List 
*rtable, Plan *plan) * * 'context' should point to the PlannedStmt->rtable. */ -static void +static Plan * partition_filter_visitor(Plan *plan, void *context) { List *rtable = (List *) context; @@ -718,7 +723,7 @@ partition_filter_visitor(Plan *plan, void *context) /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) - return; + return NULL; Assert(rtable && IsA(rtable, List)); @@ -748,6 +753,8 @@ partition_filter_visitor(Plan *plan, void *context) returning_list); } } + + return NULL; } /* @@ -755,7 +762,7 @@ partition_filter_visitor(Plan *plan, void *context) * * 'context' should point to the PlannedStmt->rtable. */ -static void +static Plan * partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; @@ -766,15 +773,16 @@ partition_router_visitor(Plan *plan, void *context) /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) - return; + return NULL; Assert(rtable && IsA(rtable, List)); if (modifytable_contains_fdw(rtable, modify_table)) { - ereport(ERROR, + ereport(WARNING, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); + return NULL; } lc3 = list_head(modify_table->returningLists); @@ -803,10 +811,8 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext(lc3); } - prouter = make_partition_router((Plan *) lfirst(lc1), relid, - modify_table->nominalRelation, - modify_table->epqParam, - returning_list); + prouter = make_partition_router((Plan *) lfirst(lc1), + modify_table->epqParam); pfilter = make_partition_filter((Plan *) prouter, relid, modify_table->nominalRelation, @@ -817,6 +823,8 @@ partition_router_visitor(Plan *plan, void *context) lfirst(lc1) = pfilter; } } + + return make_partition_overseer(plan); } From f18aa524276ea3a960afc1412cdbc326cd097e7c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: 
Tue, 18 Sep 2018 19:57:09 +0300 Subject: [PATCH 0942/1124] Fix updating using Overseer node (still has errors) --- expected/pathman_update_node.out | 42 ++++++++++---------- src/include/partition_router.h | 30 ++++---------- src/partition_overseer.c | 68 ++++++++++++++++++++++++++++++-- src/partition_router.c | 45 --------------------- src/planner_tree_modification.c | 17 +++++--- 5 files changed, 104 insertions(+), 98 deletions(-) diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 4f379e05..120b42c4 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -15,29 +15,31 @@ SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); /* Moving from 2st to 1st partition */ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; - QUERY PLAN -------------------------------------------------------------------- - Update on test_range_2 - -> Custom Scan (PartitionFilter) - -> Custom Scan (PartitionRouter) - -> Bitmap Heap Scan on test_range_2 - Recheck Cond: (val = '15'::numeric) - -> Bitmap Index Scan on test_range_2_val_idx - Index Cond: (val = '15'::numeric) -(7 rows) + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) /* Keep same partition */ EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; - QUERY PLAN -------------------------------------------------------------------- - Update on test_range_2 - -> Custom Scan (PartitionFilter) - -> Custom Scan (PartitionRouter) - -> Bitmap Heap Scan on test_range_2 - Recheck Cond: (val = '15'::numeric) - -> Bitmap Index Scan on test_range_2_val_idx - Index 
Cond: (val = '15'::numeric) -(7 rows) + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) /* Update values in 1st partition (rows remain there) */ UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; diff --git a/src/include/partition_router.h b/src/include/partition_router.h index a07bde60..8240d13b 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -56,43 +56,27 @@ extern CustomScanMethods partition_router_plan_methods; extern CustomExecMethods partition_router_exec_methods; -#define IsPartitionRouterPlan(node) \ - ( \ - IsA((node), CustomScan) && \ - (((CustomScan *) (node))->methods == &partition_router_plan_methods) \ - ) - #define IsPartitionRouterState(node) \ ( \ IsA((node), CustomScanState) && \ (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ ) -#define IsPartitionRouter(node) \ - ( IsPartitionRouterPlan(node) || IsPartitionRouterState(node) ) - +/* Highlight hacks with ModifyTable's fields */ +#define MTHackField(mt_state, field) ( (mt_state)->field ) void init_partition_router_static_data(void); - -Plan *make_partition_router(Plan *subplan, int epq_param); - -void prepare_modify_table_for_partition_router(PlanState *state, void *context); - - -Node *partition_router_create_scan_state(CustomScan *node); - +void prepare_modify_table_for_partition_router(PlanState *state, + void *context); void partition_router_begin(CustomScanState *node, EState *estate, int eflags); - -TupleTableSlot *partition_router_exec(CustomScanState *node); - void partition_router_end(CustomScanState *node); - void partition_router_rescan(CustomScanState *node); - void 
partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es); -TupleTableSlot *partition_router_run_modify_table(PlanState *state); +Plan *make_partition_router(Plan *subplan, int epq_param); +Node *partition_router_create_scan_state(CustomScan *node); +TupleTableSlot *partition_router_exec(CustomScanState *node); #endif /* PARTITION_UPDATE_H */ diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 52eea377..5178150d 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -1,8 +1,9 @@ #include "postgres.h" -#include "partition_overseer.h" #include "partition_filter.h" +#include "partition_overseer.h" #include "partition_router.h" +#include "planner_tree_modification.h" CustomScanMethods partition_overseer_plan_methods; CustomExecMethods partition_overseer_exec_methods; @@ -64,6 +65,30 @@ partition_overseer_create_scan_state(CustomScan *node) return (Node *) state; } +static void +set_mt_state_for_router(PlanState *state, void *context) +{ + if (IsA(state, ModifyTableState)) + { + ModifyTableState *mt_state = (ModifyTableState *) state; + int i; + + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } + } + } +} + void partition_overseer_begin(CustomScanState *node, EState *estate, @@ -74,13 +99,48 @@ partition_overseer_begin(CustomScanState *node, /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); + + /* Save ModifyTableState in PartitionRouterState structs */ + state_tree_visitor((PlanState *) linitial(node->custom_ps), + set_mt_state_for_router, + NULL); } TupleTableSlot * 
partition_overseer_exec(CustomScanState *node) { - PlanState *state = linitial(node->custom_ps); - return partition_router_run_modify_table(state); + ModifyTableState *mt_state = linitial(node->custom_ps); + + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + /* Get initial signal */ + mt_plans_old = mt_state->mt_nplans; + +restart: + /* Fetch next tuple */ + slot = ExecProcNode((PlanState *) mt_state); + + /* Get current signal */ + mt_plans_new = MTHackField(mt_state, mt_nplans); + + /* Did PartitionRouter ask us to restart? */ + if (mt_plans_new != mt_plans_old) + { + /* Signal points to current plan */ + int state_idx = -mt_plans_new; + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; + + /* Restart ModifyTable */ + goto restart; + } + + return slot; } void @@ -101,5 +161,5 @@ partition_overseer_explain(CustomScanState *node, List *ancestors, ExplainState *es) { - /* nothing to do */ + /* Nothing to do here now */ } diff --git a/src/partition_router.c b/src/partition_router.c index 53349730..3ac1ece6 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -26,10 +26,6 @@ #include "utils/rel.h" -/* Highlight hacks with ModifyTable's fields */ -#define MTHackField(mt_state, field) ( (mt_state)->field ) - - #define MTDisableStmtTriggers(mt_state, pr_state) \ do { \ TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ @@ -143,7 +139,6 @@ partition_router_create_scan_state(CustomScan *node) state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); NodeSetTag(state, T_CustomScanState); - state = (PartitionRouterState *) makeNode(CustomScanState); state->css.flags = node->flags; state->css.methods = &partition_router_exec_methods; @@ -246,46 +241,6 @@ partition_router_explain(CustomScanState *node, /* Nothing to do here now */ } - -/* Smart wrapper over ModifyTable */ 
-TupleTableSlot * -partition_router_run_modify_table(PlanState *state) -{ - ModifyTableState *mt_state; - TupleTableSlot *slot; - int mt_plans_old, - mt_plans_new; - - mt_state = (ModifyTableState *) state; - - /* Get initial signal */ - mt_plans_old = mt_state->mt_nplans; - -restart: - /* Fetch next tuple */ - slot = ExecProcNode(state); - - /* Get current signal */ - mt_plans_new = MTHackField(mt_state, mt_nplans); - - /* Did PartitionRouter ask us to restart? */ - if (mt_plans_new != mt_plans_old) - { - /* Signal points to current plan */ - int state_idx = -mt_plans_new; - - /* HACK: partially restore ModifyTable's state */ - MTHackField(mt_state, mt_done) = false; - MTHackField(mt_state, mt_nplans) = mt_plans_old; - MTHackField(mt_state, mt_whichplan) = state_idx; - - /* Restart ModifyTable */ - goto restart; - } - - return slot; -} - /* Return tuple OR yield it and change ModifyTable's operation */ static TupleTableSlot * router_set_slot(PartitionRouterState *state, diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 6b453256..a3b06873 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -765,11 +765,12 @@ partition_filter_visitor(Plan *plan, void *context) static Plan * partition_router_visitor(Plan *plan, void *context) { - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2, - *lc3; + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; + ListCell *lc1, + *lc2, + *lc3; + bool changed = false; /* Skip if not ModifyTable with 'UPDATE' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) @@ -821,10 +822,14 @@ partition_router_visitor(Plan *plan, void *context) returning_list); lfirst(lc1) = pfilter; + changed = true; } } - return make_partition_overseer(plan); + if (changed) + return make_partition_overseer(plan); + + return NULL; } From 
9d7980aebfc4767a0ada9ae8fb5835bf7fab1481 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 13:19:16 +0300 Subject: [PATCH 0943/1124] Fix partition router running --- src/partition_overseer.c | 41 ++++++++++++++++++++-------------------- src/partition_router.c | 25 ++++++------------------ 2 files changed, 26 insertions(+), 40 deletions(-) diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 5178150d..2456f6aa 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,25 +68,24 @@ partition_overseer_create_scan_state(CustomScan *node) static void set_mt_state_for_router(PlanState *state, void *context) { - if (IsA(state, ModifyTableState)) - { - ModifyTableState *mt_state = (ModifyTableState *) state; - int i; - - for (i = 0; i < mt_state->mt_nplans; i++) - { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - - /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) - { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; - } - } - } + ModifyTableState *mt_state = (ModifyTableState *) state; + + if (!IsA(state, ModifyTableState)) + return; + + for (int i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; + PartitionRouterState *pr_state; + + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state) && + IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } + } } void @@ -119,7 +118,7 @@ partition_overseer_exec(CustomScanState *node) mt_plans_old = mt_state->mt_nplans; restart: - /* Fetch next tuple */ + /* Run ModifyTable */ slot = ExecProcNode((PlanState *) mt_state); /* Get current signal 
*/ @@ -136,7 +135,7 @@ partition_overseer_exec(CustomScanState *node) MTHackField(mt_state, mt_nplans) = mt_plans_old; MTHackField(mt_state, mt_whichplan) = state_idx; - /* Restart ModifyTable */ + /* Rerun ModifyTable */ goto restart; } diff --git a/src/partition_router.c b/src/partition_router.c index 3ac1ece6..82578c5d 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -63,7 +63,6 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, static TupleTableSlot *router_get_slot(PartitionRouterState *state, bool *should_process); -static void router_lazy_init_junkfilter(PartitionRouterState *state); static void router_lazy_init_constraint(PartitionRouterState *state); static ItemPointerData router_extract_ctid(PartitionRouterState *state, @@ -185,8 +184,9 @@ partition_router_exec(CustomScanState *node) ItemPointerSetInvalid(&ctid); - /* Build new junkfilter lazily */ - router_lazy_init_junkfilter(state); + /* Build new junkfilter if needed */ + if (state->junkfilter == NULL) + state->junkfilter = state->current_rri->ri_junkFilter; /* Build recheck constraint state lazily */ router_lazy_init_constraint(state); @@ -257,15 +257,14 @@ router_set_slot(PartitionRouterState *state, MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; MTHackField(mt_state, operation) = operation; + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + if (!TupIsNull(slot)) { /* We should've cached junk filter already */ Assert(state->junkfilter); - /* HACK: disable AFTER STATEMENT triggers */ - MTDisableStmtTriggers(mt_state, state); - - /* HACK: conditionally disable junk filter in result relation */ state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? 
state->junkfilter : @@ -276,11 +275,6 @@ router_set_slot(PartitionRouterState *state, slot->tts_tupleDescriptor); ExecCopySlot(state->yielded_slot, slot); } - else - { - /* HACK: enable AFTER STATEMENT triggers */ - MTEnableStmtTriggers(mt_state, state); - } /* Yield */ state->yielded = true; @@ -324,13 +318,6 @@ router_get_slot(PartitionRouterState *state, return slot; } -static void -router_lazy_init_junkfilter(PartitionRouterState *state) -{ - if (state->junkfilter == NULL) - state->junkfilter = state->current_rri->ri_junkFilter; -} - static void router_lazy_init_constraint(PartitionRouterState *state) { From 5ab36a6b61adc8b49ce858775418e9cebed9474b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 16:02:00 +0300 Subject: [PATCH 0944/1124] Fix python tests --- tests/python/partitioning_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index f2b2ea51..cb1282c6 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1022,6 +1022,12 @@ def test_update_node_plan1(self): plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0] plan = plan[0]["Plan"] + # PartitionOverseer + self.assertEqual(plan["Node Type"], "Custom Scan") + self.assertEqual(plan["Custom Plan Provider"], 'PartitionOverseer') + + # ModifyTable + plan = plan["Plans"][0] self.assertEqual(plan["Node Type"], "ModifyTable") self.assertEqual(plan["Operation"], "Update") self.assertEqual(plan["Relation Name"], "test_range") From 60268e8e712e6c56c6d3efa080ca45305f35e730 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 17:31:21 +0300 Subject: [PATCH 0945/1124] Return get_pathman_lib_version as deprecated function --- init.sql | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/init.sql b/init.sql index 12546cca..6fd6a0c7 100644 --- a/init.sql +++ b/init.sql @@ -847,3 +847,8 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION 
@extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; + +-- deprecated +CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; From 77ab2c4283ff543525b9556e12670494cd312e4b Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 17:45:19 +0300 Subject: [PATCH 0946/1124] Add first revision of migration file --- pg_pathman--1.4--1.5.sql | 833 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 833 insertions(+) create mode 100644 pg_pathman--1.4--1.5.sql diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql new file mode 100644 index 00000000..8b02dcf4 --- /dev/null +++ b/pg_pathman--1.4--1.5.sql @@ -0,0 +1,833 @@ +/* + * Drop triggers + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + triggername TEXT; + relation OID; + +BEGIN + triggername := concat(parent_relid::text, '_upd_trig'); + + /* Drop trigger for each partition if exists */ + FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid + FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) + LOOP + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + relation::REGCLASS); + END LOOP; + + /* Drop trigger on parent */ + IF EXISTS (SELECT * FROM pg_catalog.pg_trigger + WHERE tgname = triggername AND tgrelid = parent_relid) + THEN + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + parent_relid::TEXT); + END IF; +END +$$ LANGUAGE plpgsql STRICT; + +DO $$ +DECLARE r record; +BEGIN + FOR r IN SELECT parent_relid FROM @extschema@.pathman_config + LOOP + PERFORM @extschema@.drop_triggers(r.parent_relid); + + END LOOP; +END$$; + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value 
ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* Check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Append new partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE 
plpgsql; + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + part_type INTEGER; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* Check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + 
FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Create a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + 
END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to 
determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on bounds array + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + 
partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + part_count INTEGER := 0; + +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[1], + bounds[array_length(bounds, 1)]); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := 
@extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid)); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop a naming sequence for partitioned table. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +-- deprecated +CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; + +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key type. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' +LANGUAGE C STRICT; + +/* + * Get partitioning type. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Merge RANGE partitions. + */ +DROP FUNCTION public.merge_range_partitions(regclass[]); +DROP FUNCTION public.merge_range_partitions(regclass, regclass); + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Prepend new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Show all existing concurrent partitioning tasks. + */ +CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT8, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +/* + * Split RANGE partition in two using a pivot. 
+ */ +DROP FUNCTION public.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +ALTER TABLE public.pathman_concurrent_part_tasks + ALTER COLUMN processed SET TYPE bigint; + +DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); +DROP FUNCTION @extschema@.build_update_trigger_name(regclass); +DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); +DROP FUNCTION @extschema@.create_update_triggers(regclass); +DROP FUNCTION @extschema@.drop_triggers(regclass); +DROP FUNCTION @extschema@.has_update_trigger(regclass); +DROP FUNCTION @extschema@.pathman_update_trigger_func(); From 4f932ab2c734a72e5021107609fd5a2914546b7c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 19 Sep 2018 18:40:20 +0300 Subject: [PATCH 0947/1124] Fix migration script --- Makefile | 3 +- pg_pathman--1.4--1.5.sql | 68 +++++++++------------------------------- 2 files changed, 17 insertions(+), 54 deletions(-) diff --git a/Makefile b/Makefile index 7ba97cbd..efd0cbc5 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,8 @@ DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql \ pg_pathman--1.2--1.3.sql \ - pg_pathman--1.3--1.4.sql + pg_pathman--1.3--1.4.sql \ + pg_pathman--1.4--1.5.sql PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index 8b02dcf4..cdda146b 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -1,47 +1,3 @@ -/* - * Drop triggers - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - triggername TEXT; - relation OID; - -BEGIN - triggername := 
concat(parent_relid::text, '_upd_trig'); - - /* Drop trigger for each partition if exists */ - FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid - FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) - LOOP - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - relation::REGCLASS); - END LOOP; - - /* Drop trigger on parent */ - IF EXISTS (SELECT * FROM pg_catalog.pg_trigger - WHERE tgname = triggername AND tgrelid = parent_relid) - THEN - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - parent_relid::TEXT); - END IF; -END -$$ LANGUAGE plpgsql STRICT; - -DO $$ -DECLARE r record; -BEGIN - FOR r IN SELECT parent_relid FROM @extschema@.pathman_config - LOOP - PERFORM @extschema@.drop_triggers(r.parent_relid); + - END LOOP; -END$$; - /* * Add new partition */ @@ -685,8 +641,7 @@ END $$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ --- deprecated -CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() +CREATE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; @@ -706,7 +661,8 @@ LANGUAGE sql STRICT; /* * Get partitioning key. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( +DROP FUNCTION @extschema@.get_partition_key(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key( parent_relid REGCLASS) RETURNS TEXT AS $$ @@ -719,7 +675,8 @@ LANGUAGE sql STRICT; /* * Get partitioning key type. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( +DROP FUNCTION @extschema@.get_partition_key_type(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key_type( parent_relid REGCLASS) RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; @@ -727,6 +684,7 @@ LANGUAGE C STRICT; /* * Get partitioning type. 
*/ +DROP FUNCTION @extschema@.get_partition_type(REGCLASS); CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( parent_relid REGCLASS) RETURNS INT4 AS @@ -798,7 +756,9 @@ $$ LANGUAGE plpgsql; /* * Show all existing concurrent partitioning tasks. */ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +DROP VIEW @extschema@.pathman_concurrent_part_tasks; +DROP FUNCTION @extschema@.show_concurrent_part_tasks(); +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, @@ -809,6 +769,10 @@ RETURNS TABLE ( AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; +CREATE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + /* * Split RANGE partition in two using a pivot. */ @@ -821,13 +785,11 @@ CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' LANGUAGE C; -ALTER TABLE public.pathman_concurrent_part_tasks - ALTER COLUMN processed SET TYPE bigint; - DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); DROP FUNCTION @extschema@.build_update_trigger_name(regclass); DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); DROP FUNCTION @extschema@.create_update_triggers(regclass); DROP FUNCTION @extschema@.drop_triggers(regclass); DROP FUNCTION @extschema@.has_update_trigger(regclass); -DROP FUNCTION @extschema@.pathman_update_trigger_func(); +DROP FUNCTION @extschema@.pathman_update_trigger_func() CASCADE; +DROP FUNCTION @extschema@.get_pathman_lib_version(); From fc4463e77ea8b809409d6815aad24615b42df4ab Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 12:36:58 +0300 Subject: [PATCH 0948/1124] Remove get_pathman_lib_version --- init.sql | 5 ----- 1 file changed, 5 deletions(-) diff --git a/init.sql b/init.sql index 6fd6a0c7..12546cca 100644 --- 
a/init.sql +++ b/init.sql @@ -847,8 +847,3 @@ LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; - --- deprecated -CREATE OR REPLACE FUNCTION public.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'pathman_version' -LANGUAGE C STRICT; From b5f7633b99dcc8a3d8cc596e575f377b547220f5 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 14:08:00 +0300 Subject: [PATCH 0949/1124] Fix migration script --- pg_pathman--1.4--1.5.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index cdda146b..fe29a586 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -698,8 +698,8 @@ LANGUAGE sql STRICT; /* * Merge RANGE partitions. */ -DROP FUNCTION public.merge_range_partitions(regclass[]); -DROP FUNCTION public.merge_range_partitions(regclass, regclass); +DROP FUNCTION @extschema@.merge_range_partitions(regclass[]); +DROP FUNCTION @extschema@.merge_range_partitions(regclass, regclass); CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) @@ -776,7 +776,7 @@ GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; /* * Split RANGE partition in two using a pivot. 
*/ -DROP FUNCTION public.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +DROP FUNCTION @extschema@.split_range_partition(regclass, anyelement, text, text, OUT anyarray); CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( partition_relid REGCLASS, split_value ANYELEMENT, From e9041c78028cff641ccc26dc75651e27bde22087 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 16:35:47 +0300 Subject: [PATCH 0950/1124] Remove cooked_expr column from pathman_config --- expected/pathman_basic.out | 16 ++++----- expected/pathman_calamity.out | 18 +++++----- expected/pathman_column_type.out | 35 ++++++++++++------- expected/pathman_permissions.out | 6 ++-- init.sql | 15 +++++--- pg_pathman--1.4--1.5.sql | 27 ++++++++++++++ sql/pathman_calamity.sql | 16 ++++----- sql/pathman_column_type.sql | 10 +++--- src/hooks.c | 3 -- src/include/pathman.h | 3 +- src/include/relation_info.h | 16 +-------- src/init.c | 60 -------------------------------- src/partition_creation.c | 12 +++---- src/pl_funcs.c | 26 +++++++++++--- src/pl_range_funcs.c | 31 ++--------------- src/relation_info.c | 35 ++++++------------- 16 files changed, 132 insertions(+), 197 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index e9950470..3a9e0a65 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1438,16 +1438,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr -----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval 
+----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ----------+------+----------+----------------+------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1544,9 +1544,9 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ---------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 2889cc80..e28777bf 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -281,21 +281,21 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ ERROR: relation "1" does not exist -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ ERROR: 'partrel' should not be NULL -SELECT 
validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ ERROR: 'parttype' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ ERROR: interval should be NULL for HASH partitioned table -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ ERROR: failed to analyze partitioning expression "expr" -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -ERROR: unrecognized token: "cooked_expr" -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ ERROR: failed to analyze partitioning expression "EXPR" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index eacdb97a..d3022d77 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -29,12 +29,30 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; (4 rows) /* change column's type (should flush caches) */ +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 
:varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr ------------------------+------------- - test_column_type.test | +/* check that expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) /* make sure that everything works properly */ @@ -43,13 +61,6 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr ------------------------+------------------------------------------------------------------------------------------------------------------------- - test_column_type.test | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} -(1 row) - SELECT context, entries FROM pathman_cache_stats ORDER BY context; context | entries -------------------------+--------- diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 388fc2bc..d03588c7 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -44,9 +44,9 @@ SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); /* Should be able to 
see */ SET ROLE user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval | cooked_expr --------------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +-------------------------+------+----------+---------------- + permissions.user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; diff --git a/init.sql b/init.sql index 12546cca..fdb774db 100644 --- a/init.sql +++ b/init.sql @@ -18,8 +18,7 @@ CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, expr TEXT, parttype INTEGER, - range_interval TEXT, - cooked_expr TEXT) + range_interval TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; @@ -37,7 +36,6 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( expr TEXT NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT DEFAULT NULL, - cooked_expr TEXT DEFAULT NULL, /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), @@ -47,8 +45,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( CHECK (@extschema@.validate_interval_value(partrel, expr, parttype, - range_interval, - cooked_expr)) + range_interval)) ); @@ -674,6 +671,14 @@ CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; +/* + * Get parsed and analyzed expression. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + /* * Get partitioning type. 
*/ diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index fe29a586..a8e7fb21 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -1,3 +1,30 @@ +ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_interval_check; + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, + TEXT, TEXT); +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr; +ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval)); + +/* + * Get parsed and analyzed expression. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + /* * Add new partition */ diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 1c48138e..51827887 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -132,14 +132,14 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -SELECT 
validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 47d38cc5..ab2b43f1 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -18,17 +18,17 @@ SELECT * FROM test_column_type.test; SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* change column's type (should flush caches) */ +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; +/* check that expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* check insert dispatching */ diff --git a/src/hooks.c 
b/src/hooks.c index b8c7a194..5cd3e14c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -949,9 +949,6 @@ pathman_process_utility_hook(Node *first_arg, " of table \"%s\" partitioned by HASH", get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); - - /* Don't forget to invalidate parsed partitioning expression */ - pathman_config_invalidate_parsed_expression(relation_oid); } } diff --git a/src/include/pathman.h b/src/include/pathman.h index b5f9a156..b9acfe59 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -45,12 +45,11 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 5 +#define Natts_pathman_config 4 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_expr 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. 
(text) */ -#define Anum_pathman_config_cooked_expr 5 /* parsed partitioning expression (text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 diff --git a/src/include/relation_info.h b/src/include/relation_info.h index f3faa3d3..6b9ffa92 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -384,27 +384,13 @@ Node *parse_partitioning_expression(const Oid relid, char **query_string_out, Node **parsetree_out); -Datum cook_partitioning_expression(const Oid relid, +Node *cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type); char *canonicalize_partitioning_expression(const Oid relid, const char *expr_cstr); -/* Partitioning expression routines */ -Node *parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, - Node **parsetree_out); - -Datum cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type); - -char *canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr); - - /* Global invalidation routines */ void delay_pathman_shutdown(void); void finish_delayed_invalidation(void); diff --git a/src/init.c b/src/init.c index 9e15628e..f6ddbdae 100644 --- a/src/init.c +++ b/src/init.c @@ -675,66 +675,6 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, return contains_rel; } -/* Invalidate parsed partitioning expression in PATHMAN_CONFIG */ -void -pathman_config_invalidate_parsed_expression(Oid relid) -{ - ItemPointerData iptr; /* pointer to tuple */ - Datum values[Natts_pathman_config]; - bool nulls[Natts_pathman_config]; - - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, nulls, NULL, &iptr)) - { - Relation rel; - HeapTuple new_htup; - - /* Reset parsed expression */ - values[Anum_pathman_config_cooked_expr - 1] = (Datum) 0; - nulls[Anum_pathman_config_cooked_expr - 1] = true; - - rel = 
heap_open(get_pathman_config_relid(false), RowExclusiveLock); - - /* Form new tuple and perform an update */ - new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls); - CatalogTupleUpdate(rel, &iptr, new_htup); - heap_freetuple(new_htup); - - heap_close(rel, RowExclusiveLock); - } -} - -/* Refresh parsed partitioning expression in PATHMAN_CONFIG */ -void -pathman_config_refresh_parsed_expression(Oid relid, - Datum *values, - bool *isnull, - ItemPointer iptr) -{ - char *expr_cstr; - Datum expr_datum; - - Relation rel; - HeapTuple htup_new; - - /* get and parse expression */ - expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - expr_datum = cook_partitioning_expression(relid, expr_cstr, NULL); - pfree(expr_cstr); - - /* prepare tuple values */ - values[Anum_pathman_config_cooked_expr - 1] = expr_datum; - isnull[Anum_pathman_config_cooked_expr - 1] = false; - - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); - - htup_new = heap_form_tuple(RelationGetDescr(rel), values, isnull); - CatalogTupleUpdate(rel, iptr, htup_new); - - heap_close(rel, RowExclusiveLock); -} - - /* * Loads additional pathman parameters like 'enable_parent' * or 'auto' from PATHMAN_CONFIG_PARAMS. 
diff --git a/src/partition_creation.c b/src/partition_creation.c index 1ddc39e1..fc950c4f 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1854,20 +1854,15 @@ build_partitioning_expression(Oid parent_relid, expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL); - pfree(expr_cstr); /* We need expression type for hash functions */ if (expr_type) { - char *expr_p_cstr; - - /* We can safely assume that this field will always remain not null */ - Assert(!isnull[Anum_pathman_config_cooked_expr - 1]); - expr_p_cstr = - TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); + Node *expr; + expr = cook_partitioning_expression(parent_relid, expr_cstr, NULL); /* Finally return expression type */ - *expr_type = exprType(stringToNode(expr_p_cstr)); + *expr_type = exprType(expr); } if (columns) @@ -1877,5 +1872,6 @@ build_partitioning_expression(Oid parent_relid, extract_column_names(expr, columns); } + pfree(expr_cstr); return expr; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b90619e0..44a5f93f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -47,6 +47,7 @@ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); +PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); @@ -140,6 +141,25 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS) PG_RETURN_OID(typid); } +/* + * Return partition key type. 
+ */ +Datum +get_partition_cooked_key_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; + Datum res; + + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); + + res = CStringGetTextDatum(nodeToString(prel->expr)); + close_pathman_relation_info(prel); + + PG_RETURN_TEXT_P(res); +} + /* * Extract basic type of a domain. */ @@ -685,7 +705,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) HeapTuple htup; Oid expr_type; - Datum expr_datum; PathmanInitState init_state; @@ -750,7 +769,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } /* Parse and check expression */ - expr_datum = cook_partitioning_expression(relid, expression, &expr_type); + cook_partitioning_expression(relid, expression, &expr_type); /* Canonicalize user's expression (trim whitespaces etc) */ expression = canonicalize_partitioning_expression(relid, expression); @@ -778,9 +797,6 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expr - 1] = false; - values[Anum_pathman_config_cooked_expr - 1] = expr_datum; - isnull[Anum_pathman_config_cooked_expr - 1] = false; - /* Insert new row into PATHMAN_CONFIG */ pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index f8f52e9d..351926f7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -400,7 +400,6 @@ validate_interval_value(PG_FUNCTION_ARGS) #define ARG_EXPRESSION 1 #define ARG_PARTTYPE 2 #define ARG_RANGE_INTERVAL 3 -#define ARG_EXPRESSION_P 4 Oid partrel; PartType parttype; @@ -433,35 +432,9 @@ validate_interval_value(PG_FUNCTION_ARGS) else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); /* - * Fetch partitioning expression's type using - * either user's expression or parsed expression. - * - * NOTE: we check number of function's arguments - * in case of late updates (e.g. 1.1 => 1.4). 
+ * Try to parse partitioning expression, could fail with ERROR. */ - if (PG_ARGISNULL(ARG_EXPRESSION_P) || PG_NARGS() <= ARG_EXPRESSION_P) - { - Datum expr_datum; - - /* We'll have to parse expression with our own hands */ - expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); - - /* Free both expressions */ - pfree(DatumGetPointer(expr_datum)); - pfree(expr_cstr); - } - else - { - char *expr_p_cstr; - - /* Good, let's use a cached parsed expression */ - expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); - expr_type = exprType(stringToNode(expr_p_cstr)); - - /* Free both expressions */ - pfree(expr_p_cstr); - pfree(expr_cstr); - } + cook_partitioning_expression(partrel, expr_cstr, &expr_type); /* * NULL interval is fine for both HASH and RANGE. diff --git a/src/relation_info.c b/src/relation_info.c index 386008d2..8ee74217 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -340,19 +340,12 @@ get_pathman_relation_info(Oid relid) bool isnull[Natts_pathman_config]; bool found; - /* Check if PATHMAN_CONFIG table contains this relation */ + /* + * Check if PATHMAN_CONFIG table contains this relation and + * build a partitioned table cache entry (might emit ERROR). 
+ */ if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) - { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - - /* Update pending partitioning expression */ - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, - isnull, &iptr); - - /* Build a partitioned table cache entry (might emit ERROR) */ prel = build_pathman_relation_info(relid, values); - } /* Create a new entry for this relation */ psin = pathman_cache_search_relid(status_cache, @@ -414,7 +407,6 @@ build_pathman_relation_info(Oid relid, Datum *values) { MemoryContext old_mcxt; const TypeCacheEntry *typcache; - char *expr; Datum param_values[Natts_pathman_config_params]; bool param_isnull[Natts_pathman_config_params]; Oid *prel_children; @@ -428,15 +420,12 @@ build_pathman_relation_info(Oid relid, Datum *values) /* Set partitioning type */ prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); - /* Switch to persistent memory context */ old_mcxt = MemoryContextSwitchTo(prel->mcxt); /* Build partitioning expression tree */ prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - prel->expr = (Node *) stringToNode(expr); + prel->expr = cook_partitioning_expression(relid, prel->expr_cstr, NULL); fix_opfuncids(prel->expr); /* Extract Vars and varattnos of partitioning expression */ @@ -1361,18 +1350,16 @@ parse_partitioning_expression(const Oid relid, } /* Parse partitioning expression and return its type and nodeToString() as TEXT */ -Datum +Node * cook_partitioning_expression(const Oid relid, const char *expr_cstr, Oid *expr_type_out) /* ret value #1 */ { + Node *expr; Node *parse_tree; List *query_tree_list; - char *query_string, - *expr_serialized = ""; /* keep compiler happy */ - - Datum expr_datum; + char *query_string; MemoryContext parse_mcxt, old_mcxt; @@ -1400,7 +1387,6 @@ 
cook_partitioning_expression(const Oid relid, PG_TRY(); { Query *query; - Node *expr; int expr_attr; Relids expr_varnos; Bitmapset *expr_varattnos = NULL; @@ -1478,7 +1464,6 @@ cook_partitioning_expression(const Oid relid, bms_free(expr_varattnos); Assert(expr); - expr_serialized = nodeToString(expr); /* Set 'expr_type_out' if needed */ if (expr_type_out) @@ -1514,12 +1499,12 @@ cook_partitioning_expression(const Oid relid, MemoryContextSwitchTo(old_mcxt); /* Get Datum of serialized expression (right mcxt) */ - expr_datum = CStringGetTextDatum(expr_serialized); + expr = copyObject(expr); /* Free memory */ MemoryContextDelete(parse_mcxt); - return expr_datum; + return expr; } /* Canonicalize user's expression (trim whitespaces etc) */ From d739585de7366ca70acf0c4d50a6a1f1cd228fd1 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 16:44:43 +0300 Subject: [PATCH 0951/1124] Fix tests for postgres with version >= 10 --- expected/pathman_basic_1.out | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 692de996..61aed5db 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1438,16 +1438,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr -----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 21 other 
objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ----------+------+----------+----------------+------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1544,9 +1544,9 @@ SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ---------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( From adf5fd776dd7f6ed359a54d0b16c3e39472c4334 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 18:59:01 +0300 Subject: [PATCH 0952/1124] Start working on update checking script --- tests/update/check_update.py | 253 +++++++++++++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100755 tests/update/check_update.py diff --git a/tests/update/check_update.py b/tests/update/check_update.py new file mode 100755 index 00000000..f1ac3cef --- /dev/null +++ b/tests/update/check_update.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python +#coding: utf-8 + +import os +import contextlib +import sys +import argparse +import testgres +import subprocess +import difflib + +repo_dir = os.path.abspath(os.path.join('../..', os.path.dirname(__file__))) + +compilation = ''' +make USE_PGXS=1 clean +make USE_PGXS=1 install +''' + +# just bunch of tables to create +run_sql = ''' +CREATE EXTENSION 
pg_pathman; + +CREATE TABLE hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel VALUES (1, 1); +INSERT INTO hash_rel VALUES (2, 2); +INSERT INTO hash_rel VALUES (3, 3); + +SELECT create_hash_partitions('hash_rel', 'Value', 3); + +CREATE TABLE range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON range_rel (dt); +INSERT INTO range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT create_range_partitions('range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT create_range_partitions('num_range_rel', 'id', 0, 1000, 4); +INSERT INTO num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +CREATE TABLE improved_dummy_test1 (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10); +INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ +ALTER TABLE improved_dummy_1 ADD CHECK (name != 'ib'); /* make improved_dummy_1 disappear */ + +CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); +SELECT create_range_partitions('test_improved_dummy_test2', 'val', + generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +CREATE TABLE insert_into_select(val int NOT NULL); +INSERT INTO insert_into_select SELECT generate_series(1, 100); +SELECT create_range_partitions('insert_into_select', 'val', 1, 20); +CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... 
*/ + +# just a lot of actions + +SELECT split_range_partition('num_range_rel_1', 500); +SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); + +/* Merge two partitions into one */ +SELECT merge_range_partitions('num_range_rel_1', 'num_range_rel_' || currval('num_range_rel_seq')); +SELECT merge_range_partitions('range_rel_1', 'range_rel_' || currval('range_rel_seq')); + +/* Append and prepend partitions */ +SELECT append_range_partition('num_range_rel'); +SELECT prepend_range_partition('num_range_rel'); +SELECT drop_range_partition('num_range_rel_7'); + +SELECT drop_range_partition_expand_next('num_range_rel_4'); +SELECT drop_range_partition_expand_next('num_range_rel_6'); + +SELECT append_range_partition('range_rel'); +SELECT prepend_range_partition('range_rel'); +SELECT drop_range_partition('range_rel_7'); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + +CREATE TABLE range_rel_archive (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); +SELECT detach_range_partition('range_rel_archive'); + +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT attach_range_partition('range_rel', 'range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +CREATE TABLE range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT attach_range_partition('range_rel', 'range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); + +/* Half open ranges */ +SELECT add_range_partition('range_rel', NULL, '2014-12-01'::DATE, 'range_rel_minus_infinity'); +SELECT add_range_partition('range_rel', '2015-06-01'::DATE, NULL, 'range_rel_plus_infinity'); +SELECT append_range_partition('range_rel'); +SELECT 
prepend_range_partition('range_rel'); + +CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +INSERT INTO range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO range_rel (dt) VALUES ('2015-12-15'); + +CREATE TABLE zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT create_range_partitions('zero', 'value', 50, 10, 0); +SELECT append_range_partition('zero', 'zero_0'); +SELECT prepend_range_partition('zero', 'zero_1'); +SELECT add_range_partition('zero', 50, 70, 'zero_50'); +SELECT append_range_partition('zero', 'zero_appended'); +SELECT prepend_range_partition('zero', 'zero_prepended'); +SELECT split_range_partition('zero_50', 60, 'zero_60'); + +CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); +SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); + +-- automatic partitions creation +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT create_range_partitions('range_rel_test1', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); + +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); + +/* CaMeL cAsE table names and attributes */ +CREATE TABLE "TeSt" (a INT NOT NULL, b INT); +SELECT create_hash_partitions('TeSt', 'a', 3); +SELECT create_hash_partitions('"TeSt"', 'a', 3); +INSERT INTO "TeSt" VALUES (1, 1); +INSERT INTO "TeSt" VALUES (2, 2); +INSERT INTO "TeSt" VALUES (3, 3); + +CREATE TABLE "RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO "RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT create_range_partitions('"RangeRel"', 'dt', 
'2015-01-01'::DATE, '1 day'::INTERVAL); +SELECT append_range_partition('"RangeRel"'); +SELECT prepend_range_partition('"RangeRel"'); +SELECT merge_range_partitions('"RangeRel_1"', '"RangeRel_' || currval('"RangeRel_seq"') || '"'); +SELECT split_range_partition('"RangeRel_1"', '2015-01-01'::DATE); + +CREATE TABLE hash_rel_next1 ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('hash_rel_next1', 'value', 3); + +CREATE TABLE range_rel_next1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO range_rel_next1 (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('range_rel_next1', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); +SELECT split_range_partition('range_rel_1', '2010-02-15'::date); +SELECT append_range_partition('range_rel_next1'); +SELECT prepend_range_partition('range_rel_next1'); +''' + +@contextlib.contextmanager +def cwd(path): + print("cwd: ", path) + curdir = os.getcwd() + os.chdir(path) + + try: + yield + finally: + print("cwd:", curdir) + os.chdir(curdir) + +dump1_file = '/tmp/dump1.sql' +dump2_file = '/tmp/dump2.sql' + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='pg_pathman update checker') + parser.add_argument('branches', nargs=2, + help='specify branches ("main rel_1.5")') + + args = parser.parse_args() + + with open('dump_pathman_objects.sql') as f: + dump_sql = f.read() + + with cwd(repo_dir): + subprocess.check_output("git checkout %s" % args.branches[0], shell=True) + subprocess.check_output(compilation, shell=True) + + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + node.safe_psql('postgres', run_sql) + 
node.dump(dump1_file, 'postgres') + node.stop() + + subprocess.check_output("git checkout %s" % args.branches[1], shell=True) + subprocess.check_output(compilation, shell=True) + + version = None + with open('pg_pathman.control') as f: + for line in f.readlines(): + if line.startswith('default_version'): + version = line.split('=').strip() + + if version is None: + print("cound not find version in second branch") + exit(1) + + node.start() + node.safe_psql("postgres", "alter extension pg_pathman update to %s" % version) + dumped_objects_old = node.safe_psql("postgres", dump_sql) + node.stop() + + # now make clean install + with testgres.get_new_node('from_scratch') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + dumped_objects_new = node.safe_psql("postgres", dump_sql) + node.dump(dump2_file, 'postgres') + + # check dumps + node.safe_psql('postgres', 'create database d1') + node.restore(dump1_file, 'd1') + + node.safe_psql('postgres', 'create database d2') + node.restore(dump2_file, 'd2') + node.stop() + + if dumped_objects != dumped_objects_new: + pass From 08eb0f439bb307afafedeab3cdfa14faf5f08b1d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 20 Sep 2018 19:44:10 +0300 Subject: [PATCH 0953/1124] Fix update checking script --- tests/update/check_update.py | 99 +++++++++++++----------------------- 1 file changed, 36 insertions(+), 63 deletions(-) diff --git a/tests/update/check_update.py b/tests/update/check_update.py index f1ac3cef..be5f2aa2 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -1,6 +1,7 @@ #!/usr/bin/env python #coding: utf-8 +import shutil import os import contextlib import sys @@ -9,7 +10,9 @@ import subprocess import difflib -repo_dir = os.path.abspath(os.path.join('../..', os.path.dirname(__file__))) +my_dir = os.path.dirname(os.path.abspath(__file__)) +repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) 
+print(repo_dir) compilation = ''' make USE_PGXS=1 clean @@ -31,7 +34,7 @@ CREATE TABLE range_rel ( id SERIAL PRIMARY KEY, - dt TIMESTAMP, + dt TIMESTAMP not null, txt TEXT); CREATE INDEX ON range_rel (dt); INSERT INTO range_rel (dt, txt) @@ -49,7 +52,7 @@ INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10); INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ -ALTER TABLE improved_dummy_1 ADD CHECK (name != 'ib'); /* make improved_dummy_1 disappear */ +ALTER TABLE improved_dummy_test1 ADD CHECK (name != 'ib'); CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); SELECT create_range_partitions('test_improved_dummy_test2', 'val', @@ -61,7 +64,7 @@ SELECT create_range_partitions('insert_into_select', 'val', 1, 20); CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... */ -# just a lot of actions +-- just a lot of actions SELECT split_range_partition('num_range_rel_1', 500); SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); @@ -81,48 +84,13 @@ SELECT append_range_partition('range_rel'); SELECT prepend_range_partition('range_rel'); SELECT drop_range_partition('range_rel_7'); -SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); -CREATE TABLE range_rel_archive (LIKE range_rel INCLUDING ALL); -SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); -SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); -SELECT detach_range_partition('range_rel_archive'); - -CREATE TABLE range_rel_test1 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT, - abc INTEGER); -SELECT attach_range_partition('range_rel', 'range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); -CREATE 
TABLE range_rel_test2 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP); -SELECT attach_range_partition('range_rel', 'range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); - -/* Half open ranges */ -SELECT add_range_partition('range_rel', NULL, '2014-12-01'::DATE, 'range_rel_minus_infinity'); -SELECT add_range_partition('range_rel', '2015-06-01'::DATE, NULL, 'range_rel_plus_infinity'); -SELECT append_range_partition('range_rel'); -SELECT prepend_range_partition('range_rel'); - CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); INSERT INTO range_rel (dt) VALUES ('2012-06-15'); INSERT INTO range_rel (dt) VALUES ('2015-12-15'); -CREATE TABLE zero( - id SERIAL PRIMARY KEY, - value INT NOT NULL); -INSERT INTO zero SELECT g, g FROM generate_series(1, 100) as g; -SELECT create_range_partitions('zero', 'value', 50, 10, 0); -SELECT append_range_partition('zero', 'zero_0'); -SELECT prepend_range_partition('zero', 'zero_1'); -SELECT add_range_partition('zero', 50, 70, 'zero_50'); -SELECT append_range_partition('zero', 'zero_appended'); -SELECT prepend_range_partition('zero', 'zero_prepended'); -SELECT split_range_partition('zero_50', 60, 'zero_60'); - CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); @@ -140,7 +108,6 @@ /* CaMeL cAsE table names and attributes */ CREATE TABLE "TeSt" (a INT NOT NULL, b INT); -SELECT create_hash_partitions('TeSt', 'a', 3); SELECT create_hash_partitions('"TeSt"', 'a', 3); INSERT INTO "TeSt" VALUES (1, 1); INSERT INTO "TeSt" VALUES (2, 2); @@ -163,17 +130,6 @@ value INTEGER NOT NULL); INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; SELECT create_hash_partitions('hash_rel_next1', 'value', 3); - -CREATE TABLE range_rel_next1 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL, - value INTEGER); -INSERT INTO range_rel_next1 (dt, 
value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel_next1', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); -SELECT append_range_partition('range_rel_next1'); -SELECT prepend_range_partition('range_rel_next1'); ''' @contextlib.contextmanager @@ -188,6 +144,10 @@ def cwd(path): print("cwd:", curdir) os.chdir(curdir) +def shell(cmd): + print(cmd) + subprocess.check_output(cmd, shell=True) + dump1_file = '/tmp/dump1.sql' dump2_file = '/tmp/dump2.sql' @@ -198,12 +158,17 @@ def cwd(path): args = parser.parse_args() - with open('dump_pathman_objects.sql') as f: + with open(os.path.join(my_dir, 'dump_pathman_objects.sql'), 'r') as f: dump_sql = f.read() - with cwd(repo_dir): - subprocess.check_output("git checkout %s" % args.branches[0], shell=True) - subprocess.check_output(compilation, shell=True) + shutil.rmtree('/tmp/pg_pathman') + shutil.copytree(repo_dir, '/tmp/pg_pathman') + + with cwd('/tmp/pg_pathman'): + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % args.branches[0]) + shell(compilation) with testgres.get_new_node('updated') as node: node.init() @@ -214,22 +179,24 @@ def cwd(path): node.dump(dump1_file, 'postgres') node.stop() - subprocess.check_output("git checkout %s" % args.branches[1], shell=True) - subprocess.check_output(compilation, shell=True) + shell("git clean -fdx") + shell("git checkout %s" % args.branches[1]) + shell(compilation) version = None with open('pg_pathman.control') as f: for line in f.readlines(): if line.startswith('default_version'): - version = line.split('=').strip() + version = line.split('=')[1].strip() if version is None: print("cound not find version in second branch") exit(1) node.start() - node.safe_psql("postgres", "alter extension pg_pathman update to %s" % 
version) - dumped_objects_old = node.safe_psql("postgres", dump_sql) + p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumped_objects_old = p.communicate(input=dump_sql.encode())[0].decode() node.stop() # now make clean install @@ -238,7 +205,9 @@ def cwd(path): node.append_conf("shared_preload_libraries='pg_pathman'\n") node.start() node.safe_psql('postgres', run_sql) - dumped_objects_new = node.safe_psql("postgres", dump_sql) + p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode() node.dump(dump2_file, 'postgres') # check dumps @@ -249,5 +218,9 @@ def cwd(path): node.restore(dump2_file, 'd2') node.stop() - if dumped_objects != dumped_objects_new: - pass + if dumped_objects_old != dumped_objects_new: + print("\nDIFF:") + for line in difflib.context_diff(dumped_objects_old.split('\n'), dumped_objects_new.split('\n')): + print(line) + else: + print("\nUPDATE CHECK: ALL GOOD") From 6a089e80f32320e0cb599ada66b8c5b9d5dfe937 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 24 Sep 2018 15:02:24 +0300 Subject: [PATCH 0954/1124] Add tests for concurrent updates --- Makefile | 2 +- tests/python/Makefile | 6 ++- tests/python/partitioning_test.py | 67 +++++++++++++++++++++++++++++-- 3 files changed, 70 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index efd0cbc5..7292cd43 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,7 @@ isolationcheck: | submake-isolation $(ISOLATIONCHECKS) python_tests: - $(MAKE) -C tests/python partitioning_tests + $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) cmocka_tests: $(MAKE) -C tests/cmocka check diff --git a/tests/python/Makefile b/tests/python/Makefile index ee650ea4..f8a71e41 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,6 @@ partitioning_tests: - python -m unittest --verbose --failfast partitioning_test.py +ifneq 
($(CASE),) + python partitioning_test.py Tests.$(CASE) +else + python partitioning_test.py +endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index cb1282c6..0e3d1492 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -7,15 +7,18 @@ Copyright (c) 2015-2017, Postgres Professional """ +import functools import json import math +import multiprocessing import os +import random import re import subprocess +import sys import threading import time import unittest -import functools from distutils.version import LooseVersion from testgres import get_new_node, get_pg_version @@ -85,10 +88,17 @@ def set_trace(self, con, command="pg_debug"): p = subprocess.Popen([command], stdin=subprocess.PIPE) p.communicate(str(pid).encode()) - def start_new_pathman_cluster(self, allow_streaming=False, test_data=False): + def start_new_pathman_cluster(self, + allow_streaming=False, + test_data=False, + enable_partitionrouter=False): + node = get_new_node() node.init(allow_streaming=allow_streaming) node.append_conf("shared_preload_libraries='pg_pathman'\n") + if enable_partitionrouter: + node.append_conf("pg_pathman.enable_partitionrouter=on\n") + node.start() node.psql('create extension pg_pathman') @@ -1065,6 +1075,57 @@ def test_update_node_plan1(self): node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + def test_concurrent_updates(self): + ''' + Test whether conncurrent updates work correctly between + partitions. 
+ ''' + + create_sql = ''' + CREATE TABLE test1(id INT, b INT NOT NULL); + INSERT INTO test1 + SELECT i, i FROM generate_series(1, 100) i; + SELECT create_range_partitions('test1', 'b', 1, 5); + ''' + + with self.start_new_pathman_cluster(enable_partitionrouter=True) as node: + node.safe_psql(create_sql) + + pool = multiprocessing.Pool(processes=4) + for count in range(1, 200): + pool.apply_async(make_updates, (node, count, )) + + pool.close() + pool.join() + + # check all data is there and not duplicated + with node.connect() as con: + for i in range(1, 100): + row = con.execute("select count(*) from test1 where id = %d" % i)[0] + self.assertEqual(row[0], 1) + + self.assertEqual(node.execute("select count(*) from test1")[0][0], 100) + + +def make_updates(node, count): + update_sql = ''' + BEGIN; + UPDATE test1 SET b = trunc(random() * 100 + 1) WHERE id in (%s); + COMMIT; + ''' + + with node.connect() as con: + for i in range(count): + rows_to_update = random.randint(20, 50) + ids = set([str(random.randint(1, 100)) for i in range(rows_to_update)]) + con.execute(update_sql % ','.join(ids)) + if __name__ == "__main__": - unittest.main() + if len(sys.argv) > 1: + suite = unittest.TestLoader().loadTestsFromName(sys.argv[1], + module=sys.modules[__name__]) + else: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + + unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) From 3d98a8e62c7d72b82408e07474a5091277021e9c Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 24 Sep 2018 17:25:29 +0300 Subject: [PATCH 0955/1124] Update README --- README.md | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 2a2796d7..2bf95a2e 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10; + * PostgreSQL 9.5, 9.6, 10, 11; * Postgres Pro Standard 
9.5, 9.6; * Postgres Pro Enterprise; @@ -63,7 +63,7 @@ More interesting features are yet to come. Stay tuned! * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; - * [`PartitionRouter`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); + * [`PartitionRouter`](#custom-plan-nodes) and [`PartitionOverseer`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); * Improved `COPY FROM` statement that is able to insert rows directly into partitions; * [User-defined callbacks](#additional-parameters) for partition creation event handling; @@ -105,7 +105,7 @@ In order to update pg_pathman: 3. Execute the following queries: ```plpgsql -/* only required for major releases, e.g. 1.3 -> 1.4 */ +/* only required for major releases, e.g. 1.4 -> 1.5 */ ALTER EXTENSION pg_pathman UPDATE; SET pg_pathman.enable = t; ``` @@ -417,6 +417,7 @@ Shows memory consumption of various caches. - `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionOverseer` (implements cross-partition UPDATEs) - `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: @@ -434,20 +435,27 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` -`PartitionRouter` is another *proxy node* used in conjunction with `PartitionFilter` to enable cross-partition UPDATEs (i.e. when update of partitioning key requires that we move row to another partition). 
Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; cross-partition `UPDATE` is transformed into `DELETE + INSERT`), it is disabled by default. To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. +`PartitionOverseer` and `PartitionRouter` are another *proxy nodes* used +in conjunction with `PartitionFilter` to enable cross-partition UPDATEs +(i.e. when update of partitioning key requires that we move row to another +partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; +cross-partition `UPDATE` is transformed into `DELETE + INSERT`), +it is disabled by default. +To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. ```plpgsql EXPLAIN (COSTS OFF) UPDATE partitioned_table SET value = value + 1 WHERE value = 2; - QUERY PLAN ---------------------------------------------------- - Update on partitioned_table_0 - -> Custom Scan (PartitionRouter) + QUERY PLAN +--------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on partitioned_table_2 -> Custom Scan (PartitionFilter) - -> Seq Scan on partitioned_table_0 - Filter: (value = 2) -(5 rows) + -> Custom Scan (PartitionRouter) + -> Seq Scan on partitioned_table_2 + Filter: (value = 2) +(6 rows) ``` `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: From c69f0f2f63ffa3f4edce1edbafbd1f6677afb473 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 25 Sep 2018 14:27:35 +0300 Subject: [PATCH 0956/1124] Fix compilation error on older GCCs --- src/partition_overseer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 2456f6aa..41590425 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,12 +68,13 @@ partition_overseer_create_scan_state(CustomScan *node) static void 
set_mt_state_for_router(PlanState *state, void *context) { + int i; ModifyTableState *mt_state = (ModifyTableState *) state; if (!IsA(state, ModifyTableState)) return; - for (int i = 0; i < mt_state->mt_nplans; i++) + for (i = 0; i < mt_state->mt_nplans; i++) { CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; PartitionRouterState *pr_state; From a4eab851be6ad4af99fe597d1baddf5ff82e7877 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 25 Sep 2018 22:04:07 +0300 Subject: [PATCH 0957/1124] Bump version to 1.5.1 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 4c40be86..744310c2 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.0", + "version": "1.5.1", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.0", + "version": "1.5.1", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e28777bf..6b73351e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.0 + 1.5.1 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 99426810..1c8c1584 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.0" +#define CURRENT_LIB_VERSION "1.5.1" void 
*pathman_cache_search_relid(HTAB *cache_table, From 50c078b45e7245d7758cde7ded98852a139bc396 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 26 Sep 2018 13:50:11 +0300 Subject: [PATCH 0958/1124] Add PGDLLIMPORT for FrontendProtocol --- src/utility_stmt_hooking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index c90a01da..3f1772a1 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -35,7 +35,7 @@ /* we avoid includig libpq.h because it requires openssl.h */ #include "libpq/pqcomm.h" -extern ProtocolVersion FrontendProtocol; +extern PGDLLIMPORT ProtocolVersion FrontendProtocol; extern void pq_endmsgread(void); /* Determine whether we should enable COPY or not (PostgresPro has a fix) */ From 84682ff795c668862d6c40edb6ac3c6455855dc2 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 13:02:13 +0300 Subject: [PATCH 0959/1124] Add support for ALTER TABLE .. ATTACH PARTITION --- Makefile | 2 +- src/declarative.c | 237 ++++++++++++++++++++++++ src/hooks.c | 31 ++++ src/include/declarative.h | 20 ++ src/include/planner_tree_modification.h | 3 +- src/planner_tree_modification.c | 33 ++++ src/utility_stmt_hooking.c | 1 - 7 files changed, 324 insertions(+), 3 deletions(-) create mode 100644 src/declarative.c create mode 100644 src/include/declarative.h diff --git a/Makefile b/Makefile index 82b0fc28..a754f7aa 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(WIN32RES) + src/partition_overseer.o src/declarative.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include diff --git a/src/declarative.c 
b/src/declarative.c new file mode 100644 index 00000000..48992f6d --- /dev/null +++ b/src/declarative.c @@ -0,0 +1,237 @@ +#include "declarative.h" +#include "utils.h" + +#include "fmgr.h" +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "catalog/pg_proc.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_func.h" +#include "parser/parse_coerce.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varbit.h" + +/* + * Modifies query of declarative partitioning commands, + * There is a little hack here, ATTACH PARTITION command + * expects relation with REL_PARTITIONED_TABLE relkind. + * To avoid this check we negate subtype, and then after the checks + * we set it back (look `is_pathman_related_partitioning_cmd`) + */ +void +modify_declative_partitioning_query(Query *query) +{ + if (query->commandType != CMD_UTILITY) + return; + + if (IsA(query->utilityStmt, AlterTableStmt)) + { + ListCell *lcmd; + Oid relid; + + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + relid = RangeVarGetRelid(stmt->relation, NoLock, true); + if (get_pathman_relation_info(relid) != NULL) + { + foreach(lcmd, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + switch (cmd->subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + cmd->subtype = -cmd->subtype; + break; + default: + break; + } + } + } + } +} + +/* is it one of declarative partitioning commands? 
*/ +bool is_pathman_related_partitioning_cmd(Node *parsetree) +{ + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + int cnt = 0; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + int subtype = cmd->subtype; + + if (subtype < 0) + subtype = -subtype; + + switch (subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + /* + * we need to fix all subtypes, + * possibly we're not going to handle this + */ + cmd->subtype = -(cmd->subtype); + continue; + default: + cnt++; + } + } + + return (cnt == 0); + } + return false; +} + +static FuncExpr * +make_fn_expr(Oid funcOid, List *args) +{ + FuncExpr *fn_expr; + HeapTuple procTup; + Form_pg_proc procStruct; + + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); + if (!HeapTupleIsValid(procTup)) + elog(ERROR, "cache lookup failed for function %u", funcOid); + procStruct = (Form_pg_proc) GETSTRUCT(procTup); + + fn_expr = makeFuncExpr(funcOid, procStruct->prorettype, args, + InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); + ReleaseSysCache(procTup); + return fn_expr; +} + +/* + * Transform one constant in a partition bound spec + */ +static Const * +transform_bound_value(ParseState *pstate, A_Const *con, + Oid colType, int32 colTypmod) +{ + Node *value; + + /* Make it into a Const */ + value = (Node *) make_const(pstate, &con->val, con->location); + + /* Coerce to correct type */ + value = coerce_to_target_type(pstate, + value, exprType(value), + colType, + colTypmod, + COERCION_ASSIGNMENT, + COERCE_IMPLICIT_CAST, + -1); + + if (value == NULL) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + parser_errposition(pstate, con->location))); + + /* Simplify the expression, in case we had a coercion */ + if (!IsA(value, Const)) + value = (Node *) expression_planner((Expr *) value); + + /* Fail if we don't have a constant 
(i.e., non-immutable coercion) */ + if (!IsA(value, Const)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + errdetail("The cast requires a non-immutable conversion."), + errhint("Try putting the literal value in single quotes."), + parser_errposition(pstate, con->location))); + + return (Const *) value; +} + +/* handle ALTER TABLE .. ATTACH PARTITION command */ +void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +{ + Oid parent_relid, + partition_relid, + proc_args[] = { REGCLASSOID, REGCLASSOID, + ANYELEMENTOID, ANYELEMENTOID }; + + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + List *fn_args; + ParseState *pstate = make_parsestate(NULL); + const PartRelationInfo *prel; + + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + Assert(cmd->subtype == AT_AttachPartition); + + parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + elog(ERROR, "relation is not partitioned"); + + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(attach_range_partition))); + + ldatum = (PartitionRangeDatum *) linitial(pcmd->bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(pcmd->bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 4, proc_args, 
false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); + proc_fcinfo.argnull[0] = false; + proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[1] = false; + + /* Make function expression, we will need it to determine argument types */ + fn_args = list_make4(NULL, NULL, lval, rval); + proc_fcinfo.flinfo->fn_expr = + (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); + + if ((!list_length(pcmd->bound->lowerdatums)) || + (!list_length(pcmd->bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + proc_fcinfo.arg[2] = lval->constvalue; + proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; + proc_fcinfo.arg[3] = rval->constvalue; + proc_fcinfo.argnull[3] = rdatum->infinite || rval->constisnull; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle ALTER TABLE .. DETACH PARTITION command */ +void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +{ + Assert(cmd->subtype == AT_DetachPartition); +} diff --git a/src/hooks.c b/src/hooks.c index 5cd3e14c..4aa5bf40 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -13,6 +13,7 @@ #include "compat/pg_compat.h" #include "compat/rowmarks_fix.h" +#include "declarative.h" #include "hooks.h" #include "init.h" #include "partition_filter.h" @@ -766,6 +767,8 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } + if (!IsPathmanReady()) + return; /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) @@ -812,7 +815,10 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) /* Modify query tree if needed */ pathman_transform_query(query, NULL); + return; } + + pathman_post_analyze_query(query); } /* @@ -950,6 +956,31 @@ pathman_process_utility_hook(Node 
*first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } + else if (is_pathman_related_partitioning_cmd(parsetree)) + { + /* we can handle all the partitioning commands */ + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (cmd->subtype) + { + case AT_AttachPartition: + handle_attach_partition(stmt, cmd); + return; + case AT_DetachPartition: + handle_detach_partition(stmt, cmd); + return; + default: + elog(ERROR, "can't handle this command"); + } + } + } + } } /* Finally call process_utility_hook_next or standard_ProcessUtility */ diff --git a/src/include/declarative.h b/src/include/declarative.h new file mode 100644 index 00000000..56ce0ed7 --- /dev/null +++ b/src/include/declarative.h @@ -0,0 +1,20 @@ +#ifndef DECLARATIVE_H +#define DECLARATIVE_H + +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" + +typedef enum DeclarativeCommandType { + DP_ATTACH, /* ALTER TABLE .. ATTACH PARTITION */ + DP_DETACH /* ALTER TABLE .. 
DETACH PARTITION */ +} DeclarativeCommandType; + +void modify_declative_partitioning_query(Query *query); +bool is_pathman_related_partitioning_cmd(Node *parsetree); + +/* actual actions */ +void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); +void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); + +#endif /* DECLARATIVE_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 43f7a24b..4e33ca34 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -34,8 +34,9 @@ void state_tree_visitor(PlanState *state, void (*visitor) (PlanState *state, void *context), void *context); -/* Query tree rewriting utility */ +/* Query tree rewriting utilities */ void pathman_transform_query(Query *parse, ParamListInfo params); +void pathman_post_analyze_query(Query *parse); /* These functions scribble on Plan tree */ Plan *add_partition_filters(List *rtable, Plan *plan); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index a3b06873..d1412835 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -12,6 +12,7 @@ #include "compat/rowmarks_fix.h" +#include "declarative.h" #include "partition_filter.h" #include "partition_router.h" #include "partition_overseer.h" @@ -107,6 +108,7 @@ typedef struct } adjust_appendrel_varnos_cxt; static bool pathman_transform_query_walker(Node *node, void *context); +static bool pathman_post_analyze_query_walker(Node *node, void *context); static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); static void handle_modification_query(Query *parse, transform_query_cxt *context); @@ -337,6 +339,12 @@ pathman_transform_query(Query *parse, ParamListInfo params) pathman_transform_query_walker((Node *) parse, (void *) &context); } +void +pathman_post_analyze_query(Query *parse) +{ + pathman_post_analyze_query_walker((Node *) parse, NULL); +} 
+ /* Walker for pathman_transform_query() */ static bool pathman_transform_query_walker(Node *node, void *context) @@ -410,6 +418,31 @@ pathman_transform_query_walker(Node *node, void *context) context); } +static bool +pathman_post_analyze_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + + /* Make changes for declarative syntax */ + modify_declative_partitioning_query(query); + + /* Handle Query node */ + return query_tree_walker(query, + pathman_post_analyze_query_walker, + context, + 0); + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_post_analyze_query_walker, + context); +} /* * ---------------------- diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 3f1772a1..9683914b 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -271,7 +271,6 @@ is_pathman_related_alter_column_type(Node *parsetree, return result; } - /* * CopyGetAttnums - build an integer list of attnums to be copied * From 722ddb012b8f66756f40040f53bb90f86ca27986 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 15:00:32 +0300 Subject: [PATCH 0960/1124] Add support of ALTER TABLE .. DETACH PARTITION --- src/declarative.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/declarative.c b/src/declarative.c index 48992f6d..0fdbf1a0 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -233,5 +233,32 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) /* handle ALTER TABLE .. 
DETACH PARTITION command */ void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) { + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + Oid partition_relid, + args = REGCLASSOID; + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + Assert(cmd->subtype == AT_DetachPartition); + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(detach_range_partition))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 1, &args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[0] = false; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); } From fe70668a0863f9fadf3fbd94ad535222da526985 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 17:47:19 +0300 Subject: [PATCH 0961/1124] Add test files for declarative syntax --- Makefile | 6 ++++++ expected/pathman_declarative.out | 0 sql/pathman_declarative.sql | 30 ++++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 expected/pathman_declarative.out create mode 100644 sql/pathman_declarative.sql diff --git a/Makefile b/Makefile index a754f7aa..17d241e5 100644 --- a/Makefile +++ b/Makefile @@ -67,9 +67,15 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output +DECL_CHECK_VERSIONS = 10beta1 + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') +ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) + REGRESS += pathman_declarative +endif include $(PGXS) else subdir = 
contrib/pg_pathman diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out new file mode 100644 index 00000000..e69de29b diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql new file mode 100644 index 00000000..183be291 --- /dev/null +++ b/sql/pathman_declarative.sql @@ -0,0 +1,30 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); + +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + +SELECT * FROM pathman_partition_list; +CREATE TABLE test.r2 LIKE (test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman_partition_list; +\d+ test.r2; +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman_partition_list; +\d+ test.r2; + +DROP SCHEMA test CASCADE; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; From ca9f9ebe86281b4443ce2e30eba48ad85a831728 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Fri, 23 Jun 2017 18:37:32 +0300 Subject: [PATCH 0962/1124] Add tests for ATTACH and DETACH PARTITION commands --- expected/pathman_declarative.out | 72 ++++++++++++++++++++++++++++++++ sql/pathman_declarative.sql | 8 ++-- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index e69de29b..9dc5cb93 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -0,0 +1,72 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt 
DATE NOT NULL +); +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +NOTICE: trigger 
"range_rel_upd_trig" for relation "test.r2" does not exist, skipping +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 7 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 183be291..4bb6b2b8 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -15,14 +15,14 @@ SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); -SELECT * FROM pathman_partition_list; -CREATE TABLE test.r2 LIKE (test.range_rel); +SELECT * FROM pathman.pathman_partition_list; +CREATE TABLE test.r2 (LIKE test.range_rel); ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); -SELECT * FROM pathman_partition_list; +SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; ALTER TABLE test.range_rel DETACH PARTITION test.r2; -SELECT * FROM pathman_partition_list; +SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; DROP SCHEMA test CASCADE; From e426c71947478af6c5429f8db9ad85de3fd3198a Mon Sep 17 00:00:00 2001 From: Ildus 
K Date: Mon, 26 Jun 2017 09:38:35 +0300 Subject: [PATCH 0963/1124] Enable compilation of declarative syntax only for pg10+ --- Makefile | 5 +++-- src/hooks.c | 8 ++++++++ src/include/declarative.h | 5 ----- src/planner_tree_modification.c | 2 ++ 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 17d241e5..4fbe4b19 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o src/declarative.o $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -67,7 +67,7 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output -DECL_CHECK_VERSIONS = 10beta1 +DECL_CHECK_VERSIONS = 10 11 ifdef USE_PGXS PG_CONFIG = pg_config @@ -75,6 +75,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) REGRESS += pathman_declarative + OBJS += src/declarative.o endif include $(PGXS) else diff --git a/src/hooks.c b/src/hooks.c index 4aa5bf40..cd49d60e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -818,7 +818,13 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) return; } +#if PG_VERSION_NUM >= 100000 + /* + * for now this call works only for declarative partitioning so + * we disabled it + */ pathman_post_analyze_query(query); +#endif } /* @@ -956,6 +962,7 @@ pathman_process_utility_hook(Node *first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } +#if PG_VERSION_NUM >= 100000 else if (is_pathman_related_partitioning_cmd(parsetree)) { /* we can 
handle all the partitioning commands */ @@ -981,6 +988,7 @@ pathman_process_utility_hook(Node *first_arg, } } } +#endif } /* Finally call process_utility_hook_next or standard_ProcessUtility */ diff --git a/src/include/declarative.h b/src/include/declarative.h index 56ce0ed7..b38eebaa 100644 --- a/src/include/declarative.h +++ b/src/include/declarative.h @@ -5,11 +5,6 @@ #include "nodes/nodes.h" #include "nodes/parsenodes.h" -typedef enum DeclarativeCommandType { - DP_ATTACH, /* ALTER TABLE .. ATTACH PARTITION */ - DP_DETACH /* ALTER TABLE .. DETACH PARTITION */ -} DeclarativeCommandType; - void modify_declative_partitioning_query(Query *query); bool is_pathman_related_partitioning_cmd(Node *parsetree); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d1412835..b3391bbf 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -429,7 +429,9 @@ pathman_post_analyze_query_walker(Node *node, void *context) Query *query = (Query *) node; /* Make changes for declarative syntax */ +#if PG_VERSION_NUM >= 100000 modify_declative_partitioning_query(query); +#endif /* Handle Query node */ return query_tree_walker(query, From 48df7a378d44555d90c593f287e89143a775164b Mon Sep 17 00:00:00 2001 From: Ildus K Date: Mon, 26 Jun 2017 10:06:08 +0300 Subject: [PATCH 0964/1124] Fix Makefile --- Makefile | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 4fbe4b19..ed2d624b 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,19 @@ MODULE_big = pg_pathman +# versions of postgresql with declarative partitioning +DECL_CHECK_VERSIONS = 10 11 + +ifdef USE_PGXS +PG_CONFIG = pg_config +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') +ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) + EXTRA_REGRESS = pathman_declarative + EXTRA_OBJS = src/declarative.o +endif +endif +include $(PGXS) + OBJS = src/init.o src/relation_info.o src/utils.o 
src/partition_filter.o \ src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ @@ -60,23 +73,15 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views - + pathman_views ${EXTRA_REGRESS} EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output -DECL_CHECK_VERSIONS = 10 11 - ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) -VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) - REGRESS += pathman_declarative - OBJS += src/declarative.o -endif include $(PGXS) else subdir = contrib/pg_pathman From e8521d31e875fd5929f91a09f2aab10909c1d361 Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 27 Jun 2017 10:43:11 +0300 Subject: [PATCH 0965/1124] Add support for CREATE TABLE .. 
PARTITION OF --- expected/pathman_declarative.out | 33 ++++++++- sql/pathman_declarative.sql | 16 ++++- src/declarative.c | 119 +++++++++++++++++++++++++++---- src/hooks.c | 13 ++-- src/include/declarative.h | 7 +- 5 files changed, 164 insertions(+), 24 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 9dc5cb93..e853898d 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -7,6 +7,10 @@ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL ); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: "range_rel" is not partitioned INSERT INTO test.range_rel (dt) SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; SELECT pathman.create_range_partitions('test.range_rel', 'dt', @@ -25,7 +29,12 @@ SELECT * FROM pathman.pathman_partition_list; test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 (4 rows) -CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); SELECT * FROM pathman.pathman_partition_list; @@ -66,7 +75,27 @@ SELECT * FROM pathman.pathman_partition_list; id | integer | | not null | | plain | | dt | date | | not null | | plain | | +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO 
('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 4bb6b2b8..864e3af8 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -9,6 +9,9 @@ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL ); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); INSERT INTO test.range_rel (dt) SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; @@ -16,7 +19,10 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); SELECT * FROM pathman.pathman_partition_list; -CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); ALTER TABLE test.range_rel ATTACH PARTITION test.r2 FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); SELECT * FROM 
pathman.pathman_partition_list; @@ -25,6 +31,14 @@ ALTER TABLE test.range_rel DETACH PARTITION test.r2; SELECT * FROM pathman.pathman_partition_list; \d+ test.r2; +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/src/declarative.c b/src/declarative.c index 0fdbf1a0..02d4a875 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -1,5 +1,6 @@ #include "declarative.h" #include "utils.h" +#include "partition_creation.h" #include "fmgr.h" #include "access/htup_details.h" @@ -57,7 +58,8 @@ modify_declative_partitioning_query(Query *query) } /* is it one of declarative partitioning commands? */ -bool is_pathman_related_partitioning_cmd(Node *parsetree) +bool +is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { if (IsA(parsetree, AlterTableStmt)) { @@ -65,23 +67,28 @@ bool is_pathman_related_partitioning_cmd(Node *parsetree) AlterTableStmt *stmt = (AlterTableStmt *) parsetree; int cnt = 0; + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if (get_pathman_relation_info(*parent_relid) == NULL) + return false; + + /* + * Since cmds can contain multiple commmands but we can handle only + * two of them here, so we need to check that there are only commands + * we can handle. In case if cmds contain other commands we skip all + * commands in this statement. 
+ */ foreach(lc, stmt->cmds) { AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); - int subtype = cmd->subtype; - - if (subtype < 0) - subtype = -subtype; - - switch (subtype) + switch (abs(cmd->subtype)) { case AT_AttachPartition: case AT_DetachPartition: /* - * we need to fix all subtypes, + * We need to fix all subtypes, * possibly we're not going to handle this */ - cmd->subtype = -(cmd->subtype); + cmd->subtype = abs(cmd->subtype); continue; default: cnt++; @@ -90,6 +97,26 @@ bool is_pathman_related_partitioning_cmd(Node *parsetree) return (cnt == 0); } + else if (IsA(parsetree, CreateStmt)) + { + /* inhRelations != NULL, partbound != NULL, tableElts == NULL */ + CreateStmt *stmt = (CreateStmt *) parsetree; + + if (stmt->inhRelations && stmt->partbound != NULL) + { + RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); + *parent_relid = RangeVarGetRelid(rv, NoLock, false); + if (get_pathman_relation_info(*parent_relid) == NULL) + return false; + + if (stmt->tableElts != NIL) + elog(ERROR, "pg_pathman doesn't support column definitions " + "in declarative syntax yet"); + + return true; + + } + } return false; } @@ -157,10 +184,10 @@ transform_bound_value(ParseState *pstate, A_Const *con, } /* handle ALTER TABLE .. 
ATTACH PARTITION command */ -void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +void +handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) { - Oid parent_relid, - partition_relid, + Oid partition_relid, proc_args[] = { REGCLASSOID, REGCLASSOID, ANYELEMENTOID, ANYELEMENTOID }; @@ -181,7 +208,10 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) Assert(cmd->subtype == AT_AttachPartition); - parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + if (pcmd->bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) elog(ERROR, "relation is not partitioned"); @@ -231,7 +261,8 @@ void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) } /* handle ALTER TABLE .. DETACH PARTITION command */ -void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) +void +handle_detach_partition(AlterTableCmd *cmd) { List *proc_name; FmgrInfo proc_flinfo; @@ -262,3 +293,63 @@ void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd) /* Invoke the callback */ FunctionCallInvoke(&proc_fcinfo); } + +/* handle CREATE TABLE .. PARTITION OF FOR VALUES FROM .. TO .. 
*/ +void +handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) +{ + Bound start, + end; + const PartRelationInfo *prel; + ParseState *pstate = make_parsestate(NULL); + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + + /* we show errors earlier for these asserts */ + Assert(stmt->inhRelations != NULL); + Assert(stmt->tableElts == NIL); + + if (stmt->partbound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + + if (prel->parttype != PT_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + + ldatum = (PartitionRangeDatum *) linitial(stmt->partbound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(stmt->partbound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + start = lval->constisnull? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(lval->constvalue); + + end = rval->constisnull? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(rval->constvalue); + + /* more checks */ + check_range_available(parent_relid, &start, &end, lval->consttype, true); + + /* Create a new RANGE partition and return its Oid */ + create_single_range_partition_internal(parent_relid, + &start, + &end, + lval->consttype, + stmt->relation, + stmt->tablespacename); +} diff --git a/src/hooks.c b/src/hooks.c index cd49d60e..b38d71e3 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -963,9 +963,9 @@ pathman_process_utility_hook(Node *first_arg, get_rel_name(relation_oid)))); } #if PG_VERSION_NUM >= 100000 - else if (is_pathman_related_partitioning_cmd(parsetree)) + else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid)) { - /* we can handle all the partitioning commands */ + /* we can handle all the partitioning commands in ALTER .. TABLE */ if (IsA(parsetree, AlterTableStmt)) { ListCell *lc; @@ -977,16 +977,21 @@ pathman_process_utility_hook(Node *first_arg, switch (cmd->subtype) { case AT_AttachPartition: - handle_attach_partition(stmt, cmd); + handle_attach_partition(relation_oid, cmd); return; case AT_DetachPartition: - handle_detach_partition(stmt, cmd); + handle_detach_partition(cmd); return; default: elog(ERROR, "can't handle this command"); } } } + else if (IsA(parsetree, CreateStmt)) + { + handle_create_partition_of(relation_oid, (CreateStmt *) parsetree); + return; + } } #endif } diff --git a/src/include/declarative.h b/src/include/declarative.h index b38eebaa..05993c79 100644 --- a/src/include/declarative.h +++ b/src/include/declarative.h @@ -6,10 +6,11 @@ #include "nodes/parsenodes.h" void modify_declative_partitioning_query(Query *query); -bool is_pathman_related_partitioning_cmd(Node *parsetree); +bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid); /* actual actions */ -void handle_attach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); -void handle_detach_partition(AlterTableStmt *stmt, AlterTableCmd *cmd); +void 
handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd); +void handle_detach_partition(AlterTableCmd *cmd); +void handle_create_partition_of(Oid parent_relid, CreateStmt *stmt); #endif /* DECLARATIVE_H */ From 61f5c8067383abd1da9035a50d89ec354297b8eb Mon Sep 17 00:00:00 2001 From: Ildus K Date: Tue, 27 Jun 2017 11:33:50 +0300 Subject: [PATCH 0966/1124] Add compability with REL_10_beta1 branch --- src/declarative.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/declarative.c b/src/declarative.c index 02d4a875..7517d82d 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -204,11 +204,14 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) ParseState *pstate = make_parsestate(NULL); const PartRelationInfo *prel; - PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) pcmd->bound; Assert(cmd->subtype == AT_AttachPartition); - if (pcmd->bound->strategy != PARTITION_STRATEGY_RANGE) + if (bound->strategy != PARTITION_STRATEGY_RANGE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_pathman only supports queries for range partitions"))); @@ -224,11 +227,15 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) proc_name = list_make2(makeString(pathman_schema), makeString(CppAsString(attach_range_partition))); - ldatum = (PartitionRangeDatum *) linitial(pcmd->bound->lowerdatums); + if ((!list_length(bound->lowerdatums)) || + (!list_length(bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); con = castNode(A_Const, ldatum->value); lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); - rdatum = (PartitionRangeDatum *) linitial(pcmd->bound->upperdatums); + rdatum = (PartitionRangeDatum *) 
linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); @@ -247,10 +254,6 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) proc_fcinfo.flinfo->fn_expr = (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); - if ((!list_length(pcmd->bound->lowerdatums)) || - (!list_length(pcmd->bound->upperdatums))) - elog(ERROR, "provide start and end value for range partition"); - proc_fcinfo.arg[2] = lval->constvalue; proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; proc_fcinfo.arg[3] = rval->constvalue; @@ -308,11 +311,14 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) *rval; A_Const *con; + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) stmt->partbound; + /* we show errors earlier for these asserts */ Assert(stmt->inhRelations != NULL); Assert(stmt->tableElts == NIL); - if (stmt->partbound->strategy != PARTITION_STRATEGY_RANGE) + if (bound->strategy != PARTITION_STRATEGY_RANGE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pg_pathman only supports queries for range partitions"))); @@ -326,11 +332,11 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) errmsg("table \"%s\" is not partitioned by RANGE", get_rel_name_or_relid(parent_relid)))); - ldatum = (PartitionRangeDatum *) linitial(stmt->partbound->lowerdatums); + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); con = castNode(A_Const, ldatum->value); lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); - rdatum = (PartitionRangeDatum *) linitial(stmt->partbound->upperdatums); + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); From 740239fa40adf18f9dfc9b48109ed330577e563d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 
Oct 2018 19:06:52 +0300 Subject: [PATCH 0967/1124] Fix declarative syntax for pg10 --- Makefile | 10 ++++----- expected/pathman_declarative.out | 1 - src/declarative.c | 37 +++++++++++++++++++++----------- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index ed2d624b..e0cf5197 100644 --- a/Makefile +++ b/Makefile @@ -2,13 +2,10 @@ MODULE_big = pg_pathman -# versions of postgresql with declarative partitioning -DECL_CHECK_VERSIONS = 10 11 - ifdef USE_PGXS PG_CONFIG = pg_config VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter $(VNUM), $(DECL_CHECK_VERSIONS))) +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) EXTRA_REGRESS = pathman_declarative EXTRA_OBJS = src/declarative.o endif @@ -21,7 +18,7 @@ OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(WIN32RES) + src/partition_overseer.o $(EXTRA_OBJS) $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -73,7 +70,8 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views ${EXTRA_REGRESS} + pathman_views $(EXTRA_REGRESS) + EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index e853898d..011a0f71 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -58,7 +58,6 @@ Check constraints: Inherits: test.range_rel ALTER TABLE test.range_rel DETACH PARTITION test.r2; -NOTICE: trigger "range_rel_upd_trig" for relation "test.r2" does not exist, skipping SELECT * FROM pathman.pathman_partition_list; parent | partition | parttype | expr | range_min | range_max 
----------------+------------------+----------+------+------------+------------ diff --git a/src/declarative.c b/src/declarative.c index 7517d82d..891efd62 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -1,19 +1,22 @@ +#include "pathman.h" #include "declarative.h" #include "utils.h" #include "partition_creation.h" -#include "fmgr.h" #include "access/htup_details.h" #include "catalog/namespace.h" -#include "catalog/pg_type.h" #include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" -#include "parser/parse_func.h" +#include "optimizer/planner.h" #include "parser/parse_coerce.h" -#include "utils/int8.h" -#include "utils/lsyscache.h" +#include "parser/parse_func.h" #include "utils/builtins.h" #include "utils/int8.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/varbit.h" @@ -33,13 +36,16 @@ modify_declative_partitioning_query(Query *query) if (IsA(query->utilityStmt, AlterTableStmt)) { + PartRelationInfo *prel; ListCell *lcmd; Oid relid; AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; relid = RangeVarGetRelid(stmt->relation, NoLock, true); - if (get_pathman_relation_info(relid) != NULL) + if ((prel = get_pathman_relation_info(relid)) != NULL) { + close_pathman_relation_info(prel); + foreach(lcmd, stmt->cmds) { AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); @@ -61,6 +67,8 @@ modify_declative_partitioning_query(Query *query) bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { + PartRelationInfo *prel; + if (IsA(parsetree, AlterTableStmt)) { ListCell *lc; @@ -68,9 +76,11 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) int cnt = 0; *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); - if (get_pathman_relation_info(*parent_relid) == NULL) + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; + 
close_pathman_relation_info(prel); + /* * Since cmds can contain multiple commmands but we can handle only * two of them here, so we need to check that there are only commands @@ -106,9 +116,10 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) { RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); *parent_relid = RangeVarGetRelid(rv, NoLock, false); - if (get_pathman_relation_info(*parent_relid) == NULL) + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; + close_pathman_relation_info(prel); if (stmt->tableElts != NIL) elog(ERROR, "pg_pathman doesn't support column definitions " "in declarative syntax yet"); @@ -202,7 +213,7 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) A_Const *con; List *fn_args; ParseState *pstate = make_parsestate(NULL); - const PartRelationInfo *prel; + PartRelationInfo *prel; PartitionCmd *pcmd = (PartitionCmd *) cmd->def; @@ -238,6 +249,7 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); /* Lookup function's Oid and get FmgrInfo */ fmgr_info(LookupFuncName(proc_name, 4, proc_args, false), &proc_flinfo); @@ -255,9 +267,9 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); proc_fcinfo.arg[2] = lval->constvalue; - proc_fcinfo.argnull[2] = ldatum->infinite || lval->constisnull; + proc_fcinfo.argnull[2] = lval->constisnull; proc_fcinfo.arg[3] = rval->constvalue; - proc_fcinfo.argnull[3] = rdatum->infinite || rval->constisnull; + proc_fcinfo.argnull[3] = rval->constisnull; /* Invoke the callback */ FunctionCallInvoke(&proc_fcinfo); @@ -303,7 +315,7 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) { Bound start, end; - const PartRelationInfo *prel; + 
PartRelationInfo *prel; ParseState *pstate = make_parsestate(NULL); PartitionRangeDatum *ldatum, *rdatum; @@ -339,6 +351,7 @@ handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); con = castNode(A_Const, rdatum->value); rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); start = lval->constisnull? MakeBoundInf(MINUS_INFINITY) : From e06bbc73bcb4d98eb28cee11a4e742c1055efe3d Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 Oct 2018 19:11:10 +0300 Subject: [PATCH 0968/1124] Fix declarative test on pg11 --- expected/pathman_declarative_1.out | 100 +++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 expected/pathman_declarative_1.out diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out new file mode 100644 index 00000000..8ef4e556 --- /dev/null +++ b/expected/pathman_declarative_1.out @@ -0,0 +1,100 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: table "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + 
test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | 
test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 8 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; From afdf2f5c6803c71d825f491812c487f731a221ae Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 4 Oct 2018 19:30:46 +0300 Subject: [PATCH 0969/1124] Add documentation for declarative partitioning --- README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.md b/README.md index 2bf95a2e..1a33e01a 100644 --- a/README.md +++ 
b/README.md @@ -70,6 +70,7 @@ More interesting features are yet to come. Stay tuned! * Non-blocking [concurrent table partitioning](#data-migration); * FDW support (foreign partitions); * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. + * Partial support of [`declarative partitioning`](#declarative-partitioning) (from PostgreSQL 10). ## Installation guide To install `pg_pathman`, execute this in the module's directory: @@ -410,6 +411,26 @@ AS SELECT * FROM @extschema@.show_cache_stats(); ``` Shows memory consumption of various caches. +## Declarative partitioning + +From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION` +and `CREATE TABLE .. PARTITION OF` commands could be with with tables +partitioned by `pg_pathman`: + +```plpgsql +CREATE TABLE child1 (LIKE partitioned_table); + +--- attach new partition +ALTER TABLE partitioned_table ATTACH PARTITION child1 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); + +--- detach the partition +ALTER TABLE partitioned_table DETACH PARTITION child1; + +-- create a partition +CREATE TABLE child2 PARTITION OF partitioned_table + FOR VALUES IN ('2015-05-01', '2015-06-01'); +``` ## Custom plan nodes `pg_pathman` provides a couple of [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI) which aim to reduce execution time, namely: From a6bd7b4838c495a58ff4d8b958f011dbc7a5a19f Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 8 Oct 2018 17:39:10 +0300 Subject: [PATCH 0970/1124] Fix typo in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1a33e01a..684e37c4 100644 --- a/README.md +++ b/README.md @@ -414,7 +414,7 @@ Shows memory consumption of various caches. ## Declarative partitioning From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION` -and `CREATE TABLE .. PARTITION OF` commands could be with with tables +and `CREATE TABLE .. 
PARTITION OF` commands could be used with tables partitioned by `pg_pathman`: ```plpgsql From b33eb4df1bd617c6c06b4446bd260a12d8aef1bb Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 8 Oct 2018 17:46:52 +0300 Subject: [PATCH 0971/1124] Bump version to 1.5.2 --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 744310c2..90e38663 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.1", + "version": "1.5.2", "maintainer": [ "Dmitry Ivanov ", "Ildus Kurbangaliev " @@ -23,7 +23,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.1", + "version": "1.5.2", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 6b73351e..eadb9a70 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.1 + 1.5.2 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 1c8c1584..3c959a78 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.1" +#define CURRENT_LIB_VERSION "1.5.2" void *pathman_cache_search_relid(HTAB *cache_table, From 8a1d81324d741395b8f5964a40370a5b8f374a9c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 8 Oct 2018 18:57:20 +0300 Subject: [PATCH 0972/1124] Make get_partition_cooked_key work for tables with no partitions. 
--- src/pl_funcs.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 44a5f93f..ea718752 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -142,22 +142,33 @@ get_partition_key_type_pl(PG_FUNCTION_ARGS) } /* - * Return partition key type. + * Return cooked partition key. */ Datum get_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - PartRelationInfo *prel; - Datum res; + /* Values extracted from PATHMAN_CONFIG */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + Oid relid = PG_GETARG_OID(0); + char *expr_cstr; + Node *expr; + char *cooked_cstr; + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_ANY); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + expr = cook_partitioning_expression(relid, expr_cstr, NULL); + cooked_cstr = nodeToString(expr); - res = CStringGetTextDatum(nodeToString(prel->expr)); - close_pathman_relation_info(prel); + pfree(expr_cstr); + pfree(expr); - PG_RETURN_TEXT_P(res); + PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); } /* From 497ab20a2cb119278fca0c68c3c1b7140bdc31d9 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 8 Oct 2018 20:10:38 +0300 Subject: [PATCH 0973/1124] Return private function for cached cooked key to fix tests coverage --- expected/pathman_column_type.out | 23 +++++++++++++++++++++-- sql/pathman_column_type.sql | 15 +++++++++++++-- src/pl_funcs.c | 21 +++++++++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index d3022d77..3ae9355c 100644 --- a/expected/pathman_column_type.out +++ 
b/expected/pathman_column_type.out @@ -28,21 +28,33 @@ SELECT context, entries FROM pathman_cache_stats ORDER BY context; partition status cache | 3 (4 rows) -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); get_partition_cooked_key ----------------------------------------------------------------------------------------------------------------------- {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + SELECT get_partition_key_type('test_column_type.test'::REGCLASS); get_partition_key_type ------------------------ integer (1 row) +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that expression has been built */ +/* check that correct expression has been built */ SELECT get_partition_key_type('test_column_type.test'::REGCLASS); get_partition_key_type ------------------------ @@ -55,6 +67,13 @@ SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key 
+------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; val diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index ab2b43f1..98c73908 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -17,14 +17,25 @@ SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); SELECT * FROM test_column_type.test; SELECT context, entries FROM pathman_cache_stats ORDER BY context; -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; + SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that expression has been built */ +/* check that correct expression has been built */ SELECT get_partition_key_type('test_column_type.test'::REGCLASS); SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ea718752..26d5af8c 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -48,6 +48,7 @@ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); 
PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl ); +PG_FUNCTION_INFO_V1( get_cached_partition_cooked_key_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); @@ -171,6 +172,26 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); } +/* + * Return cached cooked partition key. + * + * Used in tests for invalidation. + */ +Datum +get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; + Datum res; + + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); + res = CStringGetTextDatum(nodeToString(prel->expr)); + close_pathman_relation_info(prel); + + PG_RETURN_TEXT_P(res); +} + /* * Extract basic type of a domain. */ From 085f2362d3f348404f1160eead053d431406a19a Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 9 Oct 2018 15:32:23 +0300 Subject: [PATCH 0974/1124] Fix build errors and tests with our standard and enterprise versions --- Makefile | 39 +++++++++++++++++++++------------ README.md | 2 +- sql/pathman_hashjoin.sql | 1 + sql/pathman_mergejoin.sql | 1 - src/declarative.c | 2 +- src/hooks.c | 2 +- src/include/declarative.h | 2 +- src/planner_tree_modification.c | 4 ++-- 8 files changed, 32 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index e0cf5197..c6e800a7 100644 --- a/Makefile +++ b/Makefile @@ -2,23 +2,13 @@ MODULE_big = pg_pathman -ifdef USE_PGXS -PG_CONFIG = pg_config -VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) - EXTRA_REGRESS = pathman_declarative - EXTRA_OBJS = src/declarative.o -endif -endif -include $(PGXS) - OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ src/pl_funcs.o 
src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ - src/partition_overseer.o $(EXTRA_OBJS) $(WIN32RES) + src/partition_overseer.o $(WIN32RES) ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include @@ -70,7 +60,7 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views $(EXTRA_REGRESS) + pathman_views EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add @@ -78,16 +68,37 @@ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output ifdef USE_PGXS -PG_CONFIG = pg_config +PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') else subdir = contrib/pg_pathman top_builddir = ../.. 
include $(top_builddir)/src/Makefile.global +endif + +# our standard version could also use declarative syntax +ifdef PGPRO_EDITION +ifeq ($(PGPRO_EDITION),standard) +VNUM := $(VERSION) +endif +endif + +ifdef VNUM +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) +REGRESS += pathman_declarative +OBJS += src/declarative.o +override PG_CPPFLAGS += -DENABLE_DECLARATIVE +endif +endif + +ifdef USE_PGXS +include $(PGXS) +else include $(top_srcdir)/contrib/contrib-global.mk endif + $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ diff --git a/README.md b/README.md index 684e37c4..4fb8e5ac 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: * PostgreSQL 9.5, 9.6, 10, 11; - * Postgres Pro Standard 9.5, 9.6; + * Postgres Pro Standard 9.5, 9.6, 10; * Postgres Pro Enterprise; Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). 
diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index d3cc1b2b..411e0a7f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -33,6 +33,7 @@ SET enable_seqscan = OFF; SET enable_nestloop = OFF; SET enable_hashjoin = ON; SET enable_mergejoin = OFF; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 90bf3166..9b0b95b1 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -44,7 +44,6 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman; DROP SCHEMA pathman CASCADE; diff --git a/src/declarative.c b/src/declarative.c index 891efd62..ca4fe165 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -29,7 +29,7 @@ * we set it back (look `is_pathman_related_partitioning_cmd`) */ void -modify_declative_partitioning_query(Query *query) +modify_declarative_partitioning_query(Query *query) { if (query->commandType != CMD_UTILITY) return; diff --git a/src/hooks.c b/src/hooks.c index b38d71e3..656efe9f 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -962,7 +962,7 @@ pathman_process_utility_hook(Node *first_arg, get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); } -#if PG_VERSION_NUM >= 100000 +#ifdef ENABLE_DECLARATIVE else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid)) { /* we can handle all the partitioning commands in ALTER .. 
TABLE */ diff --git a/src/include/declarative.h b/src/include/declarative.h index 05993c79..ee4ea40b 100644 --- a/src/include/declarative.h +++ b/src/include/declarative.h @@ -5,7 +5,7 @@ #include "nodes/nodes.h" #include "nodes/parsenodes.h" -void modify_declative_partitioning_query(Query *query); +void modify_declarative_partitioning_query(Query *query); bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid); /* actual actions */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b3391bbf..f40c152f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -429,8 +429,8 @@ pathman_post_analyze_query_walker(Node *node, void *context) Query *query = (Query *) node; /* Make changes for declarative syntax */ -#if PG_VERSION_NUM >= 100000 - modify_declative_partitioning_query(query); +#ifdef ENABLE_DECLARATIVE + modify_declarative_partitioning_query(query); #endif /* Handle Query node */ From dd072e560148ada0fb267443ff9b8b9dc5b4108f Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Tue, 9 Oct 2018 15:32:41 +0300 Subject: [PATCH 0975/1124] Add forgotten files --- expected/pathman_hashjoin_2.out | 66 ++++++++++++++++++++++++++++++++ expected/pathman_mergejoin_2.out | 65 +++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 expected/pathman_hashjoin_2.out create mode 100644 expected/pathman_mergejoin_2.out diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out new file mode 100644 index 00000000..d0cba65d --- /dev/null +++ b/expected/pathman_hashjoin_2.out @@ -0,0 +1,66 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(13 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out new file mode 100644 index 00000000..acff2247 --- /dev/null +++ b/expected/pathman_mergejoin_2.out @@ -0,0 +1,65 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA 
pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(13 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop 
cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; From dd71813ae159e9f9c677adca7d7aaf4058b6f711 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 10 Oct 2018 01:10:28 +0300 Subject: [PATCH 0976/1124] Mind that build_check_constraint_name returns quoted name. --- hash.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hash.sql b/hash.sql index 8cf9b19a..0f694882 100644 --- a/hash.sql +++ b/hash.sql @@ -108,7 +108,7 @@ BEGIN /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND conname = old_constr_name + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name INTO old_constr_def; /* Detach old partition */ From 8fe15fc6e9470cf0801510c4361266768df31d25 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 10 Oct 2018 12:54:35 +0300 Subject: [PATCH 0977/1124] Disable declarative syntax for in-tree builds --- Makefile | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index c6e800a7..80f74e7f 100644 --- a/Makefile +++ b/Makefile @@ -71,34 +71,22 @@ ifdef USE_PGXS PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') -else -subdir = contrib/pg_pathman -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -endif - -# our standard version could also use declarative syntax -ifdef PGPRO_EDITION -ifeq ($(PGPRO_EDITION),standard) -VNUM := $(VERSION) -endif -endif -ifdef VNUM +# check for declarative syntax ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) REGRESS += pathman_declarative OBJS += src/declarative.o override PG_CPPFLAGS += -DENABLE_DECLARATIVE endif -endif -ifdef USE_PGXS include $(PGXS) else +subdir = contrib/pg_pathman +top_builddir = ../.. 
+include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif - $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ From 02e0db7650d48bdc5b25e6176d29ebaa34e5a921 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 19 Nov 2018 18:46:28 +0300 Subject: [PATCH 0978/1124] Check readiness in add_to_pathman_config --- expected/pathman_calamity.out | 2 +- src/pl_funcs.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index eadb9a70..08beae66 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1005,7 +1005,7 @@ SHOW pg_pathman.enable; (1 row) SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled SELECT * FROM pathman_partition_list; /* not ok */ ERROR: pg_pathman is not initialized yet SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 26d5af8c..06b1cf56 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -740,6 +740,9 @@ add_to_pathman_config(PG_FUNCTION_ARGS) PathmanInitState init_state; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); From 6c9d435f155453ede59251a36c10a5a6703e8666 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 12:21:34 +0300 Subject: [PATCH 0979/1124] Bump version to 1.5.3 --- META.json | 5 ++--- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index 90e38663..510d6082 100644 --- a/META.json +++ b/META.json @@ -2,9 +2,8 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.2", + 
"version": "1.5.3", "maintainer": [ - "Dmitry Ivanov ", "Ildus Kurbangaliev " ], "license": "postgresql", @@ -23,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.2", + "version": "1.5.3", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 08beae66..ea6827ff 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.2 + 1.5.3 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 3c959a78..4ec6fbe3 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.2" +#define CURRENT_LIB_VERSION "1.5.3" void *pathman_cache_search_relid(HTAB *cache_table, From d1032b0f012a250efca1ede699434a4df93b4608 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 18 Jan 2019 12:40:57 +0300 Subject: [PATCH 0980/1124] [PGPRO-2355] Check pathman readiness in split and merge range parts. 
--- src/pl_range_funcs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 351926f7..daf6cf4c 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -489,6 +489,9 @@ split_range_partition(PG_FUNCTION_ARGS) char *query; int i; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + if (!PG_ARGISNULL(0)) { partition1 = PG_GETARG_OID(0); @@ -652,6 +655,9 @@ merge_range_partitions(PG_FUNCTION_ARGS) FmgrInfo finfo; int i; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + /* Validate array type */ Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); From 16810d9211e8e0e4a38bf8fc0d73d07b745ab5a8 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 1 Feb 2019 16:06:23 +0300 Subject: [PATCH 0981/1124] Revisit pathman readiness again. Most pathman functions break if pathman is disabled: earlier I put defense checks in split and merge range partitions, now it is drop_range_partition_expand_next. Looks like the reason is that pathman caches are not being invalidated if it is disabled: pathman_relcache_hook exits right away then. This is kinda reasonable: if we want to disable pathman completely, why maintain the caches? So this time try to bury the readiness check deeper, in get_pathman_relation_info itself. BTW, pathman caches are not dropped when it is disabled, which looks suspicious on its own -- probably if we re-enable it later, caches might be inconsistent. 
--- expected/pathman_calamity.out | 2 +- src/pl_range_funcs.c | 6 ------ src/relation_info.c | 3 +++ 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index ea6827ff..bef99948 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1009,7 +1009,7 @@ ERROR: pg_pathman is disabled SELECT * FROM pathman_partition_list; /* not ok */ ERROR: pg_pathman is not initialized yet SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ QUERY PLAN ------------------------------ diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index daf6cf4c..351926f7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -489,9 +489,6 @@ split_range_partition(PG_FUNCTION_ARGS) char *query; int i; - if (!IsPathmanReady()) - elog(ERROR, "pg_pathman is disabled"); - if (!PG_ARGISNULL(0)) { partition1 = PG_GETARG_OID(0); @@ -655,9 +652,6 @@ merge_range_partitions(PG_FUNCTION_ARGS) FmgrInfo finfo; int i; - if (!IsPathmanReady()) - elog(ERROR, "pg_pathman is disabled"); - /* Validate array type */ Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); diff --git a/src/relation_info.c b/src/relation_info.c index 8ee74217..9bb8d0db 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -320,6 +320,9 @@ get_pathman_relation_info(Oid relid) { PartStatusInfo *psin; + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + /* Should always be called in transaction */ Assert(IsTransactionState()); From 64992326d83b75d031827002a8ef5ae7b8c28ff5 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 5 Feb 2019 15:39:54 +0300 Subject: [PATCH 0982/1124] Bump 1.5.4 lib version. 
--- META.json | 6 +++--- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index 510d6082..9321b82c 100644 --- a/META.json +++ b/META.json @@ -2,9 +2,9 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.3", + "version": "1.5.4", "maintainer": [ - "Ildus Kurbangaliev " + "Arseny Sher " ], "license": "postgresql", "resources": { @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.3", + "version": "1.5.4", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index bef99948..44f14d96 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.3 + 1.5.4 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 4ec6fbe3..5b133d01 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.3" +#define CURRENT_LIB_VERSION "1.5.4" void *pathman_cache_search_relid(HTAB *cache_table, From 10e6c71f9b870ba2fba59bacd49bb6cc7e8ecb4a Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 5 Feb 2019 15:40:19 +0300 Subject: [PATCH 0983/1124] Fix upgrade from 1.4 to 1.5. This upgrade drops a column from pg_config. This is problematic, because pg_attribute entry is never actually removed in Postgres and fresh install and upgraded one had different number of attrs. To avoid bothering with this, recreate pg_config during upgrade from scratch. 
To test this, rewrite check_update.py which was outright broken; now it runs large part of regression tests. Also, test script revealed that update script hasn't included dd71813ae1 fix for replace_hash_partition. --- pg_pathman--1.4--1.5.sql | 129 +++++++++++++++++++ tests/update/README.md | 6 + tests/update/check_update.py | 243 ++++++++++++++++++++++------------- 3 files changed, 287 insertions(+), 91 deletions(-) diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index a8e7fb21..11406476 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -11,6 +11,41 @@ RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr; +/* + * Dropped columns are never actually purged, entry in pg_attribute remains. + * Since dealing with different number of attrs in C code is cumbersome, + * let's recreate table instead. + */ +CREATE TABLE @extschema@.pathman_config_tmp (LIKE @extschema@.pathman_config INCLUDING ALL); +INSERT INTO @extschema@.pathman_config_tmp SELECT * FROM @extschema@.pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger DISABLE; +DROP TABLE @extschema@.pathman_config; +ALTER TABLE @extschema@.pathman_config_tmp RENAME TO pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE; + +/* + * Get back stuff not preserved by CREATE TABLE LIKE: ACL, RLS and + * pg_extension_config_dump mark. + */ + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config +TO public; + +/* + * Row security policy to restrict partitioning operations to owner and superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; + +/* + * Enable dump of config tables with pg_dump. 
+ */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); + + ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check CHECK (@extschema@.validate_interval_value(partrel, expr, @@ -505,6 +540,100 @@ BEGIN END $$ LANGUAGE plpgsql; +/* + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE 
EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + /* * Disable pathman partitioning for specified relation. */ diff --git a/tests/update/README.md b/tests/update/README.md index f31f4116..fd042822 100644 --- a/tests/update/README.md +++ b/tests/update/README.md @@ -9,3 +9,9 @@ PG_CONFIG=... ./dump_pathman_objects %DBNAME% diff file_1 file_2 ``` + +check_update.py script tries to verify that update is ok automatically. 
For +instance, +```bash +tests/update/check_update.py d34a77e worktree +``` diff --git a/tests/update/check_update.py b/tests/update/check_update.py index be5f2aa2..9ac4db62 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -8,17 +8,12 @@ import argparse import testgres import subprocess -import difflib +import time my_dir = os.path.dirname(os.path.abspath(__file__)) repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) print(repo_dir) -compilation = ''' -make USE_PGXS=1 clean -make USE_PGXS=1 install -''' - # just bunch of tables to create run_sql = ''' CREATE EXTENSION pg_pathman; @@ -132,95 +127,161 @@ SELECT create_hash_partitions('hash_rel_next1', 'value', 3); ''' -@contextlib.contextmanager -def cwd(path): - print("cwd: ", path) - curdir = os.getcwd() - os.chdir(path) - - try: - yield - finally: - print("cwd:", curdir) - os.chdir(curdir) - def shell(cmd): print(cmd) - subprocess.check_output(cmd, shell=True) + cp = subprocess.run(cmd, shell=True) + if cp.returncode != 0: + raise subprocess.CalledProcessError(cp.returncode, cmd) + # print(subprocess.check_output(cmd, shell=True).decode("utf-8")) -dump1_file = '/tmp/dump1.sql' -dump2_file = '/tmp/dump2.sql' +def shell_call(cmd): + print(cmd) + return subprocess.run(cmd, shell=True) + +def reinstall_pathman(tmp_pathman_path, revision): + if revision == 'worktree': + shutil.rmtree(tmp_pathman_path) + shutil.copytree(repo_dir, tmp_pathman_path) + os.chdir(tmp_pathman_path) + else: + os.chdir(tmp_pathman_path) + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % revision) + shell('make USE_PGXS=1 clean && make USE_PGXS=1 install -j4') if __name__ == '__main__': - parser = argparse.ArgumentParser(description='pg_pathman update checker') + parser = argparse.ArgumentParser(description=''' + pg_pathman update checker. Testgres is used. Junks into /tmp/pathman_check_update. + First do some partitioned stuff on new version. 
Save full database dump to + dump_new.sql and pathman object definitions to pathman_objects_new.sql. + Then run old version, do the same stuff. Upgrade and make dumps. Ensure + dumps are the same. Finally, run regressions tests on upgraded version. + ''') parser.add_argument('branches', nargs=2, - help='specify branches ("main rel_1.5")') - + help='specify branches , e.g. "d34a77e master". Special value "worktree" means, well, working tree.') args = parser.parse_args() - - with open(os.path.join(my_dir, 'dump_pathman_objects.sql'), 'r') as f: - dump_sql = f.read() - - shutil.rmtree('/tmp/pg_pathman') - shutil.copytree(repo_dir, '/tmp/pg_pathman') - - with cwd('/tmp/pg_pathman'): - shell("git clean -fdx") - shell("git reset --hard") - shell("git checkout %s" % args.branches[0]) - shell(compilation) - - with testgres.get_new_node('updated') as node: - node.init() - node.append_conf("shared_preload_libraries='pg_pathman'\n") - - node.start() - node.safe_psql('postgres', run_sql) - node.dump(dump1_file, 'postgres') - node.stop() - - shell("git clean -fdx") - shell("git checkout %s" % args.branches[1]) - shell(compilation) - - version = None - with open('pg_pathman.control') as f: - for line in f.readlines(): - if line.startswith('default_version'): - version = line.split('=')[1].strip() - - if version is None: - print("cound not find version in second branch") - exit(1) - - node.start() - p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - dumped_objects_old = p.communicate(input=dump_sql.encode())[0].decode() - node.stop() - - # now make clean install - with testgres.get_new_node('from_scratch') as node: - node.init() - node.append_conf("shared_preload_libraries='pg_pathman'\n") - node.start() - node.safe_psql('postgres', run_sql) - p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode() - node.dump(dump2_file, 
'postgres') - - # check dumps - node.safe_psql('postgres', 'create database d1') - node.restore(dump1_file, 'd1') - - node.safe_psql('postgres', 'create database d2') - node.restore(dump2_file, 'd2') - node.stop() - - if dumped_objects_old != dumped_objects_new: - print("\nDIFF:") - for line in difflib.context_diff(dumped_objects_old.split('\n'), dumped_objects_new.split('\n')): - print(line) - else: - print("\nUPDATE CHECK: ALL GOOD") + old_branch, new_branch = args.branches[0], args.branches[1] + + pathman_objs_script = os.path.join(my_dir, 'dump_pathman_objects.sql') + + data_prefix = "/tmp/pathman_check_update" + if os.path.isdir(data_prefix): + shutil.rmtree(data_prefix) + dump_new_path = os.path.join(data_prefix, 'dump_new.sql') + dump_updated_path = os.path.join(data_prefix, 'dump_updated.sql') + dump_diff_path = os.path.join(data_prefix, 'dump.diff') + pathman_objs_new_path = os.path.join(data_prefix, 'pathman_objects_new.sql') + pathman_objs_updated_path = os.path.join(data_prefix, 'pathman_objects_updated.sql') + pathman_objs_diff_path = os.path.join(data_prefix, 'pathman_objs.diff') + tmp_pathman_path = os.path.join(data_prefix, "pg_pathman") + + shutil.copytree(repo_dir, tmp_pathman_path) + + reinstall_pathman(tmp_pathman_path, new_branch) + with testgres.get_new_node('brand_new') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + node.dump(dump_new_path, 'postgres') + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_new_path)) + node.stop() + + # now install old version... + reinstall_pathman(tmp_pathman_path, old_branch) + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + # do the same stuff... 
+ node.safe_psql('postgres', run_sql) + # and prepare regression db, see below + node.safe_psql('postgres', 'create database contrib_regression') + node.safe_psql('contrib_regression', 'create extension pg_pathman') + + # and upgrade pathman + node.stop() + reinstall_pathman(tmp_pathman_path, new_branch) + node.start() + print("Running updated db on port {}, datadir {}".format(node.port, node.base_dir)) + node.safe_psql('postgres', "alter extension pg_pathman update") + node.safe_psql('postgres', "set pg_pathman.enable = t;") + + # regression tests db, see below + node.safe_psql('contrib_regression', "alter extension pg_pathman update") + node.safe_psql('contrib_regression', "set pg_pathman.enable = t;") + + node.dump(dump_updated_path, 'postgres') + # time.sleep(432432) + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_updated_path)) + + # check diffs + shell_call("diff -U3 {} {} > {} 2>&1".format(dump_updated_path, dump_new_path, dump_diff_path)) + if os.stat(dump_diff_path).st_size != 0: + msg = "DB dumps are not equal, check out the diff at {}\nProbably that's actually ok, please eyeball the diff manually and say, continue?".format(dump_diff_path) + if input("%s (y/N) " % msg).lower() != 'y': + sys.exit(1) + shell_call("diff -U3 {} {} > {} 2>&1".format(pathman_objs_updated_path, pathman_objs_new_path, pathman_objs_diff_path)) + if os.stat(pathman_objs_diff_path).st_size != 0: + print("pathman objects dumps are not equal, check out the diff at {}".format(pathman_objs_diff_path)) + # sys.exit(1) + + print("just in case, checking that dump can be restored...") + node.safe_psql('postgres', 'create database tmp') + node.restore(dump_updated_path, 'tmp') + + print("finally, run (some) pathman regression tests") + # This is a bit tricky because we want to run tests on exactly this + # installation of extension. 
It means we must create db beforehand, + # tell pg_regress not create it and discard all create/drop extension + # from tests. + # Not all tests can be thus adapted instantly, so I think that's enough + # for now. + # generated with smth like ls ~/postgres/pg_pathman/sql/ | sort | sed 's/.sql//' | xargs -n 1 printf "'%s',\n" + os.chdir(tmp_pathman_path) + REGRESS = ['pathman_array_qual', + 'pathman_bgw', + 'pathman_callbacks', + 'pathman_column_type', + 'pathman_cte', + 'pathman_domains', + 'pathman_dropped_cols', + 'pathman_expressions', + 'pathman_foreign_keys', + 'pathman_gaps', + 'pathman_inserts', + 'pathman_interval', + 'pathman_lateral', + 'pathman_only', + 'pathman_param_upd_del', + 'pathman_permissions', + 'pathman_rebuild_deletes', + 'pathman_rebuild_updates', + 'pathman_rowmarks', + 'pathman_subpartitions', + 'pathman_update_node', + 'pathman_update_triggers', + 'pathman_utility_stmt', + 'pathman_views' + ] + outfiles = os.listdir(os.path.join(tmp_pathman_path, 'expected')) + for tname in REGRESS: + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' sql/{}.sql".format(tname)) + # CASCADE also removed + shell("sed -i '/DROP EXTENSION pg_pathman/d' sql/{}.sql".format(tname)) + # there might be more then one .out file + for outfile in outfiles: + if outfile.startswith(tname): + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' expected/{}".format(outfile)) + shell("sed -i '/DROP EXTENSION pg_pathman/d' expected/{}".format(outfile)) + + # time.sleep(43243242) + shell("make USE_PGXS=1 PGPORT={} EXTRA_REGRESS_OPTS=--use-existing REGRESS='{}' installcheck 2>&1".format(node.port, " ".join(REGRESS))) + + node.stop() + + print("It's Twelve O'clock and All's Well.") From 85fc5ccf1216a5d26e5d144fd7fc67e092587940 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 25 Feb 2019 13:56:56 +0300 Subject: [PATCH 0984/1124] Blow out cached bounds of all children when parent is invalidated. 
A concrete example where leaving them is not ok is - Range partition table - Delete entry from pathman_config (psin was blown, but bounds not) - Now hash partition table; bounds cache with uninitialized hash_idx is used. While here, also spawn relcache inval message on delete from pathman_config, not only from pathman_config_params. --- init.sql | 6 +++++- pg_pathman--1.4--1.5.sql | 4 ++++ src/hooks.c | 2 +- src/include/relation_info.h | 2 +- src/pl_funcs.c | 17 +++++++++++----- src/relation_info.c | 40 ++++++++++++++++++++++++++++++++++++- 6 files changed, 62 insertions(+), 9 deletions(-) diff --git a/init.sql b/init.sql index fdb774db..7dab67d8 100644 --- a/init.sql +++ b/init.sql @@ -111,7 +111,7 @@ ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; /* - * Invalidate relcache every time someone changes parameters config. + * Invalidate relcache every time someone changes parameters config or pathman_config */ CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' @@ -121,6 +121,10 @@ CREATE TRIGGER pathman_config_params_trigger AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + /* * Enable dump of config tables with pg_dump. 
*/ diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql index 11406476..2aa02bf9 100644 --- a/pg_pathman--1.4--1.5.sql +++ b/pg_pathman--1.4--1.5.sql @@ -52,6 +52,10 @@ ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_ch parttype, range_interval)); +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + /* * Get parsed and analyzed expression. */ diff --git a/src/hooks.c b/src/hooks.c index 656efe9f..90d84dc4 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -874,7 +874,7 @@ pathman_relcache_hook(Datum arg, Oid relid) else if (relid >= FirstNormalObjectId) { /* Invalidate PartBoundInfo entry if needed */ - forget_bounds_of_partition(relid); + forget_bounds_of_rel(relid); /* Invalidate PartParentInfo entry if needed */ forget_parent_of_partition(relid); diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 6b9ffa92..5b23cd3b 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -367,7 +367,7 @@ void shout_if_prel_is_invalid(const Oid parent_oid, const PartType expected_part_type); /* Bounds cache */ -void forget_bounds_of_partition(Oid partition); +void forget_bounds_of_rel(Oid partition); PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); Expr *get_partition_constraint_expr(Oid partition, bool raise_error); void invalidate_bounds_cache(void); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 06b1cf56..7ca2ec0a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -904,14 +904,16 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; Oid pathman_config_params; + Oid pathman_config; Oid partrel; Datum partrel_datum; bool partrel_isnull; /* Fetch Oid of PATHMAN_CONFIG_PARAMS */ pathman_config_params = get_pathman_config_params_relid(true); + pathman_config = 
get_pathman_config_relid(true); - /* Handle "pg_pathman.enabled = t" case */ + /* Handle "pg_pathman.enabled = f" case */ if (!OidIsValid(pathman_config_params)) goto pathman_config_params_trigger_func_return; @@ -925,12 +927,17 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) trigdata->tg_trigger->tgname); /* Handle wrong relation */ - if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params) - elog(ERROR, "%s: must be fired for relation \"%s\"", + if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params && + RelationGetRelid(trigdata->tg_relation) != pathman_config) + elog(ERROR, "%s: must be fired for relation \"%s\" or \"%s\"", trigdata->tg_trigger->tgname, - get_rel_name(pathman_config_params)); + get_rel_name(pathman_config_params), + get_rel_name(pathman_config)); - /* Extract partitioned relation's Oid */ + /* + * Extract partitioned relation's Oid. + * Hacky: 1 is attrnum of relid for both pathman_config and pathman_config_params + */ partrel_datum = heap_getattr(trigdata->tg_trigtuple, Anum_pathman_config_params_partrel, RelationGetDescr(trigdata->tg_relation), diff --git a/src/relation_info.c b/src/relation_info.c index 9bb8d0db..d524c168 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -160,6 +160,8 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); +static void forget_bounds_of_partition(Oid partition); + static bool query_contains_subqueries(Node *node, void *context); @@ -929,7 +931,7 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, */ /* Remove partition's constraint from cache */ -void +static void forget_bounds_of_partition(Oid partition) { PartBoundInfo *pbin; @@ -953,6 +955,42 @@ forget_bounds_of_partition(Oid partition) HASH_REMOVE, NULL); } + +} + +/* + * Remove rel's constraint from cache, if relid is partition; + * Remove all children constraints, if it is parent. 
+ */ +void +forget_bounds_of_rel(Oid relid) +{ + PartStatusInfo *psin; + + forget_bounds_of_partition(relid); + + /* + * If it was the parent who got invalidated, purge children's bounds. + * We assume here that if bounds_cache has something, parent must be also + * in status_cache. Fragile, but seems better then blowing out full bounds + * cache or digging pathman_config on each relcache invalidation. + */ + + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin != NULL && psin->prel != NULL) + { + uint32 i; + PartRelationInfo *prel = psin->prel; + Oid *children = PrelGetChildrenArray(prel); + + for (i = 0; i < PrelChildrenCount(prel); i++) + { + forget_bounds_of_partition(children[i]); + } + } } /* Return partition's constraint as expression tree */ From dd7483b78fe07498dc2709380a2affc50db04543 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 25 Feb 2019 15:00:18 +0300 Subject: [PATCH 0985/1124] Bump lib version 1.5.5. 
--- META.json | 2 +- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/META.json b/META.json index 9321b82c..fa06948c 100644 --- a/META.json +++ b/META.json @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.4", + "version": "1.5.5", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 44f14d96..a9305a9e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.4 + 1.5.5 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 5b133d01..63586f6b 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.4" +#define CURRENT_LIB_VERSION "1.5.5" void *pathman_cache_search_relid(HTAB *cache_table, From 0299398bdbf2ebd35cf6b957c50d73dab5132561 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 28 Feb 2019 18:46:48 +0300 Subject: [PATCH 0986/1124] Wait 100 seconds, not 10 in concurrent partitioning test. Looks like RaspberryPi doesn't like boundary cache blowing added in 85fc5ccf121. 
--- expected/pathman_bgw.out | 2 +- sql/pathman_bgw.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 4166ef4e..5d5d2b21 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -212,7 +212,7 @@ BEGIN EXIT; -- exit loop END IF; - IF i > 50 THEN + IF i > 500 THEN RAISE WARNING 'looks like partitioning bgw is stuck!'; EXIT; -- exit loop END IF; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index e05a829d..28f922e6 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -126,7 +126,7 @@ BEGIN EXIT; -- exit loop END IF; - IF i > 50 THEN + IF i > 500 THEN RAISE WARNING 'looks like partitioning bgw is stuck!'; EXIT; -- exit loop END IF; From debe43dba40c80a869521a0f28756bce57e31146 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 4 Mar 2019 18:02:58 +0300 Subject: [PATCH 0987/1124] Forbid to partition tables with children. Since pathman doesn't check for children existence anyway and duplicates them. This doesn't explain 'attempted to update invisible tuple' in PGPRO-2507 though, but let's leave this for another time. --- init.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/init.sql b/init.sql index 7dab67d8..16ec0b8f 100644 --- a/init.sql +++ b/init.sql @@ -455,6 +455,10 @@ BEGIN RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; + IF EXISTS (SELECT 1 FROM pg_inherits WHERE inhparent = parent_relid) THEN + RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; + END IF; + /* Check if there are foreign keys that reference the relation */ FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint WHERE confrelid = parent_relid::REGCLASS::OID) From dbf8262dca5edfa375bafa6b7c36073fb0bfd6cc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 11 Mar 2019 21:01:43 +0300 Subject: [PATCH 0988/1124] A couple of sanity checks. 
One of them is pretty ugly -- we are checking out pathman_config each time during create_single_range_partition_internal. Also invalidate prel cache after manual add_to_pathman_config, just in case. --- src/partition_creation.c | 17 +++++++++++++++++ src/pl_funcs.c | 7 +++++++ 2 files changed, 24 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index fc950c4f..b41b2541 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -112,6 +112,23 @@ create_single_range_partition_internal(Oid parent_relid, init_callback_params callback_params; List *trigger_columns = NIL; Node *expr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + + /* + * Sanity check. Probably needed only if some absurd init_callback + * decides to drop the table while we are creating partitions. + * It seems much better to use prel cache here, but this doesn't work + * because it regards tables with no partitions as not partitioned at all + * (build_pathman_relation_info returns NULL), and if I comment out that, + * tests fail for not immediately obvious reasons. Don't want to dig + * into this now. 
+ */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) + { + elog(ERROR, "Can't create range partition: relid %u doesn't exist or not partitioned", parent_relid); + } /* Generate a name if asked to */ if (!partition_rv) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 7ca2ec0a..2f96b53f 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -252,6 +252,11 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) funccxt = SRF_FIRSTCALL_INIT(); + if (!TopPathmanContext) + { + elog(ERROR, "pg_pathman's memory contexts are not initialized yet"); + } + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); @@ -893,6 +898,8 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } } + CacheInvalidateRelcacheByRelid(relid); + PG_RETURN_BOOL(true); } From 50c8348639a299e6d2132e0e166df5b6a88f0acc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 1 Apr 2019 17:59:37 +0300 Subject: [PATCH 0989/1124] PG_TRY without PG_RE_THROW is no-no: remove it from is_tuple_convertible. Noticed by errordata_stack_depth overflow: log wasn't flushed. 
--- hash.sql | 6 ++++-- range.sql | 6 ++++-- src/pl_funcs.c | 30 ++++++++++++------------------ 3 files changed, 20 insertions(+), 22 deletions(-) diff --git a/hash.sql b/hash.sql index 0f694882..45c9b71d 100644 --- a/hash.sql +++ b/hash.sql @@ -94,9 +94,11 @@ BEGIN END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, new_partition); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; + END; /* Check that table is partitioned */ IF @extschema@.get_partition_key(parent_relid) IS NULL THEN diff --git a/range.sql b/range.sql index a014ed0f..5aeaad58 100644 --- a/range.sql +++ b/range.sql @@ -639,9 +639,11 @@ BEGIN /* Check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, partition_relid); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; + END; part_expr := @extschema@.get_partition_key(parent_relid); part_type := @extschema@.get_partition_type(parent_relid); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 2f96b53f..99f53bd5 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -661,38 +661,32 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } +/* + * Bail out with ERROR if rel1 tuple can't be converted to rel2 tuple. 
+ */ Datum is_tuple_convertible(PG_FUNCTION_ARGS) { Relation rel1, rel2; - bool res = true; + void *map; /* we don't actually need it */ rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); - PG_TRY(); - { - void *map; /* we don't actually need it */ - - /* Try to build a conversion map */ - map = convert_tuples_by_name_map(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); + /* Try to build a conversion map */ + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); - /* Now free map */ - pfree(map); - } - PG_CATCH(); - { - res = false; - } - PG_END_TRY(); + /* Now free map */ + pfree(map); heap_close(rel1, AccessShareLock); heap_close(rel2, AccessShareLock); - PG_RETURN_BOOL(res); + /* still return true to avoid changing tests */ + PG_RETURN_BOOL(true); } From 5f6de2310a886ae39aa26057a89b94fce0108bc7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 11:46:22 +0300 Subject: [PATCH 0990/1124] Protect from ATX. 
--- src/hooks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 90d84dc4..462d4c8c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -27,6 +27,7 @@ #include "xact_handling.h" #include "access/transam.h" +#include "access/xact.h" #include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" @@ -770,6 +771,11 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) if (!IsPathmanReady()) return; +#if defined(PGPRO_EE) + if (getNestLevelATX() != 0) + elog(FATAL, "pg_pathman extension is not compatible with autonomous transactions and connection pooling"); +#endif /* PGPRO_EE */ + /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) { From 9f51be4fc9a5637a478b8fc71779b2bded1efcc3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 15:48:14 +0300 Subject: [PATCH 0991/1124] Purge prel_resowner hashtable in fini_local_cache. Wobbling with it in 'resonwner_prel_callback' after all cache was purged is an error. 
--- src/include/relation_info.h | 3 ++- src/init.c | 6 ++++++ src/relation_info.c | 3 ++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 5b23cd3b..80b92740 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -401,6 +401,7 @@ void init_relation_info_static_data(void); /* For pg_pathman.enable_bounds_cache GUC */ extern bool pg_pathman_enable_bounds_cache; +extern HTAB *prel_resowner; /* This allows us to track leakers of PartRelationInfo */ #ifdef USE_RELINFO_LEAK_TRACKER @@ -419,7 +420,7 @@ extern int prel_resowner_line; close_pathman_relation_info(prel); \ prel = NULL; \ } while (0) -#endif +#endif /* USE_RELINFO_LEAK_TRACKER */ #endif /* RELATION_INFO_H */ diff --git a/src/init.c b/src/init.c index f6ddbdae..c80f118f 100644 --- a/src/init.c +++ b/src/init.c @@ -389,6 +389,12 @@ fini_local_cache(void) status_cache = NULL; bounds_cache = NULL; + if (prel_resowner != NULL) + { + hash_destroy(prel_resowner); + prel_resowner = NULL; + } + /* Now we can clear allocations */ MemoryContextReset(PathmanParentsCacheContext); MemoryContextReset(PathmanStatusCacheContext); diff --git a/src/relation_info.c b/src/relation_info.c index d524c168..30853165 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -129,8 +129,9 @@ static bool delayed_shutdown = false; /* pathman was dropped */ /* * PartRelationInfo is controlled by ResourceOwner; + * resowner -> List of controlled PartRelationInfos by this ResourceOwner */ -static HTAB *prel_resowner = NULL; +HTAB *prel_resowner = NULL; /* Handy wrappers for Oids */ From dddaa24ce4cc3dcad11dec02f1f423c00f69193b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 15:55:36 +0300 Subject: [PATCH 0992/1124] Relax FATAL to ERROR in atx check. 
--- src/hooks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hooks.c b/src/hooks.c index 462d4c8c..8db5b1c5 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -773,7 +773,7 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) #if defined(PGPRO_EE) if (getNestLevelATX() != 0) - elog(FATAL, "pg_pathman extension is not compatible with autonomous transactions and connection pooling"); + elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); #endif /* PGPRO_EE */ /* Process inlined SQL functions (we've already entered planning stage) */ From cbdde20507397f54e54ee7eb815b0f98c4b54133 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 2 Apr 2019 19:00:30 +0300 Subject: [PATCH 0993/1124] One more atx check inside executor. --- src/partition_filter.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/partition_filter.c b/src/partition_filter.c index 098a72a5..64685e0f 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -17,6 +17,7 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" @@ -733,6 +734,12 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; + /* If statement is prepared, parse_analyze hook won't catch this */ +#if defined(PGPRO_EE) + if (getNestLevelATX() != 0) + elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); +#endif /* PGPRO_EE */ + slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) From 7aa7d1c28b5d51a5c5f2278dd4e2a5ec92823ae7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 7 Apr 2019 08:18:08 +0300 Subject: [PATCH 0994/1124] Allow atx back again. 
--- src/hooks.c | 5 ----- src/include/partition_filter.h | 1 + src/partition_filter.c | 6 ------ 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 8db5b1c5..8409d4cf 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -771,11 +771,6 @@ pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) if (!IsPathmanReady()) return; -#if defined(PGPRO_EE) - if (getNestLevelATX() != 0) - elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); -#endif /* PGPRO_EE */ - /* Process inlined SQL functions (we've already entered planning stage) */ if (IsPathmanReady() && get_planner_calls_count() > 0) { diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index b3ecffeb..bf03433c 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -76,6 +76,7 @@ struct ResultPartsStorage EState *estate; /* pointer to executor's state */ CmdType command_type; /* INSERT | UPDATE */ + /* partition relid -> ResultRelInfoHolder */ HTAB *result_rels_table; HASHCTL result_rels_table_config; diff --git a/src/partition_filter.c b/src/partition_filter.c index 64685e0f..f905470e 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -734,12 +734,6 @@ partition_filter_exec(CustomScanState *node) PlanState *child_ps = (PlanState *) linitial(node->custom_ps); TupleTableSlot *slot; - /* If statement is prepared, parse_analyze hook won't catch this */ -#if defined(PGPRO_EE) - if (getNestLevelATX() != 0) - elog(ERROR, "pg_pathman extension is not compatible with autonomous transactions"); -#endif /* PGPRO_EE */ - slot = ExecProcNode(child_ps); if (!TupIsNull(slot)) From e251d2a4086befc76ec6c43f10b8c4a02a6c0e15 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 7 Apr 2019 11:27:36 +0300 Subject: [PATCH 0995/1124] Forbid 0 oid as partition_relid in build_range_condition. 
--- src/pl_range_funcs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 351926f7..0d3ca9d7 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -15,6 +15,7 @@ #include "utils.h" #include "xact_handling.h" +#include "access/transam.h" #include "access/xact.h" #include "catalog/heap.h" #include "catalog/namespace.h" @@ -1072,6 +1073,9 @@ build_range_condition(PG_FUNCTION_ARGS) } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition_relid' should not be NULL"))); + if (partition_relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' must be normal object oid"))); if (!PG_ARGISNULL(1)) { From 079797e0d5efe171c3ae10733d3c2f4977e37689 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 11 Apr 2019 12:57:24 +0300 Subject: [PATCH 0996/1124] Purge potentially created bounds cache entries if build_pathman_relation_info failed. Otherwise they might contain obsolete data, e.g. 
- create range partitioned table T with a couple of partitions - make pathman forget about it - create another table P inherited from previously partitioned one, but with no pathman constraints - attempt to add_to_pathman_config T as range partitioned table it will fail as P has no constraint, but might register other partitions in bounds cache - now add_to_pathman_config T as hash partitioned table will fail in attempt to use this cache (part_idx not initialized) --- src/relation_info.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index 30853165..848fd521 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -408,7 +408,7 @@ build_pathman_relation_info(Oid relid, Datum *values) prel->fresh = true; prel->mcxt = prel_mcxt; - /* Memory leak protection */ + /* Memory leak and cache protection */ PG_TRY(); { MemoryContext old_mcxt; @@ -496,6 +496,32 @@ build_pathman_relation_info(Oid relid, Datum *values) } PG_CATCH(); { + /* + * If we managed to create some children but failed later, bounds + * cache now might have obsolete data for something that probably is + * not a partitioned table at all. Remove it. + */ + if (prel->children != NULL) + { + uint32 i; + + for (i = 0; i < PrelChildrenCount(prel); i++) + { + Oid child; + + /* + * We rely on children and ranges array allocated with 0s, not + * random data + */ + if (prel->parttype == PT_HASH) + child = prel->children[i]; + else if (prel->parttype == PT_RANGE) + child = prel->ranges[i].child_oid; + + forget_bounds_of_partition(child); + } + } + /* Free this entry */ free_pathman_relation_info(prel); From 2ec39238ce76e7cdc3f6948ff6c2fae366cace05 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 18 Apr 2019 13:36:14 +0300 Subject: [PATCH 0997/1124] Check that pathman is initialized in get_pathman_config_relid. Also, check hash partitioned table consistency always, not under assert checking. 
--- src/pg_pathman.c | 28 ++++++++++++++++++---------- src/relation_info.c | 2 -- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1b65a832..daa1afdc 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -333,13 +333,17 @@ _PG_init(void) Oid get_pathman_config_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_relid)); return pathman_config_relid; } @@ -348,13 +352,17 @@ get_pathman_config_relid(bool invalid_is_ok) Oid get_pathman_config_params_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? 
- "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_params_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_params_relid)); return pathman_config_params_relid; } diff --git a/src/relation_info.c b/src/relation_info.c index 848fd521..0acb2e6d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -802,7 +802,6 @@ fill_prel_with_partitions(PartRelationInfo *prel, prel->children[i] = prel->ranges[i].child_oid; } -#ifdef USE_ASSERT_CHECKING /* Check that each partition Oid has been assigned properly */ if (prel->parttype == PT_HASH) for (i = 0; i < PrelChildrenCount(prel); i++) @@ -815,7 +814,6 @@ fill_prel_with_partitions(PartRelationInfo *prel, get_rel_name_or_relid(PrelParentRelid(prel))); } } -#endif } /* qsort() comparison function for RangeEntries */ From de0197de0de7c5c9a16382e0bd6399baa9a5de04 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 18 Apr 2019 18:12:17 +0300 Subject: [PATCH 0998/1124] Warn that pathman is disabled in case of broken hash partitioned table. 
--- src/relation_info.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index 0acb2e6d..5eb9fb40 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -809,9 +809,10 @@ fill_prel_with_partitions(PartRelationInfo *prel, if (!OidIsValid(prel->children[i])) { DisablePathman(); /* disable pg_pathman since config is broken */ - elog(ERROR, "pg_pathman's cache for relation \"%s\" " - "has not been properly initialized", - get_rel_name_or_relid(PrelParentRelid(prel))); + ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + "has not been properly initialized", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); } } } From e4025126474db48dd72f901f208fe1df6d548d53 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Apr 2019 13:16:09 +0300 Subject: [PATCH 0999/1124] Typos in attempting to find parent in partcache during partstatus invalidation. --- src/hooks.c | 6 +++--- src/relation_info.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 8409d4cf..bf9d0525 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -877,11 +877,11 @@ pathman_relcache_hook(Datum arg, Oid relid) /* Invalidate PartBoundInfo entry if needed */ forget_bounds_of_rel(relid); - /* Invalidate PartParentInfo entry if needed */ - forget_parent_of_partition(relid); - /* Invalidate PartStatusInfo entry if needed */ forget_status_of_relation(relid); + + /* Invalidate PartParentInfo entry if needed */ + forget_parent_of_partition(relid); } } diff --git a/src/relation_info.c b/src/relation_info.c index 5eb9fb40..988873d6 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -215,7 +215,7 @@ forget_status_of_relation(Oid relid) { /* Find status cache entry for parent */ psin = pathman_cache_search_relid(status_cache, - relid, HASH_FIND, + ppar->parent_relid, HASH_FIND, NULL); if (psin) invalidate_psin_entry(psin); From 
e2d29d2695cfb72f31c98e250f0ea0c5584ae9a4 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Apr 2019 13:32:45 +0300 Subject: [PATCH 1000/1124] Check for children array size sanity in fill_prel_with_partitions. ... which might be violated if hash partition was dropped. --- src/relation_info.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/relation_info.c b/src/relation_info.c index 988873d6..bb86cc6a 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -755,6 +755,18 @@ fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: + /* + * This might be the case if hash part was dropped, and thus + * children array alloc'ed smaller than needed, but parts + * bound cache still keeps entries with high indexes. + */ + if (pbin->part_idx >= PrelChildrenCount(prel)) + ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + "has not been properly initialized. " + "Looks like one of hash partitions was dropped.", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); + + prel->children[pbin->part_idx] = pbin->child_relid; break; From a53a60726c870f8cd556ef3de7238f6b7c9425f4 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Apr 2019 13:44:59 +0300 Subject: [PATCH 1001/1124] Fix previous commit: actually disable pathman in case of trouble. --- src/relation_info.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/relation_info.c b/src/relation_info.c index bb86cc6a..9e20e93d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -761,11 +761,14 @@ fill_prel_with_partitions(PartRelationInfo *prel, * bound cache still keeps entries with high indexes. */ if (pbin->part_idx >= PrelChildrenCount(prel)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " "has not been properly initialized. 
" "Looks like one of hash partitions was dropped.", get_rel_name_or_relid(PrelParentRelid(prel))), errhint(INIT_ERROR_HINT))); + } prel->children[pbin->part_idx] = pbin->child_relid; break; From 3556ae3165917a5e05410c0866c9dcca95d9cdf0 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Apr 2019 17:49:49 +0300 Subject: [PATCH 1002/1124] A couple of tests for recent commits. --- expected/pathman_basic.out | 58 ++++++++++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 30 ++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0ae1ae6a..289b239e 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,6 +1804,64 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +-- is pathman (caches, in particular) strong enough to carry out this? +-- 079797e0d5 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('test.part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT drop_partitions('test.part_test'); +ERROR: table "test.part_test" has no partitions +SELECT disable_pathman_for('test.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('test.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('test.part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE test.part_test CASCADE; +NOTICE: drop cascades to 5 
other objects +-- +-- 85fc5ccf121 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('test.part_test'); + append_range_partition +------------------------ + test.part_test_301 +(1 row) + +DELETE FROM test.part_test; +SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "test.part_test" with existing children +DROP TABLE test.part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 5a0c471d..a9c6f1a3 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,6 +546,36 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +-- is pathman (caches, in particular) strong enough to carry out this? 
+ +-- 079797e0d5 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); +SELECT set_interval('test.part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT drop_partitions('test.part_test'); +SELECT disable_pathman_for('test.part_test'); + +CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); +SELECT add_to_pathman_config('test.part_test', 'val', '10'); +SELECT add_to_pathman_config('test.part_test', 'val'); + +DROP TABLE test.part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE test.part_test(val serial); +INSERT INTO test.part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('test.part_test', 'val', 1, 10); +SELECT append_range_partition('test.part_test'); +DELETE FROM test.part_test; +SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; +SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE test.part_test CASCADE; +-- DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; From 9c2ab5fa0c20148937dc663961245b533a128ba3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 22 Apr 2019 19:18:26 +0300 Subject: [PATCH 1003/1124] Unify unload_config and DisablePathman and do it (purge caches) on enable = false. Probably there used to be an idea that with 'set pg_pathman = false' you can just temporary disable planning hooks etc without resetting pathman caches, but that never worked properly. At least, pathman_relcache_hook refuses to inval cache if it is disabled. 
Also, if extension is disabled we never get to unload_config, which means you could - disable pathman (guc disabled, but caches not destroyed) - drop extension (still caches are here) - create it back again, now cached relids are wrong With some care we could totally separate them by maintaining caches even when pathman is disabled, but is it worth it? --- Makefile | 1 + expected/pathman_basic.out | 58 ------------------------ expected/pathman_cache_pranks.out | 75 +++++++++++++++++++++++++++++++ sql/pathman_basic.sql | 31 ------------- sql/pathman_cache_pranks.sql | 49 ++++++++++++++++++++ src/hooks.c | 20 ++++++++- src/include/init.h | 2 +- src/init.c | 9 +++- src/pg_pathman.c | 13 ++---- src/pl_funcs.c | 2 +- src/relation_info.c | 7 +++ 11 files changed, 164 insertions(+), 103 deletions(-) create mode 100644 expected/pathman_cache_pranks.out create mode 100644 sql/pathman_cache_pranks.sql diff --git a/Makefile b/Makefile index 80f74e7f..c1281871 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,7 @@ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" REGRESS = pathman_array_qual \ pathman_basic \ pathman_bgw \ + pathman_cache_pranks \ pathman_calamity \ pathman_callbacks \ pathman_column_type \ diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 289b239e..0ae1ae6a 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,64 +1804,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects --- is pathman (caches, in particular) strong enough to carry out this? 
--- 079797e0d5 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 30); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); - create_range_partitions -------------------------- - 3 -(1 row) - -SELECT set_interval('test.part_test', 100); - set_interval --------------- - -(1 row) - -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT drop_partitions('test.part_test'); -ERROR: table "test.part_test" has no partitions -SELECT disable_pathman_for('test.part_test'); - disable_pathman_for ---------------------- - -(1 row) - -CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); -NOTICE: merging column "val" with inherited definition -SELECT add_to_pathman_config('test.part_test', 'val', '10'); -ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist -SELECT add_to_pathman_config('test.part_test', 'val'); -ERROR: wrong constraint format for HASH partition "part_test_1" -DROP TABLE test.part_test CASCADE; -NOTICE: drop cascades to 5 other objects --- --- 85fc5ccf121 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 3000); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); - create_range_partitions -------------------------- - 300 -(1 row) - -SELECT append_range_partition('test.part_test'); - append_range_partition ------------------------- - test.part_test_301 -(1 row) - -DELETE FROM test.part_test; -SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ -ERROR: cannot create partition with range (-inf, +inf) -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ -ERROR: can't partition table "test.part_test" with existing children -DROP TABLE test.part_test CASCADE; -NOTICE: drop cascades to 302 other objects 
--- DROP SCHEMA test CASCADE; NOTICE: drop cascades to 28 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out new file mode 100644 index 00000000..3ed9570f --- /dev/null +++ b/expected/pathman_cache_pranks.out @@ -0,0 +1,75 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? +SET search_path = 'public'; +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; 
+NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a9c6f1a3..6d2e52e1 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,37 +546,6 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; --- is pathman (caches, in particular) strong enough to carry out this? 
- --- 079797e0d5 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 30); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); -SELECT set_interval('test.part_test', 100); -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT drop_partitions('test.part_test'); -SELECT disable_pathman_for('test.part_test'); - -CREATE TABLE test.wrong_partition (LIKE test.part_test) INHERITS (test.part_test); -SELECT add_to_pathman_config('test.part_test', 'val', '10'); -SELECT add_to_pathman_config('test.part_test', 'val'); - -DROP TABLE test.part_test CASCADE; --- - --- 85fc5ccf121 -CREATE TABLE test.part_test(val serial); -INSERT INTO test.part_test SELECT generate_series(1, 3000); -SELECT create_range_partitions('test.part_test', 'val', 1, 10); -SELECT append_range_partition('test.part_test'); -DELETE FROM test.part_test; -SELECT create_single_range_partition('test.part_test', NULL::INT4, NULL); /* not ok */ -DELETE FROM pathman_config WHERE partrel = 'test.part_test'::REGCLASS; -SELECT create_hash_partitions('test.part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ - -DROP TABLE test.part_test CASCADE; --- - DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql new file mode 100644 index 00000000..f21f0594 --- /dev/null +++ b/sql/pathman_cache_pranks.sql @@ -0,0 +1,49 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? 
+ +SET search_path = 'public'; + +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; + +-- create it for further tests +CREATE EXTENSION pg_pathman; + +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT set_interval('part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +SELECT disable_pathman_for('part_test'); + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +SELECT add_to_pathman_config('part_test', 'val', '10'); +SELECT add_to_pathman_config('part_test', 'val'); + +DROP TABLE part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT append_range_partition('part_test'); +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE part_test CASCADE; +-- + +-- finalize +DROP EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index bf9d0525..2a0543bc 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -615,6 +615,12 @@ pathman_enable_assign_hook(bool newval, void *extra) "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " "and some other options have been %s", newval ? 
"enabled" : "disabled"); + + /* Purge caches if pathman was disabled */ + if (!newval) + { + unload_config(); + } } static void @@ -850,6 +856,8 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { + Oid pathman_config_relid; + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; @@ -863,10 +871,18 @@ pathman_relcache_hook(Datum arg, Oid relid) invalidate_bounds_cache(); invalidate_parents_cache(); invalidate_status_cache(); + delay_pathman_shutdown(); /* see below */ } - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ - if (relid == get_pathman_config_relid(false)) + /* + * Invalidation event for PATHMAN_CONFIG table (probably DROP EXTENSION). + * Digging catalogs here is expensive and probably illegal, so we take + * cached relid. It is possible that we don't know it atm (e.g. pathman + * was disabled). However, in this case caches must have been cleaned + * on disable, and there is no DROP-specific additional actions. + */ + pathman_config_relid = get_pathman_config_relid(true); + if (relid == pathman_config_relid) { delay_pathman_shutdown(); } diff --git a/src/include/init.h b/src/include/init.h index 63586f6b..25432840 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -139,7 +139,7 @@ simplify_mcxt_name(MemoryContext mcxt) pathman_init_state.pg_pathman_enable = false; \ pathman_init_state.auto_partition = false; \ pathman_init_state.override_copy = false; \ - pathman_init_state.initialization_needed = true; \ + unload_config(); \ } while (0) diff --git a/src/init.c b/src/init.c index c80f118f..f1ed689c 100644 --- a/src/init.c +++ b/src/init.c @@ -134,7 +134,14 @@ save_pathman_init_state(PathmanInitState *temp_init_state) void restore_pathman_init_state(const PathmanInitState *temp_init_state) { - pathman_init_state = *temp_init_state; + /* + * initialization_needed is not restored: it is not just a setting but + * internal thing, caches must be inited when it is set. 
Better would be + * to separate it from this struct entirely. + */ + pathman_init_state.pg_pathman_enable = temp_init_state->pg_pathman_enable; + pathman_init_state.auto_partition = temp_init_state->auto_partition; + pathman_init_state.override_copy = temp_init_state->override_copy; } /* diff --git a/src/pg_pathman.c b/src/pg_pathman.c index daa1afdc..3511a243 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -284,8 +284,6 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) void _PG_init(void) { - PathmanInitState temp_init_state; - if (!process_shared_preload_libraries_in_progress) { elog(ERROR, "pg_pathman module must be initialized by Postmaster. " @@ -297,13 +295,10 @@ _PG_init(void) RequestAddinShmemSpace(estimate_pathman_shmem_size()); /* Assign pg_pathman's initial state */ - temp_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; - temp_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; - temp_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; - temp_init_state.initialization_needed = true; /* ofc it's needed! */ - - /* Apply initial state */ - restore_pathman_init_state(&temp_init_state); + pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; + pathman_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; + pathman_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; + pathman_init_state.initialization_needed = true; /* ofc it's needed! 
*/ /* Set basic hooks */ pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 99f53bd5..c302089e 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -859,7 +859,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } PG_CATCH(); { - /* We have to restore all changed flags */ + /* We have to restore changed flags */ restore_pathman_init_state(&init_state); /* Rethrow ERROR */ diff --git a/src/relation_info.c b/src/relation_info.c index 9e20e93d..d2d95351 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -501,6 +501,13 @@ build_pathman_relation_info(Oid relid, Datum *values) * cache now might have obsolete data for something that probably is * not a partitioned table at all. Remove it. */ + if (!IsPathmanInitialized()) + /* + * ... unless failure was so hard that caches were already destoyed, + * i.e. extension disabled + */ + PG_RE_THROW(); + if (prel->children != NULL) { uint32 i; From 09914f44f11548394966986d81d5fe8755382d25 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 07:22:07 +0300 Subject: [PATCH 1004/1124] Reset internal ctxs in fini_local_cache only when they actually exist. 
--- expected/pathman_cache_pranks.out | 5 +++++ sql/pathman_cache_pranks.sql | 4 ++++ src/init.c | 9 ++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 3ed9570f..581063bc 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -14,6 +14,11 @@ CREATE EXTENSION pg_pathman; DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; +-- make sure nothing breaks on disable/enable +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index f21f0594..3f3dd714 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -16,6 +16,10 @@ DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; +-- make sure nothing breaks on disable/enable +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; + -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/src/init.c b/src/init.c index f1ed689c..92d2d213 100644 --- a/src/init.c +++ b/src/init.c @@ -403,9 +403,12 @@ fini_local_cache(void) } /* Now we can clear allocations */ - MemoryContextReset(PathmanParentsCacheContext); - MemoryContextReset(PathmanStatusCacheContext); - MemoryContextReset(PathmanBoundsCacheContext); + if (TopPathmanContext) + { + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); + } } From 2dd78a56cc0d20aff33e37346670d0289c2ea345 Mon Sep 17 
00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 07:37:28 +0300 Subject: [PATCH 1005/1124] Make test from previous commit more useful. --- expected/pathman_cache_pranks.out | 10 +++++----- sql/pathman_cache_pranks.sql | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 581063bc..5493ae96 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -1,6 +1,11 @@ \set VERBOSITY terse -- is pathman (caches, in particular) strong enough to carry out this? SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- wobble with create-drop ext: tests cached relids sanity CREATE EXTENSION pg_pathman; SET pg_pathman.enable = f; @@ -14,11 +19,6 @@ CREATE EXTENSION pg_pathman; DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; --- make sure nothing breaks on disable/enable -SET pg_pathman.enable = false; -NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled -SET pg_pathman.enable = true; -NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index 3f3dd714..782ef7f0 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -3,6 +3,10 @@ SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; 
+SET pg_pathman.enable = true; + -- wobble with create-drop ext: tests cached relids sanity CREATE EXTENSION pg_pathman; SET pg_pathman.enable = f; @@ -16,10 +20,6 @@ DROP EXTENSION pg_pathman; -- create it for further tests CREATE EXTENSION pg_pathman; --- make sure nothing breaks on disable/enable -SET pg_pathman.enable = false; -SET pg_pathman.enable = true; - -- 079797e0d5 CREATE TABLE part_test(val serial); INSERT INTO part_test SELECT generate_series(1, 30); From 32fd6116fe9ee895981156a211d44a139d152159 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 12:26:30 +0300 Subject: [PATCH 1006/1124] Remove wrong fastpath in pathman_enable_assign_hook. If one of options is disabled, but pathman generally enabled and disable SET comes, we actually need to take some actions. --- src/hooks.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 2a0543bc..30be29e7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -594,16 +594,6 @@ pathman_enable_assign_hook(bool newval, void *extra) elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? "true" : "false"); - /* Return quickly if nothing has changed */ - if (newval == (pathman_init_state.pg_pathman_enable && - pathman_init_state.auto_partition && - pathman_init_state.override_copy && - pg_pathman_enable_runtimeappend && - pg_pathman_enable_runtime_merge_append && - pg_pathman_enable_partition_filter && - pg_pathman_enable_bounds_cache)) - return; - pathman_init_state.auto_partition = newval; pathman_init_state.override_copy = newval; pg_pathman_enable_runtimeappend = newval; From 38bf80ef86676f5bc0ecade1ead49074c4f0f0ff Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 23 Apr 2019 13:18:24 +0300 Subject: [PATCH 1007/1124] Previous commit fix: but still notify about change. 
--- src/hooks.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 30be29e7..854d422b 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -594,6 +594,21 @@ pathman_enable_assign_hook(bool newval, void *extra) elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? "true" : "false"); + if (!(newval == pathman_init_state.pg_pathman_enable && + newval == pathman_init_state.auto_partition && + newval == pathman_init_state.override_copy && + newval == pg_pathman_enable_runtimeappend && + newval == pg_pathman_enable_runtime_merge_append && + newval == pg_pathman_enable_partition_filter && + newval == pg_pathman_enable_bounds_cache)) + { + elog(NOTICE, + "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " + "and some other options have been %s", + newval ? "enabled" : "disabled"); + } + + pathman_init_state.auto_partition = newval; pathman_init_state.override_copy = newval; pg_pathman_enable_runtimeappend = newval; @@ -601,11 +616,6 @@ pathman_enable_assign_hook(bool newval, void *extra) pg_pathman_enable_partition_filter = newval; pg_pathman_enable_bounds_cache = newval; - elog(NOTICE, - "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " - "and some other options have been %s", - newval ? "enabled" : "disabled"); - /* Purge caches if pathman was disabled */ if (!newval) { From ce72bc725bed3bd7e4f3a98087908cd6123f22d7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 29 Apr 2019 13:15:21 +0300 Subject: [PATCH 1008/1124] Don't use wiped memory for reporting offended rel in fill_prel_with_partitions. 
--- src/relation_info.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/relation_info.c b/src/relation_info.c index d2d95351..f2d17371 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -769,11 +769,14 @@ fill_prel_with_partitions(PartRelationInfo *prel, */ if (pbin->part_idx >= PrelChildrenCount(prel)) { + /* purged caches will destoy prel, save oid for reporting */ + Oid parent_relid = PrelParentRelid(prel); + DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + ereport(ERROR, (errmsg("pg_pathman's cache for relation %d " "has not been properly initialized. " "Looks like one of hash partitions was dropped.", - get_rel_name_or_relid(PrelParentRelid(prel))), + parent_relid), errhint(INIT_ERROR_HINT))); } From d7cce4732a058c5fb9618577b41a8df44205bc8f Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 29 Apr 2019 17:04:48 +0300 Subject: [PATCH 1009/1124] Silence clang checker in build_pathman_relation_info bounds cleanup. --- src/relation_info.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index f2d17371..2a2548d1 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -522,8 +522,11 @@ build_pathman_relation_info(Oid relid, Datum *values) */ if (prel->parttype == PT_HASH) child = prel->children[i]; - else if (prel->parttype == PT_RANGE) + else + { + Assert(prel->parttype == PT_RANGE) child = prel->ranges[i].child_oid; + } forget_bounds_of_partition(child); } From 2a13ed7d7f73b19807e48ee271345546a22939fa Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 29 Apr 2019 17:08:03 +0300 Subject: [PATCH 1010/1124] Fixup for previous blind commit. 
--- src/relation_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relation_info.c b/src/relation_info.c index 2a2548d1..d24af71d 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -524,7 +524,7 @@ build_pathman_relation_info(Oid relid, Datum *values) child = prel->children[i]; else { - Assert(prel->parttype == PT_RANGE) + Assert(prel->parttype == PT_RANGE); child = prel->ranges[i].child_oid; } From ba302016ce1a65c23228dff64d99aa02bb12b708 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 2 Jun 2019 22:31:37 +0300 Subject: [PATCH 1011/1124] Add tableoid junk column to processed_tlist only for top-level parent. (Note that the column is added to RelOptInfo reltarget of all nodes in the hierarchy anyway; but only tle with top-level varno must get to *main* tlist, i.e. processed_tlist.) Previously it was added for each parent in the tree, i.e. multiple times in case of multi-level partitioning, leading to ERROR: variable not found in subplan target lists errors in setref. As comments say, this code better be rewritten to actually let parent deal with its isParent flag. And to recurce with toplever rc. Probably should be done if adjacent bugs arise. 
--- expected/pathman_subpartitions.out | 33 ++++++++++++++++++++++++++++++ sql/pathman_subpartitions.sql | 18 +++++++++++++++- src/include/rangeset.h | 6 ++++-- src/pg_pathman.c | 17 ++++++++++++++- 4 files changed, 70 insertions(+), 4 deletions(-) diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 4dd5f5dd..a876b457 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -424,6 +424,39 @@ UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; DROP TABLE subpartitions.abc CASCADE; NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; DROP SCHEMA subpartitions CASCADE; NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) DROP EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index b790c20e..05ac9614 100644 --- a/sql/pathman_subpartitions.sql +++ 
b/sql/pathman_subpartitions.sql @@ -139,8 +139,24 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; SET pg_pathman.enable_partitionrouter = ON; UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +DROP TABLE subpartitions.abc CASCADE; + + +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); + +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + +DROP TABLE subpartitions.a2 CASCADE; +DROP TABLE subpartitions.a1; -DROP TABLE subpartitions.abc CASCADE; DROP SCHEMA subpartitions CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 96d6bc21..39db6a53 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -1,7 +1,6 @@ /* ------------------------------------------------------------------------ * * rangeset.h - * IndexRange functions * * Copyright (c) 2015-2016, Postgres Professional * @@ -17,7 +16,10 @@ /* - * IndexRange contains a set of selected partitions. + * IndexRange is essentially a segment [lower; upper]. This module provides + * functions for efficient working (intersection, union) with Lists of + * IndexRange's; this is used for quick selection of partitions. Numbers are + * indexes of partitions in PartRelationInfo's children. */ typedef struct { /* lossy == should we use quals? 
*/ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3511a243..2cd17c6a 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -437,7 +437,7 @@ append_child_relation(PlannerInfo *root, Relation child_relation; AppendRelInfo *appinfo; Index child_rti; - PlanRowMark *child_rowmark; + PlanRowMark *child_rowmark = NULL; Node *childqual; List *childquals; ListCell *lc1, @@ -493,6 +493,10 @@ append_child_relation(PlannerInfo *root, /* Create rowmarks required for child rels */ + /* + * XXX: vanilla recurses down with *top* rowmark, not immediate parent one. + * Not sure about example where this matters though. + */ if (parent_rowmark) { child_rowmark = makeNode(PlanRowMark); @@ -511,6 +515,13 @@ append_child_relation(PlannerInfo *root, root->rowMarks = lappend(root->rowMarks, child_rowmark); /* Adjust tlist for RowMarks (see planner.c) */ + /* + * XXX Saner approach seems to + * 1) Add tle to top parent and processed_tlist once in rel_pathlist_hook. + * 2) Mark isParent = true + * *parent* knows it is parent, after all; why should child bother? + * 3) Recursion (code executed in childs) starts at 2) + */ if (!parent_rowmark->isParent && !root->parse->setOperations) { append_tle_for_rowmark(root, parent_rowmark); @@ -636,6 +647,10 @@ append_child_relation(PlannerInfo *root, if (parent_rte->relid != child_oid && child_relation->rd_rel->relhassubclass) { + /* See XXX above */ + if (child_rowmark) + child_rowmark->isParent = true; + pathman_rel_pathlist_hook(root, child_rel, child_rti, From b530684c8c6208d795ee18351f20202bf48f85f6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 2 Jun 2019 22:46:01 +0300 Subject: [PATCH 1012/1124] Bump 1.5.6 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index fa06948c..544b130d 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.4", + "version": "1.5.6", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.5", + "version": "1.5.6", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index a9305a9e..5e72a8e1 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.5 + 1.5.6 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 25432840..8431f70d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.5" +#define CURRENT_LIB_VERSION "1.5.6" void *pathman_cache_search_relid(HTAB *cache_table, From 43fd918a8dac582c39f4d29e573621b13f68af26 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 3 Jun 2019 12:21:30 +0300 Subject: [PATCH 1013/1124] Heal segfault in handle_modification_query. To encounter this, you need to parse utility query after entering planning. 
--- src/pg_pathman.c | 1 + src/planner_tree_modification.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 2cd17c6a..7764aa94 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1965,6 +1965,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, /* * set_append_rel_pathlist * Build access paths for an "append relation" + * Similar to PG function with the same name. * * NOTE: this function is 'public' (used in hooks.c) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index f40c152f..4766ded1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -516,7 +516,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) { RangeTblEntry *rte; Oid child; - Node *quals = parse->jointree->quals; + Node *quals; Index result_rti = parse->resultRelation; ParamListInfo params = context->query_params; @@ -525,6 +525,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) parse->commandType != CMD_DELETE)) return; + /* can't set earlier because CMD_UTILITY doesn't have jointree */ + quals = parse->jointree->quals; rte = rt_fetch(result_rti, parse->rtable); /* Exit if it's ONLY table */ From 9e5f1eaf22e921bed8b58b8a988d7b732c817e7f Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 7 Jun 2019 08:32:08 +0300 Subject: [PATCH 1014/1124] Disable pathman for SELECT FOR SHARE/UPDATE clauses on 9.5. Noticed after ba302016ce1a65 subpartitions test failed on 9.5. 
--- src/hooks.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/hooks.c b/src/hooks.c index 854d422b..4086fabd 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -352,6 +352,15 @@ pathman_rel_pathlist_hook(PlannerInfo *root, if (root->parse->commandType != CMD_SELECT && root->parse->commandType != CMD_INSERT) return; + + /* SELECT FOR SHARE/UPDATE is not handled by above check */ + foreach(lc, root->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + + if (rc->rti == rti) + return; + } #endif /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ From f093e76586f8fce4479775abed1dc003e1a63365 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 7 Jun 2019 08:36:02 +0300 Subject: [PATCH 1015/1124] Bump 1.5.7 lib version. --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 544b130d..5c689c9c 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.6", + "version": "1.5.7", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.6", + "version": "1.5.7", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 5e72a8e1..e7999731 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.6 + 1.5.7 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 8431f70d..e636821c 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ 
simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.6" +#define CURRENT_LIB_VERSION "1.5.7" void *pathman_cache_search_relid(HTAB *cache_table, From dbcbd02e411e6acea6d97f572234746007979538 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 11 Jun 2019 17:15:13 +0300 Subject: [PATCH 1016/1124] Pass create_single_range_partition through SPI in spawn_partitions_val. To allow DDL-wobbling extensions (mtm, in particular) to intercept it (yeah, through targetlist analysis. Seems like this is the only place performing DDL not covered by declarative partitioning (which can be handled as utility statements). Stas Kelvich. --- range.sql | 3 +-- src/partition_creation.c | 52 ++++++++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/range.sql b/range.sql index 5aeaad58..ef439cee 100644 --- a/range.sql +++ b/range.sql @@ -820,8 +820,7 @@ CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' -LANGUAGE C -SET client_min_messages = WARNING; +LANGUAGE C; /* * Construct CHECK constraint condition for a range partition. 
diff --git a/src/partition_creation.c b/src/partition_creation.c index b41b2541..47f16ca6 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -32,6 +32,7 @@ #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "commands/trigger.h" +#include "executor/spi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_func.h" @@ -595,6 +596,14 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_lt(&cmp_value_bound_finfo, collid, value, cur_leading_bound)) { Bound bounds[2]; + int rc; + bool isnull; + char *create_sql; + HeapTuple typeTuple; + char *typname; + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -607,10 +616,45 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[0] = MakeBound(should_append ? cur_following_bound : cur_leading_bound); bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); - last_partition = create_single_range_partition_internal(parent_relid, - &bounds[0], &bounds[1], - range_bound_type, - NULL, NULL); + /* + * Instead of directly calling create_single_range_partition_internal() + * we are going to call it through SPI, to make it possible for various + * DDL-replicating extensions to catch that call and do something about + * it. 
--sk + */ + + /* Get typname of range_bound_type to perform cast */ + typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); + Assert(HeapTupleIsValid(typeTuple)); + typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); + ReleaseSysCache(typeTuple); + + /* Construct call to create_single_range_partition() */ + create_sql = psprintf( + "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", + get_namespace_name(get_pathman_schema()), + parent_nsp_name, + get_rel_name(parent_relid), + IsInfinite(&bounds[0]) ? "NULL" : datum_to_cstring(bounds[0].value, range_bound_type), + typname, + IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), + typname, + parent_nsp_name, + partition_name + ); + + /* ...and call it. */ + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + rc = SPI_execute(create_sql, false, 0); + if (rc <= 0 || SPI_processed != 1) + elog(ERROR, "Failed to create range partition"); + last_partition = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isnull)); + Assert(!isnull); + SPI_finish(); + PopActiveSnapshot(); #ifdef USE_ASSERT_CHECKING elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", From 3fa5d08ac510e4156c6f6d7cd80b3689e211f1d2 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2019 16:59:35 +0300 Subject: [PATCH 1017/1124] Forbid to spawn partitions using bgw from bgw spawning partitions. 
--- src/pathman_workers.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 69f5db3b..30ecf6a2 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -84,6 +84,9 @@ static const char *spawn_partitions_bgw = "SpawnPartitionsWorker"; static const char *concurrent_part_bgw = "ConcurrentPartWorker"; +/* Used for preventing spawn bgw recursion trouble */ +static bool am_spawn_bgw = false; + /* * Estimate amount of shmem needed for concurrent partitioning. */ @@ -312,6 +315,11 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) SpawnPartitionArgs *bgw_args; Oid child_oid = InvalidOid; + if (am_spawn_bgw) + ereport(ERROR, + (errmsg("Attempt to spawn partition using bgw from bgw spawning partitions"), + errhint("Probably init_callback has INSERT to its table?"))); + /* Create a dsm segment for the worker to pass arguments */ segment = create_partitions_bg_worker_segment(relid, value, value_type); segment_handle = dsm_segment_handle(segment); @@ -363,6 +371,8 @@ bgw_main_spawn_partitions(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + am_spawn_bgw = true; + /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, spawn_partitions_bgw); From e10b7d84fa2dac07c06b2c2a4b43b73c5eb6b74c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 10:19:56 +0300 Subject: [PATCH 1018/1124] Streamline error handling in spawn partitions worker bgw. Avoid intercepting error in it. Standard bgw error handling code should be enough: it emits error and exits cleanly. 
--- src/include/partition_creation.h | 3 +- src/partition_creation.c | 205 +++++++++++++------------------ src/pathman_workers.c | 20 +-- 3 files changed, 99 insertions(+), 129 deletions(-) diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 63768a95..cc666923 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -24,8 +24,7 @@ /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); /* Create one RANGE partition */ diff --git a/src/partition_creation.c b/src/partition_creation.c index 47f16ca6..e0cb40e0 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -315,8 +315,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); last_partition = create_partitions_for_value_internal(relid, value, - value_type, - false); /* backend */ + value_type); } } else @@ -348,147 +347,119 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * use create_partitions_for_value() instead. 
*/ Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker) +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - PG_TRY(); + /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? */ + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ - /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) - { - PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? 
*/ - Oid base_bound_type; /* base type of prel->ev_type */ - Oid base_value_type; /* base type of value_type */ - - /* Prevent modifications of partitioning scheme */ - lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); + /* Prevent modifications of partitioning scheme */ + lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_RANGE); + /* Fetch PartRelationInfo by 'relid' */ + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_RANGE); - /* Fetch base types of prel->ev_type & value_type */ - base_bound_type = getBaseType(prel->ev_type); - base_value_type = getBaseType(value_type); + /* Fetch base types of prel->ev_type & value_type */ + base_bound_type = getBaseType(prel->ev_type); + base_value_type = getBaseType(value_type); - /* - * Search for a suitable partition if we didn't hold it, - * since somebody might have just created it for us. - * - * If the table is locked, it means that we've - * already failed to find a suitable partition - * and called this function to do the job. - */ - Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); - if (lock_result == LOCKACQUIRE_OK) - { - Oid *parts; - int nparts; - - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - /* Shout if there's more than one */ - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - - /* It seems that we got a partition! */ - else if (nparts == 1) - { - /* Unlock the parent (we're not going to spawn) */ - UnlockRelationOid(relid, ShareUpdateExclusiveLock); + /* + * Search for a suitable partition if we didn't hold it, + * since somebody might have just created it for us. + * + * If the table is locked, it means that we've + * already failed to find a suitable partition + * and called this function to do the job. 
+ */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; - /* Simply return the suitable partition */ - partid = parts[0]; - } + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); - /* Don't forget to free */ - pfree(parts); - } + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); - /* Else spawn a new one (we hold a lock on the parent) */ - if (partid == InvalidOid) + /* It seems that we got a partition! */ + else if (nparts == 1) { - RangeEntry *ranges = PrelGetRangesArray(prel); - Bound bound_min, /* absolute MIN */ - bound_max; /* absolute MAX */ + /* Unlock the parent (we're not going to spawn) */ + UnlockRelationOid(relid, ShareUpdateExclusiveLock); - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of one partition */ - interval_text; + /* Simply return the suitable partition */ + partid = parts[0]; + } - /* Copy datums in order to protect them from cache invalidation */ - bound_min = CopyBound(&ranges[0].min, - prel->ev_byval, - prel->ev_len); + /* Don't forget to free */ + pfree(parts); + } - bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, - prel->ev_byval, - prel->ev_len); + /* Else spawn a new one (we hold a lock on the parent) */ + if (partid == InvalidOid) + { + RangeEntry *ranges = PrelGetRangesArray(prel); + Bound bound_min, /* absolute MIN */ + bound_max; /* absolute MAX */ - /* Check if interval is set */ - if (isnull[Anum_pathman_config_range_interval - 1]) - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot spawn new partition for key '%s'", - datum_to_cstring(value, value_type)), - errdetail("default range interval is NULL"))); - } + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; - /* Retrieve interval as TEXT from tuple */ - interval_text = 
values[Anum_pathman_config_range_interval - 1]; + /* Copy datums in order to protect them from cache invalidation */ + bound_min = CopyBound(&ranges[0].min, + prel->ev_byval, + prel->ev_len); - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - base_bound_type, - &interval_type); + bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, + prel->ev_byval, + prel->ev_len); - /* At last, spawn partitions to store the value */ - partid = spawn_partitions_val(PrelParentRelid(prel), - &bound_min, &bound_max, base_bound_type, - interval_binary, interval_type, - value, base_value_type, - prel->ev_collid); + /* Check if interval is set */ + if (isnull[Anum_pathman_config_range_interval - 1]) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot spawn new partition for key '%s'", + datum_to_cstring(value, value_type)), + errdetail("default range interval is NULL"))); } - /* Don't forget to close 'prel'! 
*/ - close_pathman_relation_info(prel); - } - else - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(relid)); - } - PG_CATCH(); - { - ErrorData *error; + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; - /* Simply rethrow ERROR if we're in backend */ - if (!is_background_worker) - PG_RE_THROW(); + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_bound_type, + &interval_type); - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); - - /* Produce log message if we're in BGW */ - elog(LOG, - CppAsString(create_partitions_for_value_internal) ": %s [%u]", - error->message, - MyProcPid); + /* At last, spawn partitions to store the value */ + partid = spawn_partitions_val(PrelParentRelid(prel), + &bound_min, &bound_max, base_bound_type, + interval_binary, interval_type, + value, base_value_type, + prel->ev_collid); + } - /* Reset 'partid' in case of error */ - partid = InvalidOid; + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); } - PG_END_TRY(); + else + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); return partid; } diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 30ecf6a2..ae6d13b9 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -364,6 +364,7 @@ bgw_main_spawn_partitions(Datum main_arg) dsm_segment *segment; SpawnPartitionArgs *args; Datum value; + Oid result; /* Establish signal handlers before unblocking signals. 
*/ pqsignal(SIGTERM, handle_sigterm); @@ -415,18 +416,17 @@ bgw_main_spawn_partitions(Datum main_arg) DebugPrintDatum(value, args->value_type), MyProcPid); #endif - /* Create partitions and save the Oid of the last one */ - args->result = create_partitions_for_value_internal(args->partitioned_table, - value, /* unpacked Datum */ - args->value_type, - true); /* background woker */ + /* + * Create partitions and save the Oid of the last one. + * If we fail here, args->result is 0 since it is zeroed on initialization. + */ + result = create_partitions_for_value_internal(args->partitioned_table, + value, /* unpacked Datum */ + args->value_type); /* Finish transaction in an appropriate way */ - if (args->result == InvalidOid) - AbortCurrentTransaction(); - else - CommitTransactionCommand(); - + CommitTransactionCommand(); + args->result = result; dsm_detach(segment); } From 3176805567e4b19f864c5fa7909e3cb210eedd1c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 11:42:06 +0300 Subject: [PATCH 1019/1124] Unused var warning. --- src/partition_creation.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e0cb40e0..bea41379 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -349,7 +349,6 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { - MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; From b75fbe71ec1095d2eebcbc5d2e2e7786408cf761 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2019 11:43:56 +0300 Subject: [PATCH 1020/1124] Bump 1.5.8 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 5c689c9c..292b29db 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.7", + "version": "1.5.8", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.7", + "version": "1.5.8", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index e7999731..b167ef3e 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.7 + 1.5.8 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index e636821c..fd11047d 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.7" +#define CURRENT_LIB_VERSION "1.5.8" void *pathman_cache_search_relid(HTAB *cache_table, From c14d2ad74cbf0d2b3d5837d41fdd4f71ccc0f56b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 19 Jul 2019 20:21:50 +0300 Subject: [PATCH 1021/1124] Additional mtm compatibility --- expected/pathman_basic.out | 6 +++--- sql/pathman_basic.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 0ae1ae6a..95134ee2 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1312,7 +1312,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP 
TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); @@ -1328,13 +1328,13 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) DROP TABLE test.range_rel CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 6d2e52e1..9e0c3bf2 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -380,7 +380,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); From c48c2b25d6ef4f4a7064f21ff464b463dfeea769 Mon Sep 17 00:00:00 2001 From: Teodor Sigaev Date: Tue, 1 Oct 2019 17:34:56 +0300 Subject: [PATCH 1022/1124] PGPRO-3087 Prevent double expand partitioned table by built-in inheritance and pg_pathman's one --- expected/pathman_basic.out | 20 +++++++++++++++++++- sql/pathman_basic.sql | 8 ++++++++ src/hooks.c | 10 ++++++++-- 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 95134ee2..3baf2989 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1804,7 +1804,25 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY 
KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 28 other objects +NOTICE: drop cascades to 32 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 9e0c3bf2..8a97448e 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -546,6 +546,14 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; +SELECT * FROM test.mixinh_parent; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/src/hooks.c b/src/hooks.c index 4086fabd..fcaab6df 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -406,10 +406,16 @@ pathman_rel_pathlist_hook(PlannerInfo *root, * and added its children to the plan. 
*/ if (appinfo->child_relid == rti && - child_oid == parent_oid && OidIsValid(appinfo->parent_reloid)) { - goto cleanup; + if (child_oid == parent_oid) + goto cleanup; + else if (!has_pathman_relation_info(parent_oid)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not expand partitioned table \"%s\"", + get_rel_name(child_oid)), + errhint("Do not use inheritance and pg_pathman partitions together"))); } } } From 722db19a9ff5f58abb7b7394eaa58c3c00a06f2e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 2 Oct 2019 14:39:46 +0300 Subject: [PATCH 1023/1124] Bump 1.5.9 lib version. --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- src/include/init.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index 292b29db..cd55fcb4 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.8", + "version": "1.5.9", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.8", + "version": "1.5.9", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index b167ef3e..35d56733 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -13,7 +13,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.8 + 1.5.9 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index fd11047d..15efae16 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.8" +#define 
CURRENT_LIB_VERSION "1.5.9" void *pathman_cache_search_relid(HTAB *cache_table, From 27ba5db0692a8bb09f97e9b666f4631b4d5f7d4e Mon Sep 17 00:00:00 2001 From: Ildar Musin Date: Fri, 11 Oct 2019 11:17:58 +0200 Subject: [PATCH 1024/1124] README: remove obsolete emails --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4fb8e5ac..03b64815 100644 --- a/README.md +++ b/README.md @@ -776,8 +776,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. ## Authors -Ildar Musin Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia +[Dmitry Ivanov](https://github.com/funbringer) Maksim Milyutin Postgres Professional Ltd., Russia -Ildus Kurbangaliev Postgres Professional Ltd., Russia +[Ildus Kurbangaliev](https://github.com/ildus) From 3faa7ca42063f760dbb7c4071d24b90039d8220e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:52:17 +0300 Subject: [PATCH 1025/1124] Deprecation note. 
--- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 03b64815..b49c20ec 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,18 @@ [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) [![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) +### NOTE: this project is not under development anymore + +`pg_pathman` supports Postgres versions [9.5..12], but most probably it won't be ported to 13 and later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. + # pg_pathman The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10, 11; - * Postgres Pro Standard 9.5, 9.6, 10; + * PostgreSQL 9.5, 9.6, 10, 11, 12; + * Postgres Pro Standard 9.5, 9.6, 10, 11, 12; * Postgres Pro Enterprise; Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). From 08113c98bee6da0173f5117a86b34aa761606963 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 14 Nov 2019 18:32:08 +0300 Subject: [PATCH 1026/1124] Port to 12. Notable/non-trivial issues: - Dealing with new tableam and abstracted slots while supporting 9.5 is quite hairy. Probably deserves some refactoring. 
- nodeModifyTable decides to convert received from PartitionFilter tuple, if its tts_ops is not one native of table's am (BufferHeapTupleTableSlot, essentially). That might sound sane, but nodeModifyTable doesn't know anything about our parent->child attr mapping and has only parent's tupledesc. Thus we end up with tupledesc not matching actual tuple. To prevent that, always create BufferHeapTupleTableSlot, which (fortunately and weirdly) can easily store virtual tuple as well as materialized one. (vanilla partitioning does mapping *after* making sure tts_ops is ok) - For some reason which is not clear to me, nodeCustom promises that its tts_ops is fixed TTSOpsVirtual. RuntimeAppend doesn't think so, however. It easily passed BufferHeapTupleSlot up there is no projection, which is fine. That's changed by converting to slot to virtual one even in this case to keep the promise. - append_rte_to_estate: for efficiency 12 introduced estate->es_range_table_array mirroring es_range_table. Further, relcache management in executor was centralized, and now rri's relcache entries get into es_relations, where ExecEndPlan finds them to close. So we also fill both arrays. - Something like core's 4b40e4: now hashtext wants to know collation. We never recorded it, so just pass default one. - Two things led to massive duplication of test outputs: - Append nodes with single subplan are eliminated now. - CTEs are no longer optimization fences by default. 
--- expected/pathman_array_qual.out | 4 + expected/pathman_array_qual_1.out | 2397 +++++++++++++++++++++++ expected/pathman_basic.out | 5 + expected/pathman_basic_1.out | 249 ++- expected/pathman_calamity.out | 7 + expected/pathman_calamity_1.out | 1061 ++++++++++ expected/pathman_check.out | 0 expected/pathman_cte.out | 10 +- expected/pathman_cte_1.out | 265 +++ expected/pathman_expressions.out | 3 + expected/pathman_expressions_1.out | 3 + expected/pathman_expressions_2.out | 430 ++++ expected/pathman_gaps.out | 4 + expected/pathman_gaps_1.out | 812 ++++++++ expected/pathman_join_clause.out | 4 + expected/pathman_join_clause_1.out | 176 ++ expected/pathman_only.out | 5 + expected/pathman_only_1.out | 277 +++ expected/pathman_rowmarks.out | 3 + expected/pathman_rowmarks_1.out | 3 + expected/pathman_rowmarks_2.out | 387 ++++ expected/pathman_subpartitions.out | 4 + expected/pathman_subpartitions_1.out | 460 +++++ expected/pathman_upd_del.out | 7 + expected/pathman_upd_del_1.out | 7 + expected/pathman_upd_del_2.out | 458 +++++ expected/pathman_views.out | 3 + expected/pathman_views_1.out | 3 + expected/pathman_views_2.out | 188 ++ sql/pathman_array_qual.sql | 5 + sql/pathman_basic.sql | 6 + sql/pathman_calamity.sql | 8 + sql/pathman_cte.sql | 14 +- sql/pathman_expressions.sql | 3 + sql/pathman_gaps.sql | 4 + sql/pathman_join_clause.sql | 5 +- sql/pathman_only.sql | 5 + sql/pathman_rowmarks.sql | 3 + sql/pathman_subpartitions.sql | 5 + sql/pathman_upd_del.sql | 7 + sql/pathman_views.sql | 3 + src/compat/pg_compat.c | 12 + src/hooks.c | 4 + src/include/compat/pg_compat.h | 133 +- src/include/compat/rowmarks_fix.h | 4 + src/include/partition_filter.h | 1 - src/include/planner_tree_modification.h | 2 +- src/init.c | 34 +- src/nodes_common.c | 22 +- src/partition_creation.c | 91 +- src/partition_filter.c | 79 +- src/partition_router.c | 155 +- src/pathman_workers.c | 2 +- src/pg_pathman.c | 71 +- src/pl_funcs.c | 28 +- src/pl_range_funcs.c | 10 +- 
src/planner_tree_modification.c | 3 + src/relation_info.c | 8 + src/runtime_merge_append.c | 6 +- src/utility_stmt_hooking.c | 57 +- 60 files changed, 7783 insertions(+), 242 deletions(-) create mode 100644 expected/pathman_array_qual_1.out create mode 100644 expected/pathman_calamity_1.out create mode 100644 expected/pathman_check.out create mode 100644 expected/pathman_cte_1.out create mode 100644 expected/pathman_expressions_2.out create mode 100644 expected/pathman_gaps_1.out create mode 100644 expected/pathman_join_clause_1.out create mode 100644 expected/pathman_only_1.out create mode 100644 expected/pathman_rowmarks_2.out create mode 100644 expected/pathman_subpartitions_1.out create mode 100644 expected/pathman_upd_del_2.out create mode 100644 expected/pathman_views_2.out diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 36ec268d..49dca03a 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out new file mode 100644 index 00000000..6c8def94 --- /dev/null +++ b/expected/pathman_array_qual_1.out @@ -0,0 +1,2397 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 
rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY 
('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 
rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 
rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + 
-> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and 
some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > 500) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN 
+----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + 
QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > 101) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 550) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > 700) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled 
+SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan 
on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> 
Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> 
Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on 
test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom 
Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq 
Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + 
-> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 
898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); 
+EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP SCHEMA array_qual CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 3baf2989..aa5b5ab6 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 61aed5db..d1403c77 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -251,12 +256,11 @@ SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable paren ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN -------------------------------------- - Append - -> Seq Scan on improved_dummy_11 - Filter: (id = 101) -(3 rows) + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ set_enable_parent @@ -434,20 +438,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (2 = value) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN @@ -460,12 +462,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_3 - Filter: (2500 = id) -(3 rows) + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ QUERY PLAN @@ -537,11 +538,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* tes (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------------- - Append - -> Seq Scan on range_rel_2 -(2 rows) + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN @@ -593,20 +593,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------------- - Append - -> Seq Scan on hash_rel_1 - Filter: (2 = value) -(3 rows) + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN @@ -619,19 +617,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN 
----------------------------------------------------------------- - Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 - Index Cond: (2500 = id) -(3 rows) + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ QUERY PLAN ---------------------------------------------------------------- Append -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 - Index Cond: (2500 < id) + Index Cond: (id > 2500) -> Seq Scan on num_range_rel_4 (4 rows) @@ -710,17 +707,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* tes ------------------------------------------------------------------------------------ Append -> Index Scan using range_rel_2_dt_idx on range_rel_2 - Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) -> Seq Scan on range_rel_3 -> Seq Scan on range_rel_4 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------------- - Append - -> Seq Scan on range_rel_2 -(2 rows) + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN @@ -810,41 +806,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
-> Index Scan using range_rel_2_dt_idx on range_rel_2 (4 rows) -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN ---------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j1.id = j2.id) - -> Hash Join - Hash Cond: (j3.id = j1.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 - -> Hash - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 -(20 rows) - /* * Test inlined SQL functions */ @@ -859,22 +820,20 @@ CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $ select * from test.sql_inline where id = i_id limit 1; $$ LANGUAGE sql STABLE; EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); - QUERY PLAN --------------------------------------- + QUERY PLAN +-------------------------------- Limit - -> Append - -> Seq Scan on sql_inline_0 - Filter: (id = 5) -(4 rows) + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); - QUERY PLAN --------------------------------------- + QUERY PLAN +-------------------------------- Limit - -> Append - -> Seq Scan on sql_inline_2 - Filter: (id = 1) -(4 rows) + -> Seq Scan on sql_inline_2 
+ Filter: (id = 1) +(3 rows) DROP FUNCTION test.sql_inline_func(int); DROP TABLE test.sql_inline CASCADE; @@ -945,12 +904,11 @@ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_re (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------------- - Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 - Index Cond: ((id >= 100) AND (id <= 700)) -(3 rows) + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions @@ -966,11 +924,10 @@ SELECT pathman.append_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_6 -(2 rows) + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) SELECT pathman.prepend_range_partition('test.num_range_rel'); prepend_range_partition @@ -979,11 +936,10 @@ SELECT pathman.prepend_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on num_range_rel_7 -(2 rows) + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_7 +(1 row) SELECT pathman.drop_range_partition('test.num_range_rel_7'); drop_range_partition @@ -1049,12 +1005,11 @@ SELECT pathman.drop_range_partition('test.range_rel_7'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; - QUERY PLAN -------------------------------------------------------------------------------------- - Append - -> Index Scan using 
range_rel_1_dt_idx on range_rel_1 - Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) -(3 rows) + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions @@ -1347,7 +1302,7 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects /* Test attributes copying */ -CREATE UNLOGGED TABLE test.range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt DATE NOT NULL) WITH (fillfactor = 70); @@ -1363,13 +1318,13 @@ SELECT pathman.create_range_partitions('test.range_rel', 'dt', SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; reloptions | relpersistence -----------------+---------------- - {fillfactor=70} | u + {fillfactor=70} | p (1 row) DROP TABLE test.range_rel CASCADE; @@ -1390,12 +1345,11 @@ SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); INSERT INTO test.range_rel (dt) SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on range_rel_14 - Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) -(3 rows) + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 
00:00:00 2014'::timestamp without time zone) +(2 rows) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; id | dt | data @@ -1404,12 +1358,11 @@ SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on range_rel_8 - Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) -(3 rows) + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; id | dt | data @@ -1839,7 +1792,25 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 28 other objects +NOTICE: drop cascades to 32 other objects DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 35d56733..c258b5cc 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1,3 +1,10 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: 
"15.6" + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out new file mode 100644 index 00000000..ee422784 --- /dev/null +++ b/expected/pathman_calamity_1.out @@ -0,0 +1,1061 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.9 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT 
append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT 
create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: 
auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT 
set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); 
/* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT 
validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * 
------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache 
disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on 
test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries 
+-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT 
get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_check.out b/expected/pathman_check.out new file mode 100644 index 00000000..e69de29b diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index c7edd5a4..ce818a36 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -1,10 +1,14 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; -/* - * Test simple CTE queries - */ CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out new file mode 100644 index 00000000..70a9ee88 --- /dev/null +++ b/expected/pathman_cte_1.out @@ -0,0 +1,265 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + 
create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND 
(t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP SCHEMA test_cte CASCADE; +NOTICE: drop cascades to 3 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 66f931e3..1db38acb 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -3,6 +3,9 @@ * NOTE: This test behaves differenly on < 11 because planner now turns * Row(Const, Const) into just Const of record type, apparently since 3decd150 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out index 893bcd21..126534a0 100644 --- a/expected/pathman_expressions_1.out +++ b/expected/pathman_expressions_1.out @@ -3,6 +3,9 @@ * NOTE: This test behaves differenly on < 11 because planner now turns * Row(Const, Const) into just Const of record type, apparently since 3decd150 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out new file mode 100644 index 00000000..83b0c7b0 --- /dev/null +++ b/expected/pathman_expressions_2.out @@ -0,0 +1,430 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------- + Seq Scan on canon_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite 
VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); 
+SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT 
public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT 
INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP SCHEMA test_exprs CASCADE; +NOTICE: drop cascades to 24 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out index a21734f0..1d9b1f33 100644 --- a/expected/pathman_gaps.out +++ b/expected/pathman_gaps.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out new file mode 100644 index 00000000..d6e1973d --- /dev/null +++ b/expected/pathman_gaps_1.out @@ -0,0 +1,812 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + 
gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; 
+ QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------- + Seq Scan on test_2_4 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq 
Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------- + Seq Scan on test_3_5 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 
41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------- + Seq Scan on test_4_6 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan 
on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> 
Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP SCHEMA gaps CASCADE; +NOTICE: drop cascades to 30 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 25d5cba9..ed822543 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out new file mode 100644 index 00000000..09b9a00c --- /dev/null +++ b/expected/pathman_join_clause_1.out @@ -0,0 +1,176 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: 
(id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); 
+INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON 
test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 28471cf3..b54722d8 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -2,6 +2,11 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out new file mode 100644 index 00000000..fe64e5c9 --- /dev/null +++ b/expected/pathman_only_1.out @@ -0,0 +1,277 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN 
+--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 
from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test 
+WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 0bf1078a..4b51cb65 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. 
*/ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index d072cde9..e72e7076 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out new file mode 100644 index 00000000..a111d688 --- /dev/null +++ b/expected/pathman_rowmarks_2.out @@ -0,0 +1,387 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * 
FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on 
first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP SCHEMA rowmarks CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table rowmarks.first +drop cascades to table rowmarks.second +drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index a876b457..c13b4ee8 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ \set VERBOSITY terse CREATE EXTENSION pg_pathman; CREATE SCHEMA subpartitions; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out new file mode 100644 index 00000000..f190f798 --- /dev/null +++ b/expected/pathman_subpartitions_1.out @@ -0,0 +1,460 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 
row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN 
+------------------------------------- + Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------- + Seq Scan on abc_3_2 + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + 
subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM 
subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); 
+SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | 
subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 
VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP SCHEMA subpartitions CASCADE; +NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 935b65b4..2cc19239 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index d0022855..5cd5ac9f 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out new file mode 100644 index 00000000..2aeb6702 --- /dev/null +++ b/expected/pathman_upd_del_2.out @@ -0,0 +1,458 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 
'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel 
WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, 
single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) 
+ -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + 
WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM 
test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 27 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 45423ef5..78589970 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index bead6de1..ea390d84 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out new file mode 100644 index 00000000..15770ec0 --- /dev/null +++ b/expected/pathman_views_2.out @@ -0,0 +1,188 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------- + Seq Scan on _abc_0 + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------- + LockRows + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN 
+-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +---------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
_abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 16 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 7ab15b6a..84327359 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 8a97448e..a164d421 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -1,3 +1,9 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 51827887..c380ea1d 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -1,3 +1,11 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + */ + \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql index 04af82f0..5a695cbb 100644 --- a/sql/pathman_cte.sql +++ b/sql/pathman_cte.sql @@ -1,15 +1,17 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ + \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; - - -/* - * Test simple CTE queries - */ - CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 6149a0c2..ed05be79 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -3,6 +3,9 @@ * NOTE: This test behaves differenly on < 11 because planner now turns * Row(Const, Const) into just Const of record type, apparently since 3decd150 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. */ \set VERBOSITY terse diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql index eb185ff2..55c9a16d 100644 --- a/sql/pathman_gaps.sql +++ b/sql/pathman_gaps.sql @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index c578d361..3a0a655f 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -105,4 +109,3 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; - diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index e2813ea6..6e34a9c1 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -2,6 +2,11 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. */ \set VERBOSITY terse diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index aa365544..f1ac0fe9 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 05ac9614..5aaea49a 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ + \set VERBOSITY terse CREATE EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index adca1e4c..a6cab581 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -2,6 +2,13 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. */ \set VERBOSITY terse diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 9f386a3d..65e64149 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -2,6 +2,9 @@ * ------------------------------------------- * NOTE: This test behaves differenly on 9.5 * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
*/ \set VERBOSITY terse diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 4bc021fd..abf71f9d 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -145,8 +145,13 @@ get_all_actual_clauses(List *restrictinfo_list) * make_restrictinfos_from_actual_clauses */ #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#else #include "optimizer/restrictinfo.h" #include "optimizer/var.h" +#endif /* 12 */ List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, @@ -462,6 +467,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ return; #endif + +#if PG_VERSION_NUM >= 120000 + case RTE_RESULT: + /* RESULT RTEs, in themselves, are no problem. */ + break; +#endif /* 12 */ + } /* diff --git a/src/hooks.c b/src/hooks.c index fcaab6df..12c053b2 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -13,6 +13,10 @@ #include "compat/pg_compat.h" #include "compat/rowmarks_fix.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif + #include "declarative.h" #include "hooks.h" #include "init.h" diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 145b2113..26931fd9 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -26,8 +26,15 @@ #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/pathnodes.h" +#else #include "nodes/relation.h" +#endif #include "nodes/pg_list.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/appendinfo.h" +#endif #include "optimizer/cost.h" #include "optimizer/paths.h" #include "optimizer/pathnode.h" @@ -232,7 +239,20 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 120000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, 
(required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO pgpro version */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 110000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -794,11 +814,14 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * MakeTupleTableSlot() */ -#if PG_VERSION_NUM >= 110000 -#define MakeTupleTableSlotCompat() \ +#if PG_VERSION_NUM >= 120000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL, (tts_ops)) +#elif PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat(tts_ops) \ MakeTupleTableSlot(NULL) #else -#define MakeTupleTableSlotCompat() \ +#define MakeTupleTableSlotCompat(tts_ops) \ MakeTupleTableSlot() #endif @@ -877,14 +900,113 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, # define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) #endif +/* + * is_andclause + */ +#if PG_VERSION_NUM >= 120000 +#define is_andclause_compat(clause) is_andclause(clause) +#else +#define is_andclause_compat(clause) and_clause(clause) +#endif + +/* + * GetDefaultTablespace + */ +#if PG_VERSION_NUM >= 120000 +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence), (partitioned)) +#else +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence)) +#endif + +/* + * CreateTemplateTupleDesc + */ +#if PG_VERSION_NUM >= 120000 +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc(natts) +#else +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc((natts), (hasoid)) +#endif + +/* + * addRangeTableEntryForRelation + */ +#if PG_VERSION_NUM >= 120000 +#define 
addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (lockmode), (alias), (inh), (inFromCl)) +#else +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (alias), (inh), (inFromCl)) +#endif + +/* + * nextCopyFrom (WITH_OIDS removed) + */ +#if PG_VERSION_NUM >= 120000 +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls)) +#else +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls), (tupleOid)) +#endif + +/* + * ExecInsertIndexTuples. Since 12 slot contains tupleid. + */ +#if PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#else +#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#endif + +/* + * RenameRelationInternal + */ +#if PG_VERSION_NUM >= 120000 +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal), (is_index)) +#else +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal)) +#endif + +/* + * getrelid + */ +#if PG_VERSION_NUM >= 120000 +#define getrelid(rangeindex,rangetable) \ + (rt_fetch(rangeindex, rangetable)->relid) +#endif + +/* + * AddRelationNewConstraints + */ +#if PG_VERSION_NUM >= 120000 +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), 
(newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal), NULL) +#else +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal)) +#endif + + /* * ------------- * Common code * ------------- */ +#if PG_VERSION_NUM >= 120000 +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlot((estate), (tdesc), (tts_ops)); +#else +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlotCompatHorse((estate), (tdesc)) static inline TupleTableSlot * -ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) +ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) { #if PG_VERSION_NUM >= 110000 return ExecInitExtraTupleSlot(s,t); @@ -896,6 +1018,7 @@ ExecInitExtraTupleSlotCompat(EState *s, TupleDesc t) return res; #endif } +#endif /* See ExecEvalParamExtern() */ static inline ParamExternData * diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 4875358e..09e5fbef 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -17,7 +17,11 @@ #include "postgres.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM < 120000 #include "nodes/relation.h" +#else +#include "optimizer/optimizer.h" +#endif #if PG_VERSION_NUM >= 90600 diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index bf03433c..0b32e575 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -90,7 +90,6 @@ struct ResultPartsStorage bool close_relations; LOCKMODE head_open_lock_mode; - LOCKMODE heap_close_lock_mode; PartRelationInfo *prel; ExprState *prel_expr_state; diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index 4e33ca34..edca73a0 100644 --- 
a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -16,7 +16,7 @@ #include "postgres.h" #include "utils/rel.h" -#include "nodes/relation.h" +/* #include "nodes/relation.h" */ #include "nodes/nodeFuncs.h" diff --git a/src/init.c b/src/init.c index 92d2d213..bd85c593 100644 --- a/src/init.c +++ b/src/init.c @@ -21,12 +21,20 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/heapam.h" +#include "access/genam.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "catalog/indexing.h" #include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/nodeFuncs.h" +#endif #include "optimizer/clauses.h" #include "utils/inval.h" #include "utils/builtins.h" @@ -631,7 +639,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, TransactionId *xmin, ItemPointerData* iptr) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -653,7 +665,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif while ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -681,7 +697,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); @@ -699,7 +719,11 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc 
scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -712,7 +736,11 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif /* There should be just 1 row */ if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) @@ -730,7 +758,11 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); heap_close(rel, AccessShareLock); @@ -764,7 +796,7 @@ validate_range_constraint(const Expr *expr, tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); /* Is it an AND clause? */ - if (and_clause((Node *) expr)) + if (is_andclause_compat((Node *) expr)) { const BoolExpr *boolexpr = (const BoolExpr *) expr; ListCell *lc; diff --git a/src/nodes_common.c b/src/nodes_common.c index 5f0c0c14..8adf81dd 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -15,9 +15,13 @@ #include "utils.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" -#include "optimizer/tlist.h" #include "optimizer/var.h" +#endif +#include "optimizer/tlist.h" #include "rewrite/rewriteManip.h" #include "utils/memutils.h" #include "utils/ruleutils.h" @@ -689,11 +693,25 @@ exec_append_common(CustomScanState *node, return NULL; if (!node->ss.ps.ps_ProjInfo) + { + /* + * ExecInitCustomScan carelessly promises that it will always (resultopsfixed) + * return TTSOpsVirtual slot. To keep the promise, convert raw + * BufferHeapTupleSlot to virtual even if we don't have any projection. 
+ * + * BTW, why original code decided to invent its own scan_state->slot + * instead of using ss.ss_ScanTupleSlot? + */ +#if PG_VERSION_NUM >= 120000 + return ExecCopySlot(node->ss.ps.ps_ResultTupleSlot, scan_state->slot); +#else return scan_state->slot; +#endif + } /* * Assuming that current projection doesn't involve SRF. - * NOTE: Any SFR functions are evaluated in ProjectSet node. + * NOTE: Any SFR functions since 69f4b9c are evaluated in ProjectSet node. */ ResetExprContext(node->ss.ps.ps_ExprContext); node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; diff --git a/src/partition_creation.c b/src/partition_creation.c index bea41379..e162e99e 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -19,6 +19,9 @@ #include "access/htup_details.h" #include "access/reloptions.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/heap.h" #include "catalog/pg_authid.h" @@ -245,9 +248,9 @@ create_single_partition_common(Oid parent_relid, /* Open the relation and add new check constraint & fkeys */ child_relation = heap_open(partition_relid, AccessExclusiveLock); - AddRelationNewConstraints(child_relation, NIL, - list_make1(check_constraint), - false, true, true); + AddRelationNewConstraintsCompat(child_relation, NIL, + list_make1(check_constraint), + false, true, true); heap_close(child_relation, NoLock); /* Make constraint visible */ @@ -809,6 +812,9 @@ create_single_partition_internal(Oid parent_relid, #if defined(PGPRO_EE) && PG_VERSION_NUM < 100000 create_stmt.partition_info = NULL; #endif +#if PG_VERSION_NUM >= 120000 + create_stmt.accessMethod = NULL; +#endif /* Obtain the sequence of Stmts to create partition and link it to parent */ create_stmts = transformCreateStmt(&create_stmt, NULL); @@ -986,7 +992,11 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Search for 'partition_relid' */ ScanKeyInit(&skey[0], 
+#if PG_VERSION_NUM >= 120000 + Anum_pg_class_oid, +#else ObjectIdAttributeNumber, +#endif BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partition_relid)); @@ -1135,7 +1145,12 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) Oid copy_fkeys_proc_args[] = { REGCLASSOID, REGCLASSOID }; List *copy_fkeys_proc_name; FmgrInfo copy_fkeys_proc_flinfo; - FunctionCallInfoData copy_fkeys_proc_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(copy_fkeys_proc_fcinfo, 2); +#else + FunctionCallInfoData copy_fkeys_proc_fcinfo_data; + FunctionCallInfo copy_fkeys_proc_fcinfo = ©_fkeys_proc_fcinfo_data; +#endif char *pathman_schema; /* Fetch pg_pathman's schema */ @@ -1150,15 +1165,22 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) copy_fkeys_proc_args, false), ©_fkeys_proc_flinfo); - InitFunctionCallInfoData(copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, + InitFunctionCallInfoData(*copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, 2, InvalidOid, NULL, NULL); - copy_fkeys_proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); - copy_fkeys_proc_fcinfo.argnull[0] = false; - copy_fkeys_proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_oid); - copy_fkeys_proc_fcinfo.argnull[1] = false; +#if PG_VERSION_NUM >= 120000 + copy_fkeys_proc_fcinfo->args[0].value = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->args[0].isnull = false; + copy_fkeys_proc_fcinfo->args[1].value = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->args[1].isnull = false; +#else + copy_fkeys_proc_fcinfo->arg[0] = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->argnull[0] = false; + copy_fkeys_proc_fcinfo->arg[1] = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->argnull[1] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(©_fkeys_proc_fcinfo); + FunctionCallInvoke(copy_fkeys_proc_fcinfo); /* Make changes visible */ CommandCounterIncrement(); @@ -1266,9 +1288,9 @@ add_pathman_check_constraint(Oid relid, Constraint *constraint) { Relation part_rel = 
heap_open(relid, AccessExclusiveLock); - AddRelationNewConstraints(part_rel, NIL, - list_make1(constraint), - false, true, true); + AddRelationNewConstraintsCompat(part_rel, NIL, + list_make1(constraint), + false, true, true); heap_close(part_rel, NoLock); } @@ -1629,7 +1651,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) Oid partition_oid = cb_params->partition_relid; FmgrInfo cb_flinfo; - FunctionCallInfoData cb_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(cb_fcinfo, 1); +#else + FunctionCallInfoData cb_fcinfo_data; + FunctionCallInfo cb_fcinfo = &cb_fcinfo_data; +#endif JsonbParseState *jsonb_state = NULL; JsonbValue *result, @@ -1761,12 +1788,17 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Fetch function call data */ fmgr_info(cb_params->callback, &cb_flinfo); - InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); - cb_fcinfo.arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); - cb_fcinfo.argnull[0] = false; + InitFunctionCallInfoData(*cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); +#if PG_VERSION_NUM >= 120000 + cb_fcinfo->args[0].value = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->args[0].isnull = false; +#else + cb_fcinfo->arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->argnull[0] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(&cb_fcinfo); + FunctionCallInvoke(cb_fcinfo); } /* Invoke a callback of a specified type */ @@ -1830,19 +1862,28 @@ validate_part_callback(Oid procid, bool emit_error) static Oid text_to_regprocedure(text *proc_signature) { - FunctionCallInfoData fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(fcinfo, 1); +#else + FunctionCallInfoData fcinfo_data; + FunctionCallInfo fcinfo = &fcinfo_data; +#endif Datum result; - InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); + InitFunctionCallInfoData(*fcinfo, NULL, 1, InvalidOid, NULL, NULL); -#if PG_VERSION_NUM >= 90600 - fcinfo.arg[0] = 
PointerGetDatum(proc_signature); +#if PG_VERSION_NUM >= 120000 + fcinfo->args[0].value = PointerGetDatum(proc_signature); + fcinfo->args[0].isnull = false; +#elif PG_VERSION_NUM >= 90600 + fcinfo->arg[0] = PointerGetDatum(proc_signature); + fcinfo->argnull[0] = false; #else - fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->argnull[0] = false; #endif - fcinfo.argnull[0] = false; - result = to_regprocedure(&fcinfo); + result = to_regprocedure(fcinfo); return DatumGetObjectId(result); } diff --git a/src/partition_filter.c b/src/partition_filter.c index f905470e..a923c650 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -17,6 +17,9 @@ #include "utils.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" @@ -86,7 +89,7 @@ static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, static Node *fix_returning_list_mutator(Node *node, void *state); -static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); +static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); static void pf_memcxt_callback(void *arg); @@ -182,10 +185,12 @@ init_result_parts_storage(ResultPartsStorage *parts_storage, parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; - /* Should partitions be locked till transaction's end? */ + /* + * Should ResultPartsStorage do ExecCloseIndices and heap_close on + * finalization? 
+ */ parts_storage->close_relations = close_relations; parts_storage->head_open_lock_mode = RowExclusiveLock; - parts_storage->heap_close_lock_mode = NoLock; /* Fetch PartRelationInfo for this partitioned relation */ parts_storage->prel = get_pathman_relation_info(parent_relid); @@ -214,13 +219,22 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) if (parts_storage->fini_rri_holder_cb) parts_storage->fini_rri_holder_cb(rri_holder, parts_storage); - /* Close partitions and indices */ + /* + * Close indices, unless ExecEndPlan won't do that for us (this is + * is CopyFrom which misses it, not usual executor run, essentially). + * Otherwise, it is always automaticaly closed; in <= 11, relcache + * refs of rris managed heap_open/close on their own, and ExecEndPlan + * closed them directly. Since 9ddef3, relcache management + * of executor was centralized; now rri refs are copies of ones in + * estate->es_relations, which are closed in ExecEndPlan. + * So we push our rel there, and it is also automatically closed. 
+ */ if (parts_storage->close_relations) { ExecCloseIndices(rri_holder->result_rel_info); - + /* And relation itself */ heap_close(rri_holder->result_rel_info->ri_RelationDesc, - parts_storage->heap_close_lock_mode); + NoLock); } /* Free conversion-related stuff */ @@ -315,7 +329,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) ExecCheckRTPerms(list_make1(child_rte), true); /* Append RangeTblEntry to estate->es_range_table */ - child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte); + child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); /* Create ResultRelInfo for partition */ child_result_rel_info = makeNode(ResultRelInfo); @@ -355,7 +369,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - /* Generate tuple transformation map and some other stuff */ + /* + * Generate parent->child tuple transformation map. We need to + * convert tuples because e.g. parent's TupleDesc might have dropped + * columns which child doesn't have at all because it was created after + * the drop. + */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); /* Default values */ @@ -760,21 +779,35 @@ partition_filter_exec(CustomScanState *node) /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { + Relation child_rel = rri->ri_RelationDesc; + + /* xxx why old code decided to materialize it? */ +#if PG_VERSION_NUM < 120000 HeapTuple htup_old, htup_new; - Relation child_rel = rri->ri_RelationDesc; htup_old = ExecMaterializeSlot(slot); htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); ExecClearTuple(slot); +#endif - /* Allocate new slot if needed */ + /* + * Allocate new slot if needed. + * For 12, it is sort of important to create BufferHeapTuple, + * though we will store virtual one there. 
Otherwise, ModifyTable + * decides to copy it to mt_scans slot which has tupledesc of + * parent. + */ if (!state->tup_convert_slot) - state->tup_convert_slot = MakeTupleTableSlotCompat(); + state->tup_convert_slot = MakeTupleTableSlotCompat(&TTSOpsBufferHeapTuple); /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); +#if PG_VERSION_NUM >= 120000 + slot = execute_attr_map_slot(rri_holder->tuple_map->attrMap, slot, state->tup_convert_slot); +#else slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); +#endif } return slot; @@ -1143,7 +1176,7 @@ fix_returning_list_mutator(Node *node, void *state) /* Append RangeTblEntry 'rte' to estate->es_range_table */ static Index -append_rte_to_estate(EState *estate, RangeTblEntry *rte) +append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) { estate_mod_data *emd_struct = fetch_estate_mod_data(estate); @@ -1156,6 +1189,28 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte) /* Update estate_mod_data */ emd_struct->estate_not_modified = false; + /* + * On PG >= 12, also add rte to es_range_table_array. This is horribly + * inefficient, yes. + * At least in 12 es_range_table_array ptr is not saved anywhere in + * core, so it is safe to repalloc. + */ +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); + estate->es_range_table_array = (RangeTblEntry **) + repalloc(estate->es_range_table_array, + estate->es_range_table_size * sizeof(RangeTblEntry *)); + estate->es_range_table_array[estate->es_range_table_size - 1] = rte; + + /* + * Also reallocate es_relations, because es_range_table_size defines its + * len. This also ensures ExecEndPlan will close the rel. 
+ */ + estate->es_relations = (Relation *) + repalloc(estate->es_relations, estate->es_range_table_size * sizeof(Relation)); + estate->es_relations[estate->es_range_table_size - 1] = child_rel; +#endif + return list_length(estate->es_range_table); } diff --git a/src/partition_router.c b/src/partition_router.c index 82578c5d..8c3bac55 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -14,12 +14,23 @@ #include "partition_router.h" #include "compat/pg_compat.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#include "access/tableam.h" +#endif #include "access/xact.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" /* direct heap_delete, no-no */ +#endif #include "access/htup_details.h" #include "catalog/pg_class.h" #include "commands/trigger.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/makefuncs.h" /* make_ands_explicit */ +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" #include "storage/bufmgr.h" #include "utils/guc.h" @@ -272,7 +283,8 @@ router_set_slot(PartitionRouterState *state, /* Don't forget to set saved_slot! 
*/ state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, - slot->tts_tupleDescriptor); + slot->tts_tupleDescriptor, + &TTSOpsHeapTuple); ExecCopySlot(state->yielded_slot, slot); } @@ -394,8 +406,15 @@ router_lock_or_delete_tuple(PartitionRouterState *state, ExprContext *econtext = GetPerTupleExprContext(estate); ExprState *constraint = state->constraint; - HeapUpdateFailureData hufd; + /* Maintaining both >= 12 and earlier is quite horrible there, you know */ +#if PG_VERSION_NUM >= 120000 + TM_FailureData tmfd; + TM_Result result; +#else + HeapUpdateFailureData tmfd; HTSU_Result result; +#endif + EPQState *epqstate = &state->epqstate; LOCKMODE lockmode; @@ -422,9 +441,14 @@ router_lock_or_delete_tuple(PartitionRouterState *state, if (rri->ri_TrigDesc && rri->ri_TrigDesc->trig_update_before_row) { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot)) + return NULL; +#else slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); if (TupIsNull(slot)) return NULL; +#endif } /* BEFORE ROW DELETE triggers */ @@ -439,7 +463,7 @@ router_lock_or_delete_tuple(PartitionRouterState *state, result = heap_delete_compat(rel, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, - true /* wait for commit */, &hufd, + true /* wait for commit */, &tmfd, true /* changing partition */); } else @@ -448,10 +472,11 @@ router_lock_or_delete_tuple(PartitionRouterState *state, Buffer buffer; tuple.t_self = *tupleid; + /* xxx why we ever need this? 
*/ result = heap_lock_tuple(rel, &tuple, estate->es_output_cid, lockmode, LockWaitBlock, - false, &buffer, &hufd); + false, &buffer, &tmfd); ReleaseBuffer(buffer); } @@ -459,8 +484,12 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Check lock/delete status */ switch (result) { +#if PG_VERSION_NUM >= 120000 + case TM_SelfModified: +#else case HeapTupleSelfUpdated: - if (hufd.cmax != estate->es_output_cid) +#endif + if (tmfd.cmax != estate->es_output_cid) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), errmsg("tuple to be updated was already modified by an operation triggered by the current command"), @@ -469,20 +498,121 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Already deleted by self; nothing to do */ return NULL; +#if PG_VERSION_NUM >= 120000 + case TM_Ok: +#else case HeapTupleMayBeUpdated: +#endif break; +#if PG_VERSION_NUM >= 120000 /* TM_Deleted/TM_Updated */ + case TM_Updated: + { + /* not sure this stuff is correct at all */ + TupleTableSlot *inputslot; + TupleTableSlot *epqslot; + + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + /* + * Already know that we're going to need to do EPQ, so + * fetch tuple directly into the right slot. + */ + inputslot = EvalPlanQualSlot(epqstate, rel, rri->ri_RangeTableIndex); + + result = table_tuple_lock(rel, tupleid, + estate->es_snapshot, + inputslot, estate->es_output_cid, + LockTupleExclusive, LockWaitBlock, + TUPLE_LOCK_FLAG_FIND_LAST_VERSION, + &tmfd); + + switch (result) + { + case TM_Ok: + Assert(tmfd.traversed); + epqslot = EvalPlanQual(epqstate, + rel, + rri->ri_RangeTableIndex, + inputslot); + if (TupIsNull(epqslot)) + /* Tuple not passing quals anymore, exiting... 
*/ + return NULL; + + /* just copied from below, ha */ + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + + case TM_SelfModified: + + /* + * This can be reached when following an update + * chain from a tuple updated by another session, + * reaching a tuple that was already updated in + * this transaction. If previously updated by this + * command, ignore the delete, otherwise error + * out. + * + * See also TM_SelfModified response to + * table_tuple_delete() above. + */ + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + return NULL; + + case TM_Deleted: + /* tuple already deleted; nothing to do */ + return NULL; + + default: + + /* + * TM_Invisible should be impossible because we're + * waiting for updated row versions, and would + * already have errored out if the first version + * is invisible. + * + * TM_Updated should be impossible, because we're + * locking the latest version via + * TUPLE_LOCK_FLAG_FIND_LAST_VERSION. 
+ */ + elog(ERROR, "unexpected table_tuple_lock status: %u", + result); + return NULL; + } + + Assert(false); + break; + } + + + case TM_Deleted: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent delete"))); + /* tuple already deleted; nothing to do */ + return NULL; + +#else case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); - if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid)) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); - if (!ItemPointerEquals(tupleid, &hufd.ctid)) + if (!ItemPointerEquals(tupleid, &tmfd.ctid)) { TupleTableSlot *epqslot; @@ -491,13 +621,13 @@ router_lock_or_delete_tuple(PartitionRouterState *state, rel, rri->ri_RangeTableIndex, LockTupleExclusive, - &hufd.ctid, - hufd.xmax); + &tmfd.ctid, + tmfd.xmax); if (!TupIsNull(epqslot)) { Assert(tupleid != NULL); - *tupleid = hufd.ctid; + *tupleid = tmfd.ctid; slot = epqslot; goto recheck; } @@ -505,8 +635,13 @@ router_lock_or_delete_tuple(PartitionRouterState *state, /* Tuple already deleted; nothing to do */ return NULL; +#endif /* TM_Deleted/TM_Updated */ +#if PG_VERSION_NUM >= 120000 + case TM_Invisible: +#else case HeapTupleInvisible: +#endif elog(ERROR, "attempted to lock invisible tuple"); break; diff --git a/src/pathman_workers.c b/src/pathman_workers.c index ae6d13b9..54d62e7f 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -839,7 +839,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cp_tasks, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cp_tasks, false); 
TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_userid, "userid", REGROLEOID, -1, 0); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 7764aa94..285a130f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -23,15 +23,23 @@ #include "runtime_merge_append.h" #include "postgres.h" +#include "access/genam.h" #include "access/htup_details.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" +#include "catalog/pg_collation.h" #include "catalog/indexing.h" #include "catalog/pg_type.h" #include "catalog/pg_extension.h" #include "commands/extension.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" #include "optimizer/plancat.h" #include "optimizer/restrictinfo.h" @@ -384,7 +392,11 @@ get_pathman_schema(void) return InvalidOid; /* exit if pg_pathman does not exist */ ScanKeyInit(&entry[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_extension_oid, +#else ObjectIdAttributeNumber, +#endif BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ext_oid)); @@ -485,6 +497,26 @@ append_child_relation(PlannerInfo *root, child_rti = list_length(root->parse->rtable); root->simple_rte_array[child_rti] = child_rte; + /* Build an AppendRelInfo for this child */ + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = parent_rti; + appinfo->child_relid = child_rti; + appinfo->parent_reloid = parent_rte->relid; + + /* Store table row types for wholerow references */ + appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; + appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; + + make_inh_translation_list(parent_relation, child_relation, child_rti, + &appinfo->translated_vars); + + /* Now append 'appinfo' to 'root->append_rel_list' */ + root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 
110000 + root->append_rel_array[child_rti] = appinfo; +#endif + /* Create RelOptInfo for this child (and make some estimates as well) */ child_rel = build_simple_rel_compat(root, child_rti, parent_rel); @@ -533,26 +565,6 @@ append_child_relation(PlannerInfo *root, } - /* Build an AppendRelInfo for this child */ - appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = parent_rti; - appinfo->child_relid = child_rti; - appinfo->parent_reloid = parent_rte->relid; - - /* Store table row types for wholerow references */ - appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; - appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - - make_inh_translation_list(parent_relation, child_relation, child_rti, - &appinfo->translated_vars); - - /* Now append 'appinfo' to 'root->append_rel_list' */ - root->append_rel_list = lappend(root->append_rel_list, appinfo); - /* And to array in >= 11, it must be big enough */ -#if PG_VERSION_NUM >= 110000 - root->append_rel_array[child_rti] = appinfo; -#endif - /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) { @@ -618,7 +630,11 @@ append_child_relation(PlannerInfo *root, * Restriction reduces to constant FALSE or constant NULL after * substitution, so this child need not be scanned. */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } childquals = make_ands_implicit((Expr *) childqual); childquals = make_restrictinfos_from_actual_clauses(root, childquals); @@ -632,7 +648,11 @@ append_child_relation(PlannerInfo *root, * This child need not be scanned, so we can omit it from the * appendrel. 
*/ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } /* @@ -1065,9 +1085,14 @@ handle_const(const Const *c, } /* Else use the Const's value */ else value = c->constvalue; - - /* Calculate 32-bit hash of 'value' and corresponding index */ - hash = OidFunctionCall1(prel->hash_proc, value); + /* + * Calculate 32-bit hash of 'value' and corresponding index. + * Since 12, hashtext requires valid collation. Since we never + * supported this, passing db default one will do. + */ + hash = OidFunctionCall1Coll(prel->hash_proc, + DEFAULT_COLLATION_OID, + value); idx = hash_to_part_index(DatumGetInt32(hash), PrelChildrenCount(prel)); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index c302089e..ebf80861 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -19,6 +19,12 @@ #include "utils.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/table.h" +#include "access/tableam.h" +#endif #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" @@ -82,7 +88,11 @@ PG_FUNCTION_INFO_V1( pathman_version ); typedef struct { Relation pathman_config; +#if PG_VERSION_NUM >= 120000 + TableScanDesc pathman_config_scan; +#else HeapScanDesc pathman_config_scan; +#endif Snapshot snapshot; PartRelationInfo *current_prel; /* selected PartRelationInfo */ @@ -202,7 +212,8 @@ get_base_type_pl(PG_FUNCTION_ARGS) } /* - * Return tablespace name of a specified relation. + * Return tablespace name of a specified relation which must not be + * natively partitioned. 
*/ Datum get_tablespace_pl(PG_FUNCTION_ARGS) @@ -216,7 +227,7 @@ get_tablespace_pl(PG_FUNCTION_ARGS) /* If tablespace id is InvalidOid then use the default tablespace */ if (!OidIsValid(tablespace_id)) { - tablespace_id = GetDefaultTablespace(get_rel_persistence(relid)); + tablespace_id = GetDefaultTablespaceCompat(get_rel_persistence(relid), false); /* If tablespace is still invalid then use database's default */ if (!OidIsValid(tablespace_id)) @@ -274,7 +285,7 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) usercxt->current_item = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cache_stats, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cache_stats, false); TupleDescInitEntry(tupdesc, Anum_pathman_cs_context, "context", TEXTOID, -1, 0); @@ -381,13 +392,18 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt->pathman_config = heap_open(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + usercxt->pathman_config_scan = table_beginscan(usercxt->pathman_config, + usercxt->snapshot, 0, NULL); +#else usercxt->pathman_config_scan = heap_beginscan(usercxt->pathman_config, usercxt->snapshot, 0, NULL); +#endif usercxt->current_prel = NULL; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_partition_list, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_partition_list, false); TupleDescInitEntry(tupdesc, Anum_pathman_pl_parent, "parent", REGCLASSOID, -1, 0); @@ -555,7 +571,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(usercxt->pathman_config_scan); +#else heap_endscan(usercxt->pathman_config_scan); +#endif UnregisterSnapshot(usercxt->snapshot); heap_close(usercxt->pathman_config, AccessShareLock); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 0d3ca9d7..27361dd3 100644 --- a/src/pl_range_funcs.c 
+++ b/src/pl_range_funcs.c @@ -15,6 +15,9 @@ #include "utils.h" #include "xact_handling.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/transam.h" #include "access/xact.h" #include "catalog/heap.h" @@ -26,6 +29,9 @@ #include "parser/parse_relation.h" #include "parser/parse_expr.h" #include "utils/array.h" +#if PG_VERSION_NUM >= 120000 +#include "utils/float.h" +#endif #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/numeric.h" @@ -1084,6 +1090,8 @@ build_range_condition(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL")));; + /* lock the partition */ + LockRelationOid(partition_relid, ShareUpdateExclusiveLock); min = PG_ARGISNULL(2) ? MakeBoundInf(MINUS_INFINITY) : MakeBound(PG_GETARG_DATUM(2)); @@ -1329,7 +1337,7 @@ deparse_constraint(Oid relid, Node *expr) /* Initialize parse state */ pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); /* Transform constraint into executable expression (i.e. 
cook it) */ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 4766ded1..2c14959e 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -20,6 +20,9 @@ #include "relation_info.h" #include "rewrite/rewriteManip.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/htup_details.h" #include "foreign/fdwapi.h" #include "miscadmin.h" diff --git a/src/relation_info.c b/src/relation_info.c index d24af71d..0c79b504 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -16,6 +16,10 @@ #include "xact_handling.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/genam.h" +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/indexing.h" @@ -24,8 +28,12 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" #include "optimizer/var.h" +#endif #include "parser/analyze.h" #include "parser/parser.h" #include "storage/lmgr.h" diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 836a1fdd..92ae3e60 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -19,10 +19,14 @@ #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/cost.h" +#include "optimizer/var.h" +#endif #include "optimizer/planmain.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/lsyscache.h" diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 9683914b..2b5a5956 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -18,6 +18,10 @@ #include "partition_filter.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include 
"access/heapam.h" +#include "access/table.h" +#endif #include "access/sysattr.h" #include "access/xact.h" #include "catalog/namespace.h" @@ -501,7 +505,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; estate->es_result_relation_info = parent_rri; +#if PG_VERSION_NUM >= 120000 + ExecInitRangeTable(estate, range_table); +#else estate->es_range_table = range_table; +#endif /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, @@ -513,9 +521,11 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, RPS_RRI_CB(finish_rri_for_copy, NULL)); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlotCompat(estate, NULL); + myslot = ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc); +#if PG_VERSION_NUM < 120000 + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc, nothing_here); +#endif /* Prepare to catch AFTER triggers. 
*/ AfterTriggerBeginQuery(); @@ -535,7 +545,9 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { TupleTableSlot *slot; bool skip_tuple = false; +#if PG_VERSION_NUM < 120000 Oid tuple_oid = InvalidOid; +#endif ExprContext *econtext = GetPerTupleExprContext(estate); ResultRelInfoHolder *rri_holder; @@ -548,19 +560,25 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* Switch into per tuple memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) + if (!NextCopyFromCompat(cstate, econtext, values, nulls, &tuple_oid)) break; /* We can form the input tuple */ tuple = heap_form_tuple(tupDesc, values, nulls); +#if PG_VERSION_NUM < 120000 if (tuple_oid != InvalidOid) HeapTupleSetOid(tuple, tuple_oid); +#endif /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecSetSlotDescriptor(slot, tupDesc); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif /* Search for a matching partition */ rri_holder = select_partition_for_insert(&parts_storage, slot); @@ -581,13 +599,21 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, HeapTuple tuple_old; tuple_old = tuple; +#if PG_VERSION_NUM >= 120000 + tuple = execute_attr_map_tuple(tuple, rri_holder->tuple_map); +#else tuple = do_convert_tuple(tuple, rri_holder->tuple_map); +#endif heap_freetuple(tuple_old); } /* Now we can set proper tuple descriptor according to child relation */ ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif /* Triggers and stuff need to be invoked in query context. 
*/ MemoryContextSwitchTo(query_mcxt); @@ -596,12 +622,21 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, if (child_rri->ri_TrigDesc && child_rri->ri_TrigDesc->trig_insert_before_row) { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRInsertTriggers(estate, child_rri, slot)) + skip_tuple = true; + else /* trigger might have changed tuple */ + tuple = ExecFetchSlotHeapTuple(slot, false, NULL); +#else slot = ExecBRInsertTriggers(estate, child_rri, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; else /* trigger might have changed tuple */ + { tuple = ExecMaterializeSlot(slot); + } +#endif } /* Proceed if we still have a tuple */ @@ -618,11 +653,16 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, { /* OK, now store the tuple... */ simple_heap_insert(child_rri->ri_RelationDesc, tuple); +#if PG_VERSION_NUM >= 120000 /* since 12, tid lives directly in slot */ + ItemPointerCopy(&tuple->t_self, &slot->tts_tid); + /* and we must stamp tableOid as we go around table_tuple_insert */ + slot->tts_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); +#endif /* ... 
and create index entries for it */ if (child_rri->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false, NULL, NIL); + recheckIndexes = ExecInsertIndexTuplesCompat(slot, &(tuple->t_self), + estate, false, NULL, NIL); } #ifdef PG_SHARDMAN /* Handle foreign tables */ @@ -635,8 +675,13 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, #endif /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ +#if PG_VERSION_NUM >= 120000 + ExecARInsertTriggersCompat(estate, child_rri, slot, + recheckIndexes, NULL); +#else ExecARInsertTriggersCompat(estate, child_rri, tuple, recheckIndexes, NULL); +#endif list_free(recheckIndexes); @@ -798,7 +843,7 @@ PathmanRenameSequence(Oid parent_relid, /* parent Oid */ return; /* Finally, rename auto naming sequence */ - RenameRelationInternal(seq_relid, new_seq_name, false); + RenameRelationInternalCompat(seq_relid, new_seq_name, false, false); pfree(seq_nsp_name); pfree(old_seq_name); From e4bb77b683a338f0ae1c3dec6c99f861a8619f84 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:50:19 +0300 Subject: [PATCH 1027/1124] Pgpro-specific part of porting to 12. 
--- expected/pathman_hashjoin.out | 5 +++ expected/pathman_hashjoin_1.out | 5 +++ expected/pathman_hashjoin_2.out | 5 +++ expected/pathman_hashjoin_3.out | 70 ++++++++++++++++++++++++++++++++ expected/pathman_mergejoin.out | 5 +++ expected/pathman_mergejoin_1.out | 5 +++ expected/pathman_mergejoin_2.out | 5 +++ expected/pathman_mergejoin_3.out | 68 +++++++++++++++++++++++++++++++ sql/pathman_hashjoin.sql | 6 +++ sql/pathman_mergejoin.sql | 6 +++ src/include/compat/pg_compat.h | 6 +-- 11 files changed, 183 insertions(+), 3 deletions(-) create mode 100644 expected/pathman_hashjoin_3.out create mode 100644 expected/pathman_mergejoin_3.out diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 71ea1085..1e5b2783 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index 8e0007d4..af569764 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out index d0cba65d..c77146d1 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -1,3 +1,8 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out new file mode 100644 index 00000000..93613919 --- /dev/null +++ b/expected/pathman_hashjoin_3.out @@ -0,0 +1,70 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN 
+--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index ff2ae5bb..1bd9da6f 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index de87f09b..5b903dc1 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index acff2247..0168d556 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -1,3 +1,8 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out new file mode 100644 index 00000000..3d4a441c --- /dev/null +++ b/expected/pathman_mergejoin_3.out @@ -0,0 +1,68 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN 
+--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 411e0a7f..8a08569f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -1,3 +1,9 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 9b0b95b1..e85cc934 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -1,3 +1,9 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 26931fd9..c915503c 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -246,10 +246,10 @@ create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ (parallel_workers), false, NIL, -1) #else -/* TODO pgpro version */ +/* TODO pgpro version? 
Looks like something is not ported yet */ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ - create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ - (parallel_workers), false, NIL, -1, false, NIL) + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false) #endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 110000 From 7954c34976047b878c1a28005d7b07c7011cbfe8 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 18 Nov 2019 22:57:05 +0300 Subject: [PATCH 1028/1124] Add 12 to travis. --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 946eb606..ff2fac20 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=12 LEVEL=hardcore + - PG_VERSION=12 - PG_VERSION=11 LEVEL=hardcore - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore From a30c0a516cb7f2657c9d8247798887127773e1f6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 19 Nov 2019 18:13:43 +0300 Subject: [PATCH 1029/1124] Not sure why I do that, but fix cmocka tests. 
--- tests/cmocka/Makefile | 1 + tests/cmocka/missing_basic.c | 1 + tests/cmocka/missing_bitmapset.c | 1 + tests/cmocka/missing_list.c | 1 + tests/cmocka/missing_stringinfo.c | 1 + 5 files changed, 5 insertions(+) diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index e31e6d95..5216a467 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -8,6 +8,7 @@ CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) +CFLAGS += -D_GNU_SOURCE LDFLAGS += -lcmocka TEST_BIN = rangeset_tests diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index d6c3808e..7524abb5 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -1,6 +1,7 @@ #include #include "postgres.h" +#include "undef_printf.h" void * diff --git a/tests/cmocka/missing_bitmapset.c b/tests/cmocka/missing_bitmapset.c index 7e986d5a..84e7e771 100644 --- a/tests/cmocka/missing_bitmapset.c +++ b/tests/cmocka/missing_bitmapset.c @@ -1,4 +1,5 @@ #include "postgres.h" +#include "undef_printf.h" #include "nodes/bitmapset.h" diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c index 9c07bc10..5ddce8a8 100644 --- a/tests/cmocka/missing_list.c +++ b/tests/cmocka/missing_list.c @@ -13,6 +13,7 @@ * *------------------------------------------------------------------------- */ +#define _GNU_SOURCE #include "postgres.h" #include "nodes/pg_list.h" diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c index 8596bf7e..edf4d8a4 100644 --- a/tests/cmocka/missing_stringinfo.c +++ b/tests/cmocka/missing_stringinfo.c @@ -14,6 +14,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "undef_printf.h" #include "lib/stringinfo.h" #include "utils/memutils.h" From 6f51eb4f7b2414ae0307037ae94d6327c1e97385 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 19 Nov 2019 18:26:21 +0300 Subject: [PATCH 
1030/1124] Forgot undef file. --- tests/cmocka/undef_printf.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/cmocka/undef_printf.h diff --git a/tests/cmocka/undef_printf.h b/tests/cmocka/undef_printf.h new file mode 100644 index 00000000..63ba700c --- /dev/null +++ b/tests/cmocka/undef_printf.h @@ -0,0 +1,24 @@ +#ifdef vsnprintf +#undef vsnprintf +#endif +#ifdef snprintf +#undef snprintf +#endif +#ifdef vsprintf +#undef vsprintf +#endif +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif From 44b8962b80d8917d52fbf7d34f9ab7c8384a2f30 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 22 Nov 2019 18:49:27 +0300 Subject: [PATCH 1031/1124] Bump 1.5.10 lib version. --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- expected/pathman_calamity_1.out | 2 +- src/include/init.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index cd55fcb4..1201812c 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.9", + "version": "1.5.10", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.9", + "version": "1.5.10", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index c258b5cc..759d7dca 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.9 + 1.5.10 (1 row) set client_min_messages = NOTICE; diff --git 
a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index ee422784..e434f2eb 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.9 + 1.5.10 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 15efae16..931528ef 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -158,7 +158,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.9" +#define CURRENT_LIB_VERSION "1.5.10" void *pathman_cache_search_relid(HTAB *cache_table, From 30d07062dc038b13f576b9a6644621082c4837cc Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 26 Nov 2019 19:42:57 +0300 Subject: [PATCH 1032/1124] Create lateral test output file for pgpro. --- expected/pathman_lateral.out | 2 + expected/pathman_lateral_1.out | 121 +++++++++++++++++++++++++++++++++ sql/pathman_lateral.sql | 3 + 3 files changed, 126 insertions(+) create mode 100644 expected/pathman_lateral_1.out diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index e5148664..9bff1e57 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -1,3 +1,5 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out new file mode 100644 index 00000000..1dc67fe2 --- /dev/null +++ b/expected/pathman_lateral_1.out @@ -0,0 +1,121 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table 
partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 
100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index 49dee604..645e5f93 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -1,3 +1,6 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out + \set VERBOSITY terse SET search_path = 'public'; From f463e0e1b6cec2e1913760d637f8652fb6949048 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 3 Dec 2019 15:56:46 +0300 Subject: [PATCH 1033/1124] Add some quotes to SPI call in partition 
creation. --- src/partition_creation.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index e162e99e..3e578e70 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -605,15 +605,15 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Construct call to create_single_range_partition() */ create_sql = psprintf( "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", - get_namespace_name(get_pathman_schema()), - parent_nsp_name, - get_rel_name(parent_relid), + quote_identifier(get_namespace_name(get_pathman_schema())), + quote_identifier(parent_nsp_name), + quote_identifier(get_rel_name(parent_relid)), IsInfinite(&bounds[0]) ? "NULL" : datum_to_cstring(bounds[0].value, range_bound_type), typname, IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), typname, - parent_nsp_name, - partition_name + quote_identifier(parent_nsp_name), + quote_identifier(partition_name) ); /* ...and call it. */ From fa068e7a5cd04c76bb38dc2e073c6e01a14791ff Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 16 Dec 2019 17:06:37 +0300 Subject: [PATCH 1034/1124] Silence a couple of windows warnings. 
--- src/include/init.h | 2 ++ src/partition_router.c | 1 + 2 files changed, 3 insertions(+) diff --git a/src/include/init.h b/src/include/init.h index 931528ef..2e7b49d9 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -92,6 +92,8 @@ simplify_mcxt_name(MemoryContext mcxt) return PATHMAN_BOUNDS_CACHE; else elog(ERROR, "unknown memory context"); + + return NULL; /* keep compiler quiet */ } diff --git a/src/partition_router.c b/src/partition_router.c index 8c3bac55..b602347b 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -390,6 +390,7 @@ router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); else elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); + return *(ItemPointer) NULL; /* keep compiler quiet, lol */ } /* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */ From 8f68671ad22f175a0b232d40695cef7fe6fb77d3 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 2 Apr 2020 21:00:26 +0300 Subject: [PATCH 1035/1124] [PGPRO-3725] zero out garbage in append_rel_array if we allocate it. Since 1d9056f563f3 (who uses AppendRelInfo* existence as a mark this rel is child) in 11.7 this led to (known) random segfaults. --- src/hooks.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 12c053b2..ca1db9be 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -498,9 +498,13 @@ pathman_rel_pathlist_hook(PlannerInfo *root, irange_len * sizeof(RangeTblEntry *)); #if PG_VERSION_NUM >= 110000 - /* Make sure append_rel_array is wide enough */ + /* + * Make sure append_rel_array is wide enough; if it hasn't been + * allocated previously, care to zero out [0; current_len) part. 
+ */ if (root->append_rel_array == NULL) - root->append_rel_array = (AppendRelInfo **) palloc0(0); + root->append_rel_array = (AppendRelInfo **) + palloc0(current_len * sizeof(AppendRelInfo *)); root->append_rel_array = (AppendRelInfo **) repalloc(root->append_rel_array, new_len * sizeof(AppendRelInfo *)); From 7258169d0514779ea0bcbe11b339e037731b2d19 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 7 Apr 2020 22:32:05 +0300 Subject: [PATCH 1036/1124] Bump 1.5.11 lib version. --- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- expected/pathman_calamity_1.out | 2 +- src/include/init.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/META.json b/META.json index 1201812c..6bd1607d 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.10", + "version": "1.5.11", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.10", + "version": "1.5.11", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 759d7dca..0943bc5c 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.10 + 1.5.11 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index e434f2eb..b2e192e1 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -20,7 +20,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.10 + 1.5.11 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index 
2e7b49d9..f7f3df59 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -160,7 +160,7 @@ simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.10" +#define CURRENT_LIB_VERSION "1.5.11" void *pathman_cache_search_relid(HTAB *cache_table, From cbbf906760ccf676ae9ed0af810337e36a26dde6 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Wed, 8 Apr 2020 20:35:59 +0300 Subject: [PATCH 1037/1124] Fix func signature change in minor pgpro releases, arrgh. --- src/include/compat/pg_compat.h | 14 +++++++++++++- src/nodes_common.c | 2 +- src/partition_filter.c | 2 +- src/planner_tree_modification.c | 16 ++++++++-------- 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index c915503c..032840c5 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -992,7 +992,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal)) #endif - +/* + * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843 + * appeared, changing the signature, wow. It is not present in pgpro 1c + * though; PG_VERSION_STR is defined in std and ee but not in 1c, so it is + * hackishly used for distinguishing them. 
+ */ +#if defined(PGPRO_VERSION_STR) && (PG_VERSION_NUM >= 110006) +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context), 0) +#else +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context)) +#endif /* * ------------- diff --git a/src/nodes_common.c b/src/nodes_common.c index 8adf81dd..cf273fe6 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -373,7 +373,7 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt) return (Node *) var; } - return expression_tree_mutator(node, canonicalize_custom_exprs_mutator, NULL); + return expression_tree_mutator_compat(node, canonicalize_custom_exprs_mutator, NULL); } static List * diff --git a/src/partition_filter.c b/src/partition_filter.c index a923c650..f6cb5b60 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1164,7 +1164,7 @@ fix_returning_list_mutator(Node *node, void *state) return (Node *) var; } - return expression_tree_mutator(node, fix_returning_list_mutator, state); + return expression_tree_mutator_compat(node, fix_returning_list_mutator, state); } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2c14959e..6fc55c7b 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -709,15 +709,15 @@ adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) SubLink *sl = (SubLink *) node; /* Examine its expression */ - sl->testexpr = expression_tree_mutator(sl->testexpr, - adjust_appendrel_varnos, - context); + sl->testexpr = expression_tree_mutator_compat(sl->testexpr, + adjust_appendrel_varnos, + context); return (Node *) sl; } - return expression_tree_mutator(node, - adjust_appendrel_varnos, - context); + return expression_tree_mutator_compat(node, + adjust_appendrel_varnos, + context); } @@ -1063,8 +1063,8 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) } } - return 
expression_tree_mutator(node, eval_extern_params_mutator, - (void *) params); + return expression_tree_mutator_compat(node, eval_extern_params_mutator, + (void *) params); } /* Check whether Var translation list is trivial (no shuffle) */ From bf0a84ca516494e4459df3924108caf99edec734 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 14 Apr 2020 13:48:24 +0300 Subject: [PATCH 1038/1124] Use more specific macro for previous cbbf906760ccf6. --- src/include/compat/pg_compat.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 032840c5..c1805f80 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -26,6 +26,7 @@ #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" +#include "nodes/nodeFuncs.h" #if PG_VERSION_NUM >= 120000 #include "nodes/pathnodes.h" #else @@ -994,11 +995,11 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843 - * appeared, changing the signature, wow. It is not present in pgpro 1c - * though; PG_VERSION_STR is defined in std and ee but not in 1c, so it is - * hackishly used for distinguishing them. + * appeared, changing the signature, wow. There is no numeric pgpro edition + * macro (and never will be, for old versions), so distinguish via macro added + * by the commit. */ -#if defined(PGPRO_VERSION_STR) && (PG_VERSION_NUM >= 110006) +#ifdef QTW_DONT_COPY_DEFAULT #define expression_tree_mutator_compat(node, mutator, context) \ expression_tree_mutator((node), (mutator), (context), 0) #else From 4de7727d11a77d0e6d708c4298a1393f738e381a Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 8 Sep 2020 14:26:58 +0300 Subject: [PATCH 1039/1124] Adapt to 3737965249c fix of CREATE TABLE LIKE with inheritance. Since it LIKE must be handled after DefineRelation -- do it so. 
(added ifdef won't work for current dev branches as PG_VERSION_NUM is not bumped yet, but will do its job after releases) --- src/partition_creation.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/partition_creation.c b/src/partition_creation.c index 3e578e70..cd2a7b82 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -854,6 +854,37 @@ create_single_partition_internal(Oid parent_relid, { elog(ERROR, "FDW partition creation is not implemented yet"); } + /* + * 3737965249cd fix (since 12.5, 11.10, etc) reworked LIKE handling + * to process it after DefineRelation. + */ +#if (PG_VERSION_NUM >= 130000) || \ + ((PG_VERSION_NUM < 130000) && (PG_VERSION_NUM >= 120005)) || \ + ((PG_VERSION_NUM < 120000) && (PG_VERSION_NUM >= 110010)) || \ + ((PG_VERSION_NUM < 110000) && (PG_VERSION_NUM >= 100015)) || \ + ((PG_VERSION_NUM < 100000) && (PG_VERSION_NUM >= 90620)) || \ + ((PG_VERSION_NUM < 90600) && (PG_VERSION_NUM >= 90524)) + else if (IsA(cur_stmt, TableLikeClause)) + { + /* + * Do delayed processing of LIKE options. This + * will result in additional sub-statements for us + * to process. We can just tack those onto the + * to-do list. + */ + TableLikeClause *like = (TableLikeClause *) cur_stmt; + RangeVar *rv = create_stmt.relation; + List *morestmts; + + morestmts = expandTableLikeClause(rv, like); + create_stmts = list_concat(create_stmts, morestmts); + + /* + * We don't need a CCI now + */ + continue; + } +#endif else { /* From 34f4698df1c637a1e8e6a8afd12bcf175e1880de Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Thu, 22 Oct 2020 22:18:29 -0400 Subject: [PATCH 1040/1124] use python3 instead of python in tests/python/Makefile and tests/update/check_update.py --- tests/python/Makefile | 4 ++-- tests/update/check_update.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/python/Makefile b/tests/python/Makefile index f8a71e41..fed17cf3 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,6 +1,6 @@ partitioning_tests: ifneq ($(CASE),) - python partitioning_test.py Tests.$(CASE) + python3 partitioning_test.py Tests.$(CASE) else - python partitioning_test.py + python3 partitioning_test.py endif diff --git a/tests/update/check_update.py b/tests/update/check_update.py index 9ac4db62..4bd740f6 100755 --- a/tests/update/check_update.py +++ b/tests/update/check_update.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 #coding: utf-8 import shutil From 347f8dc423fd8528119961de7da7ba159bbdac14 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 23 Oct 2020 09:21:49 +0300 Subject: [PATCH 1041/1124] PostgreSQL 13 compatibility. Also try to log tail of PG logs on CI in a brave hope of catching troubles there. Currently travis job on 10 rarely fails at test_concurrent_updates, but we've failed to reproduce the issue locally. Note that test_concurrent_updates actually doesn't perform anything useful as testgres nodes aren't pickable due to weird logging implemented in separate thread, but nobody cared to check apply_async result so this has went unnoticed for a long time. 
--- .travis.yml | 2 + Makefile | 4 +- README.md | 4 +- expected/pathman_basic.out | 3 + expected/pathman_basic_1.out | 3 + expected/pathman_basic_2.out | 1819 ++++++++++++++++++++++++++++ expected/pathman_calamity.out | 3 + expected/pathman_calamity_1.out | 3 + expected/pathman_calamity_2.out | 1064 ++++++++++++++++ expected/pathman_column_type.out | 4 + expected/pathman_column_type_1.out | 203 ++++ expected/pathman_hashjoin.out | 3 + expected/pathman_hashjoin_1.out | 3 + expected/pathman_hashjoin_2.out | 3 + expected/pathman_hashjoin_3.out | 3 + expected/pathman_hashjoin_4.out | 81 ++ expected/pathman_hashjoin_5.out | 73 ++ expected/pathman_inserts.out | 4 + expected/pathman_inserts_1.out | 4 + expected/pathman_inserts_2.out | 1071 ++++++++++++++++ expected/pathman_lateral.out | 9 +- expected/pathman_lateral_2.out | 127 ++ expected/pathman_lateral_3.out | 126 ++ expected/pathman_mergejoin.out | 7 + expected/pathman_mergejoin_1.out | 7 + expected/pathman_mergejoin_2.out | 7 + expected/pathman_mergejoin_3.out | 7 + expected/pathman_mergejoin_4.out | 84 ++ expected/pathman_mergejoin_5.out | 75 ++ expected/pathman_only.out | 3 + expected/pathman_only_1.out | 3 + expected/pathman_only_2.out | 280 +++++ expected/pathman_rowmarks.out | 3 + expected/pathman_rowmarks_1.out | 3 + expected/pathman_rowmarks_2.out | 3 + expected/pathman_rowmarks_3.out | 390 ++++++ run_tests.sh | 2 + sql/pathman_basic.sql | 3 + sql/pathman_calamity.sql | 3 + sql/pathman_column_type.sql | 5 + sql/pathman_hashjoin.sql | 3 + sql/pathman_inserts.sql | 5 + sql/pathman_lateral.sql | 10 +- sql/pathman_mergejoin.sql | 7 + sql/pathman_only.sql | 3 + sql/pathman_rowmarks.sql | 3 + src/hooks.c | 48 +- src/include/compat/pg_compat.h | 46 +- src/include/hooks.h | 15 +- src/include/partition_filter.h | 8 +- src/include/relation_info.h | 7 +- src/init.c | 18 +- src/nodes_common.c | 19 +- src/partition_creation.c | 51 +- src/partition_filter.c | 52 +- src/pg_pathman.c | 8 +- src/pl_funcs.c | 57 +- 
src/pl_range_funcs.c | 17 +- src/planner_tree_modification.c | 14 +- src/rangeset.c | 15 +- src/relation_info.c | 42 +- src/runtime_merge_append.c | 8 +- src/utility_stmt_hooking.c | 6 +- tests/cmocka/missing_basic.c | 5 + tests/cmocka/missing_list.c | 310 ++++- tests/python/Makefile | 4 +- tests/python/partitioning_test.py | 17 +- 67 files changed, 6202 insertions(+), 100 deletions(-) create mode 100644 expected/pathman_basic_2.out create mode 100644 expected/pathman_calamity_2.out create mode 100644 expected/pathman_column_type_1.out create mode 100644 expected/pathman_hashjoin_4.out create mode 100644 expected/pathman_hashjoin_5.out create mode 100644 expected/pathman_inserts_2.out create mode 100644 expected/pathman_lateral_2.out create mode 100644 expected/pathman_lateral_3.out create mode 100644 expected/pathman_mergejoin_4.out create mode 100644 expected/pathman_mergejoin_5.out create mode 100644 expected/pathman_only_2.out create mode 100644 expected/pathman_rowmarks_3.out diff --git a/.travis.yml b/.travis.yml index ff2fac20..b020780b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=13 LEVEL=hardcore + - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore - PG_VERSION=12 - PG_VERSION=11 LEVEL=hardcore diff --git a/Makefile b/Makefile index c1281871..9ec19548 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,8 @@ REGRESS = pathman_array_qual \ EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add -EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output +CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o missing_bitmapset.o rangeset_tests.o rangeset_tests +EXTRA_CLEAN = ./isolation_output $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) ifdef USE_PGXS PG_CONFIG=pg_config @@ -74,6 +75,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') # check for declarative syntax +# this feature will not be ported to >=12 
ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) REGRESS += pathman_declarative OBJS += src/declarative.o diff --git a/README.md b/README.md index b49c20ec..39ce5df9 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ### NOTE: this project is not under development anymore -`pg_pathman` supports Postgres versions [9.5..12], but most probably it won't be ported to 13 and later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. +`pg_pathman` supports Postgres versions [9.5..13], but most probably it won't be ported to 14 and later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. # pg_pathman @@ -13,7 +13,7 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10, 11, 12; + * PostgreSQL 9.5, 9.6, 10, 11, 12, 13; * Postgres Pro Standard 9.5, 9.6, 10, 11, 12; * Postgres Pro Enterprise; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index aa5b5ab6..4117a00c 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -2,6 +2,9 @@ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output. Also, EXPLAIN now always shows key first in quals * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index d1403c77..702f9027 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -2,6 +2,9 @@ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output. Also, EXPLAIN now always shows key first in quals * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out new file mode 100644 index 00000000..28e46c14 --- /dev/null +++ b/expected/pathman_basic_2.out @@ -0,0 +1,1819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 hash_rel_1_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent 
+------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 hash_rel_1_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function 
pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id 
= 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 improved_dummy_1_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + 
create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + 
test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +--------------------------------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select insert_into_select_1 + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 insert_into_select_1_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET 
pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id 
< 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq 
Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN 
+---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + 
Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using 
range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq 
Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); 
+INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 
'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- 
+ test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + 
Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT 
pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu 
Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT 
pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers 
+----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 
+ drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | 
+(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition 
+------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM 
generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from 
test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_1_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT 
append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names 
CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 32 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 0943bc5c..50bfd803 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index b2e192e1..20c2ea6c 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out new file mode 100644 index 00000000..0c7757a9 --- /dev/null +++ b/expected/pathman_calamity_2.out @@ -0,0 +1,1064 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.11 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + 
create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 
'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' 
calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input 
syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ 
+SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * 
------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache 
disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on 
test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries 
+-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 + partition status cache | 2 +(4 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 + partition status cache | 2 +(4 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT 
get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 3ae9355c..4e2f3ff6 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -1,3 +1,7 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out new file mode 100644 index 00000000..d169719d --- /dev/null +++ b/expected/pathman_column_type_1.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 
1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 + partition status cache | 3 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 + partition status cache | 3 +(4 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_0 +NOTICE: 0 rows 
copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 1e5b2783..779efe3d 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index af569764..ae1edda6 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out index c77146d1..21cd1883 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out index 93613919..106e8c0e 100644 --- a/expected/pathman_hashjoin_3.out +++ b/expected/pathman_hashjoin_3.out @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out new file mode 100644 index 00000000..ad4b5651 --- /dev/null +++ b/expected/pathman_hashjoin_4.out @@ -0,0 +1,81 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2_1.dt + -> Hash Join + Hash Cond: (j1_1.id = j2_1.id) + -> Hash Join + Hash Cond: (j3_1.id = j1_1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using 
range_rel_2_pkey on range_rel_2 j1_2 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2_1 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_2 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 +(20 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out new file mode 100644 index 00000000..7bbea061 --- /dev/null +++ b/expected/pathman_hashjoin_5.out @@ -0,0 +1,73 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET 
enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3_1.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index cf05bd5a..225604c5 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1,3 +1,7 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index fd54aeef..a6634edd 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1,3 +1,7 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out new file mode 100644 index 00000000..9a439010 --- /dev/null +++ b/expected/pathman_inserts_2.out @@ -0,0 +1,1071 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. 
+(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. | 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) 
+ ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? 
+------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? 
+----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b, storage_1_1.d, storage_1_1.e + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on 
test_inserts.storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d, storage_14.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b, storage_1_1.d + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, 
storage_13.d + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_1 storage_1_1 + Output: storage_1_1.b + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 
other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_inserts CASCADE; +NOTICE: drop cascades to 19 other objects +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 9bff1e57..0cb1a864 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -1,5 +1,10 @@ --- Sometimes join selectivity improvements patches in pgpro force nested loop --- members swap -- in pathman_lateral_1.out +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out new file mode 100644 index 00000000..5ee4104c --- /dev/null +++ b/expected/pathman_lateral_2.out @@ -0,0 +1,127 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> 
Materialize + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + 
Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out new file mode 100644 index 00000000..dd64819d --- /dev/null +++ b/expected/pathman_lateral_3.out @@ -0,0 +1,126 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> 
Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) 
+ -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP SCHEMA test_lateral CASCADE; +NOTICE: drop cascades to 11 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index 1bd9da6f..ca3a3d9d 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index 5b903dc1..31da465a 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index 0168d556..4b614ad6 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out index 3d4a441c..7003205f 100644 --- a/expected/pathman_mergejoin_3.out +++ b/expected/pathman_mergejoin_3.out @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out new file mode 100644 index 00000000..185aa3d1 --- /dev/null +++ b/expected/pathman_mergejoin_4.out @@ -0,0 +1,84 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET 
enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2_1.dt + -> Merge Join + Merge Cond: (j2_1.id = j3_1.id) + -> Merge Join + Merge Cond: (j1_1.id = j2_1.id) + -> Merge Append + Sort Key: j1_1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Merge Append + Sort Key: j2_1.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(20 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out new file mode 100644 index 00000000..6ffe89cd --- /dev/null +++ b/expected/pathman_mergejoin_5.out @@ -0,0 +1,75 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3_1.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on 
num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index b54722d8..83425632 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index fe64e5c9..da913e54 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out new file mode 100644 index 00000000..39b8f199 --- /dev/null +++ b/expected/pathman_only_2.out @@ -0,0 +1,280 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. 
There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on 
from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test from_only_test_12 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq 
Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_2 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on 
from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: 
(from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP SCHEMA test_only CASCADE; +NOTICE: drop cascades to 12 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index 4b51cb65..f9ef8114 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. 
+ * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index e72e7076..e0877333 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out index a111d688..7436b081 100644 --- a/expected/pathman_rowmarks_2.out +++ b/expected/pathman_rowmarks_2.out @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out new file mode 100644 index 00000000..6179ff94 --- /dev/null +++ b/expected/pathman_rowmarks_3.out @@ -0,0 +1,390 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +--------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN 
(COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second 
SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) 
+ -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP SCHEMA rowmarks CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table rowmarks.first +drop cascades to table rowmarks.second +drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP EXTENSION pg_pathman; diff --git a/run_tests.sh b/run_tests.sh index 82d1f9d3..8f06d39c 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -134,6 +134,8 @@ make USE_PGXS=1 python_tests || status=$? deactivate set -x +if [ $status -ne 0 ]; then tail -n 2000 tests/python/tests.log; fi + # show Valgrind logs if necessary if [ "$LEVEL" = "nightmare" ]; then for f in $(find /tmp -name valgrind-*.log); do diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index a164d421..403424f5 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -2,6 +2,9 @@ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output. Also, EXPLAIN now always shows key first in quals * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index c380ea1d..b49d061c 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -4,6 +4,9 @@ * ERROR: invalid input syntax for type integer: "abc" * instead of * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 98c73908..685643fd 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -1,3 +1,8 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 8a08569f..2c3654d4 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -2,6 +2,9 @@ * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ \set VERBOSITY terse diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index 0f4859c4..c8c6439d 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -1,3 +1,8 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ + \set VERBOSITY terse SET search_path = 'public'; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index 645e5f93..d287c051 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -1,5 +1,11 @@ --- Sometimes join selectivity improvements patches in pgpro force nested loop --- members swap -- in pathman_lateral_1.out +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + \set VERBOSITY terse diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index e85cc934..05de4ba2 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -2,6 +2,13 @@ * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- */ \set VERBOSITY terse diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 6e34a9c1..53ef6a9a 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -7,6 +7,9 @@ * optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
*/ \set VERBOSITY terse diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index f1ac0fe9..ab7f24ac 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -5,6 +5,9 @@ * * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, * causing different output; pathman_rowmarks_2.out is the updated version. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/src/hooks.c b/src/hooks.c index ca1db9be..e9ff1ed7 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -3,7 +3,7 @@ * hooks.c * definitions of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -517,7 +517,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, } /* Parent has already been locked by rewriter */ - parent_rel = heap_open(rte->relid, NoLock); + parent_rel = heap_open_compat(rte->relid, NoLock); parent_rowmark = get_plan_rowmark(root->rowMarks, rti); @@ -537,7 +537,7 @@ pathman_rel_pathlist_hook(PlannerInfo *root, } /* Now close parent relation */ - heap_close(parent_rel, NoLock); + heap_close_compat(parent_rel, NoLock); /* Clear path list and make it point to NIL */ list_free_deep(rel->pathlist); @@ -673,9 +673,15 @@ execute_for_plantree(PlannedStmt *planned_stmt, * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from * handling those tables. + * + * Since >= 13 (6aba63ef3e6) query_string parameter was added. 
*/ PlannedStmt * +#if PG_VERSION_NUM >= 130000 +pathman_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +#else pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) +#endif { PlannedStmt *result; uint32 query_id = parse->queryId; @@ -696,9 +702,17 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Invoke original hook if needed */ if (pathman_planner_hook_next) +#if PG_VERSION_NUM >= 130000 + result = pathman_planner_hook_next(parse, query_string, cursorOptions, boundParams); +#else result = pathman_planner_hook_next(parse, cursorOptions, boundParams); +#endif else +#if PG_VERSION_NUM >= 130000 + result = standard_planner(parse, query_string, cursorOptions, boundParams); +#else result = standard_planner(parse, cursorOptions, boundParams); +#endif if (pathman_ready) { @@ -927,9 +941,21 @@ pathman_relcache_hook(Datum arg, Oid relid) /* * Utility function invoker hook. * NOTE: 'first_arg' is (PlannedStmt *) in PG 10, or (Node *) in PG <= 9.6. 
+ * In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct) */ void -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 130000 +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 100000 pathman_process_utility_hook(PlannedStmt *first_arg, const char *queryString, ProcessUtilityContext context, @@ -968,9 +994,14 @@ pathman_process_utility_hook(Node *first_arg, /* Handle our COPY case (and show a special cmd name) */ PathmanDoCopy((CopyStmt *) parsetree, queryString, stmt_location, stmt_len, &processed); +#if PG_VERSION_NUM >= 130000 + if (queryCompletion) + SetQueryCompletion(queryCompletion, CMDTAG_COPY, processed); +#else if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, processed); +#endif return; /* don't call standard_ProcessUtility() or hooks */ } @@ -1037,10 +1068,19 @@ pathman_process_utility_hook(Node *first_arg, } /* Finally call process_utility_hook_next or standard_ProcessUtility */ +#if PG_VERSION_NUM >= 130000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, queryCompletion); +#else call_process_utility_compat((pathman_process_utility_hook_next ? 
pathman_process_utility_hook_next : standard_ProcessUtility), first_arg, queryString, context, params, queryEnv, dest, completionTag); +#endif } diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index c1805f80..24a36fea 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -3,7 +3,7 @@ * pg_compat.h * Compatibility tools for PostgreSQL API * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -240,7 +240,14 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 120000 +#if PG_VERSION_NUM >= 130000 +/* + * PGPRO-3938 made create_append_path compatible with vanilla again + */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#elif PG_VERSION_NUM >= 120000 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ @@ -1058,5 +1065,40 @@ CustomEvalParamExternCompat(Param *param, void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +/* + * lnext() + * In >=13 list implementation was reworked (1cff1b95ab6) + */ +#if PG_VERSION_NUM >= 130000 +#define lnext_compat(l, lc) lnext((l), (lc)) +#else +#define lnext_compat(l, lc) lnext((lc)) +#endif + +/* + * heap_open() + * heap_openrv() + * heap_close() + * In >=13 heap_* was replaced with table_* (e0c4ec07284) + */ +#if PG_VERSION_NUM >= 130000 +#define heap_open_compat(r, l) table_open((r), (l)) +#define heap_openrv_compat(r, l) table_openrv((r), (l)) +#define heap_close_compat(r, l) table_close((r), (l)) +#else +#define heap_open_compat(r, l) heap_open((r), (l)) +#define heap_openrv_compat(r, l) heap_openrv((r), (l)) +#define heap_close_compat(r, l) heap_close((r), (l)) +#endif + +/* + * convert_tuples_by_name() + * In 
>=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + */ +#if PG_VERSION_NUM >= 130000 +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o)) +#else +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) +#endif #endif /* PG_COMPAT_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h index adf96d37..49d7e8f1 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -3,7 +3,7 @@ * hooks.h * prototypes of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -45,6 +45,9 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, void pathman_enable_assign_hook(bool newval, void *extra); PlannedStmt * pathman_planner_hook(Query *parse, +#if PG_VERSION_NUM >= 130000 + const char *query_string, +#endif int cursorOptions, ParamListInfo boundParams); @@ -55,7 +58,15 @@ void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 130000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 100000 void pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0b32e575..233054b7 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -3,7 +3,7 @@ * partition_filter.h * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -31,7 +31,13 @@ #define 
ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" #define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" +#if PG_VERSION_NUM < 130000 +/* + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + * and ERR_PART_DESC_CONVERT become unusable + */ #define ERR_PART_DESC_CONVERT "could not convert row type for partition" +#endif /* diff --git a/src/include/relation_info.h b/src/include/relation_info.h index 80b92740..a42bf727 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -3,7 +3,7 @@ * relation_info.h * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -309,9 +309,14 @@ PrelExpressionForRelid(const PartRelationInfo *prel, Index rti) return expr; } +#if PG_VERSION_NUM >= 130000 +AttrMap *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc); +#else AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length); +#endif /* PartType wrappers */ diff --git a/src/init.c b/src/init.c index bd85c593..86e96ebe 100644 --- a/src/init.c +++ b/src/init.c @@ -3,7 +3,7 @@ * init.c * Initialization functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -470,7 +470,7 @@ find_inheritance_children_array(Oid parent_relid, */ ArrayAlloc(oidarr, maxoids, numoids, 32); - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, @@ -490,7 
+490,7 @@ find_inheritance_children_array(Oid parent_relid, systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); /* * If we found more than one child, sort them by OID. This ensures @@ -655,7 +655,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, ObjectIdGetDatum(relid)); /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ Assert(TupleDescAttr(RelationGetDescr(rel), @@ -703,7 +703,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, heap_endscan(scan); #endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? "contains" : "doesn't contain"), relid); @@ -734,7 +734,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); #if PG_VERSION_NUM >= 120000 scan = table_beginscan(rel, snapshot, 1, key); @@ -764,7 +764,7 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) heap_endscan(scan); #endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return row_found; } @@ -1118,7 +1118,7 @@ get_plpgsql_frontend_version(void) char *version_cstr; /* Look up the extension */ - pg_extension_rel = heap_open(ExtensionRelationId, AccessShareLock); + pg_extension_rel = heap_open_compat(ExtensionRelationId, AccessShareLock); ScanKeyInit(&skey, Anum_pg_extension_extname, @@ -1143,7 +1143,7 @@ 
get_plpgsql_frontend_version(void) version_cstr = text_to_cstring(DatumGetTextPP(datum)); systable_endscan(scan); - heap_close(pg_extension_rel, AccessShareLock); + heap_close_compat(pg_extension_rel, AccessShareLock); return build_semver_uint32(version_cstr); } diff --git a/src/nodes_common.c b/src/nodes_common.c index cf273fe6..c2a02649 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -3,7 +3,7 @@ * nodes_common.c * Common code for custom nodes * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -364,11 +364,19 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt) Var *var = palloc(sizeof(Var)); *var = *(Var *) node; +#if PG_VERSION_NUM >= 130000 +/* + * In >=13 (9ce77d75c5) varnoold and varoattno were changed to varnosyn and + * varattnosyn, and they are not consulted in _equalVar anymore. + */ + var->varattno = var->varattnosyn; +#else /* Replace original 'varnoold' */ var->varnoold = INDEX_VAR; /* Restore original 'varattno' */ var->varattno = var->varoattno; +#endif return (Node *) var; } @@ -822,9 +830,18 @@ explain_append_common(CustomScanState *node, char *exprstr; /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 +/* + * Since 6ef77cf46e8 + */ + deparse_context = set_deparse_context_plan(es->deparse_cxt, + node->ss.ps.plan, + ancestors); +#else deparse_context = set_deparse_context_planstate(es->deparse_cxt, (Node *) node, ancestors); +#endif /* Deparse the expression */ exprstr = deparse_expression((Node *) make_ands_explicit(custom_exprs), diff --git a/src/partition_creation.c b/src/partition_creation.c index cd2a7b82..c7a944a1 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -3,7 +3,7 @@ * partition_creation.c * Various functions for partition creation. 
* - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * *------------------------------------------------------------------------- */ @@ -42,6 +42,9 @@ #include "parser/parse_utilcmd.h" #include "parser/parse_relation.h" #include "tcop/utility.h" +#if PG_VERSION_NUM >= 130000 +#include "utils/acl.h" +#endif #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" @@ -247,11 +250,11 @@ create_single_partition_common(Oid parent_relid, Relation child_relation; /* Open the relation and add new check constraint & fkeys */ - child_relation = heap_open(partition_relid, AccessExclusiveLock); + child_relation = heap_open_compat(partition_relid, AccessExclusiveLock); AddRelationNewConstraintsCompat(child_relation, NIL, list_make1(check_constraint), false, true, true); - heap_close(child_relation, NoLock); + heap_close_compat(child_relation, NoLock); /* Make constraint visible */ CommandCounterIncrement(); @@ -984,17 +987,17 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) Snapshot snapshot; /* Both parent & partition have already been locked */ - parent_rel = heap_open(parent_relid, NoLock); - partition_rel = heap_open(partition_relid, NoLock); + parent_rel = heap_open_compat(parent_relid, NoLock); + partition_rel = heap_open_compat(partition_relid, NoLock); make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); - heap_close(parent_rel, NoLock); - heap_close(partition_rel, NoLock); + heap_close_compat(parent_rel, NoLock); + heap_close_compat(partition_rel, NoLock); /* Open catalog's relations */ - pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); - pg_attribute_rel = heap_open(AttributeRelationId, RowExclusiveLock); + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); + pg_attribute_rel = heap_open_compat(AttributeRelationId, RowExclusiveLock); /* Get most recent snapshot */ snapshot = RegisterSnapshot(GetLatestSnapshot()); @@ 
-1165,8 +1168,8 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Don't forget to free snapshot */ UnregisterSnapshot(snapshot); - heap_close(pg_class_rel, RowExclusiveLock); - heap_close(pg_attribute_rel, RowExclusiveLock); + heap_close_compat(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_attribute_rel, RowExclusiveLock); } /* Copy foreign keys of parent table (updates pg_class) */ @@ -1235,7 +1238,7 @@ copy_rel_options(Oid parent_relid, Oid partition_relid) bool isnull[Natts_pg_class], replace[Natts_pg_class] = { false }; - pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); @@ -1273,7 +1276,7 @@ copy_rel_options(Oid parent_relid, Oid partition_relid) ReleaseSysCache(parent_htup); ReleaseSysCache(partition_htup); - heap_close(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_class_rel, RowExclusiveLock); /* Make changes visible */ CommandCounterIncrement(); @@ -1291,15 +1294,21 @@ void drop_pathman_check_constraint(Oid relid) { char *constr_name; +#if PG_VERSION_NUM >= 130000 + List *cmds; +#else AlterTableStmt *stmt; +#endif AlterTableCmd *cmd; /* Build a correct name for this constraint */ constr_name = build_check_constraint_name_relid_internal(relid); +#if PG_VERSION_NUM < 130000 stmt = makeNode(AlterTableStmt); stmt->relation = makeRangeVarFromRelid(relid); stmt->relkind = OBJECT_TABLE; +#endif cmd = makeNode(AlterTableCmd); cmd->subtype = AT_DropConstraint; @@ -1307,23 +1316,35 @@ drop_pathman_check_constraint(Oid relid) cmd->behavior = DROP_RESTRICT; cmd->missing_ok = true; +#if PG_VERSION_NUM >= 130000 + cmds = list_make1(cmd); + + /* + * Since 1281a5c907b AlterTable() was changed. 
+ * recurse = true (see stmt->relation->inh makeRangeVarFromRelid() makeRangeVar()) + * Dropping constraint won't do parse analyze, so AlterTableInternal + * is enough. + */ + AlterTableInternal(relid, cmds, true); +#else stmt->cmds = list_make1(cmd); /* See function AlterTableGetLockLevel() */ AlterTable(relid, AccessExclusiveLock, stmt); +#endif } /* Add pg_pathman's check constraint using 'relid' */ void add_pathman_check_constraint(Oid relid, Constraint *constraint) { - Relation part_rel = heap_open(relid, AccessExclusiveLock); + Relation part_rel = heap_open_compat(relid, AccessExclusiveLock); AddRelationNewConstraintsCompat(part_rel, NIL, list_make1(constraint), false, true, true); - heap_close(part_rel, NoLock); + heap_close_compat(part_rel, NoLock); } diff --git a/src/partition_filter.c b/src/partition_filter.c index f6cb5b60..3808dc26 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -3,7 +3,7 @@ * partition_filter.c * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -233,7 +233,7 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) { ExecCloseIndices(rri_holder->result_rel_info); /* And relation itself */ - heap_close(rri_holder->result_rel_info->ri_RelationDesc, + heap_close_compat(rri_holder->result_rel_info->ri_RelationDesc, NoLock); } @@ -307,7 +307,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) base_rel = parts_storage->base_rri->ri_RelationDesc; /* Open child relation and check if it is a valid target */ - child_rel = heap_open(partid, NoLock); + child_rel = heap_open_compat(partid, NoLock); /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); @@ -450,7 +450,7 @@ build_part_tuple_map(Relation base_rel, Relation child_rel) parent_tupdesc->tdtypeid = 
InvalidOid; /* Generate tuple transformation map and some other stuff */ - tuple_map = convert_tuples_by_name(parent_tupdesc, + tuple_map = convert_tuples_by_name_compat(parent_tupdesc, child_tupdesc, ERR_PART_DESC_CONVERT); @@ -592,6 +592,10 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, return result; } +/* + * Since 13 (e1551f96e64) AttrNumber[] and map_length was combined + * into one struct AttrMap + */ static ExprState * prepare_expr_state(const PartRelationInfo *prel, Relation source_rel, @@ -610,26 +614,44 @@ prepare_expr_state(const PartRelationInfo *prel, /* Should we try using map? */ if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) { +#if PG_VERSION_NUM >= 130000 + AttrMap *map; +#else AttrNumber *map; int map_length; +#endif TupleDesc source_tupdesc = RelationGetDescr(source_rel); /* Remap expression attributes for source relation */ +#if PG_VERSION_NUM >= 130000 + map = PrelExpressionAttributesMap(prel, source_tupdesc); +#else map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); +#endif if (map) { bool found_whole_row; +#if PG_VERSION_NUM >= 130000 + expr = map_variable_attnos(expr, PART_EXPR_VARNO, 0, map, + InvalidOid, + &found_whole_row); +#else expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, map_length, InvalidOid, &found_whole_row); +#endif if (found_whole_row) elog(ERROR, "unexpected whole-row reference" " found in partition key"); +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else pfree(map); +#endif } } @@ -1073,7 +1095,11 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, /* HACK: plan a fake query for FDW access to be planned as well */ elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); +#if PG_VERSION_NUM >= 130000 + plan = standard_planner(&query, NULL, 0, NULL); +#else plan = standard_planner(&query, 0, NULL); +#endif /* HACK: create a fake PlanState */ memset(&pstate, 0, sizeof(PlanState)); @@ -1147,7 +1173,11 @@ 
fix_returning_list_mutator(Node *node, void *state) for (i = 0; i < rri_holder->tuple_map->outdesc->natts; i++) { /* Good, 'varattno' of parent is child's 'i+1' */ +#if PG_VERSION_NUM >= 130000 + if (var->varattno == rri_holder->tuple_map->attrMap->attnums[i]) +#else if (var->varattno == rri_holder->tuple_map->attrMap[i]) +#endif { var->varattno = i + 1; /* attnos begin with 1 */ found_mapping = true; @@ -1189,19 +1219,25 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) /* Update estate_mod_data */ emd_struct->estate_not_modified = false; +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); +#endif +#if PG_VERSION_NUM >= 120000 && PG_VERSION_NUM < 130000 /* - * On PG >= 12, also add rte to es_range_table_array. This is horribly + * On PG = 12, also add rte to es_range_table_array. This is horribly * inefficient, yes. - * At least in 12 es_range_table_array ptr is not saved anywhere in + * In 12 es_range_table_array ptr is not saved anywhere in * core, so it is safe to repalloc. + * + * In >= 13 (3c92658) es_range_table_array was removed */ -#if PG_VERSION_NUM >= 120000 - estate->es_range_table_size = list_length(estate->es_range_table); estate->es_range_table_array = (RangeTblEntry **) repalloc(estate->es_range_table_array, estate->es_range_table_size * sizeof(RangeTblEntry *)); estate->es_range_table_array[estate->es_range_table_size - 1] = rte; +#endif +#if PG_VERSION_NUM >= 120000 /* * Also reallocate es_relations, because es_range_table_size defines its * len. This also ensures ExecEndPlan will close the rel. 
diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 285a130f..e3a46abd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -400,7 +400,7 @@ get_pathman_schema(void) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ext_oid)); - rel = heap_open(ExtensionRelationId, AccessShareLock); + rel = heap_open_compat(ExtensionRelationId, AccessShareLock); scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, NULL, 1, entry); @@ -414,7 +414,7 @@ get_pathman_schema(void) systable_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return result; } @@ -483,7 +483,7 @@ append_child_relation(PlannerInfo *root, parent_rte = root->simple_rte_array[parent_rti]; /* Open child relation (we've just locked it) */ - child_relation = heap_open(child_oid, NoLock); + child_relation = heap_open_compat(child_oid, NoLock); /* Create RangeTblEntry for child relation */ child_rte = copyObject(parent_rte); @@ -678,7 +678,7 @@ append_child_relation(PlannerInfo *root, } /* Close child relations, but keep locks */ - heap_close(child_relation, NoLock); + heap_close_compat(child_relation, NoLock); return child_rti; } diff --git a/src/pl_funcs.c b/src/pl_funcs.c index ebf80861..76ecbe3d 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -3,7 +3,7 @@ * pl_funcs.c * Utility C functions for stored procedures * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -367,6 +367,9 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) /* * List all existing partitions and their parents. 
+ * + * In >=13 (bc8393cf277) struct SPITupleTable was changed + * (free removed and numvals added) */ Datum show_partition_list_internal(PG_FUNCTION_ARGS) @@ -389,7 +392,7 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) palloc(sizeof(show_partition_list_cxt)); /* Open PATHMAN_CONFIG with latest snapshot available */ - usercxt->pathman_config = heap_open(get_pathman_config_relid(false), + usercxt->pathman_config = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); #if PG_VERSION_NUM >= 120000 @@ -433,7 +436,12 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable->tuptabcxt = tuptab_mcxt; /* Set up initial allocations */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced = PART_RELS_SIZE * CHILD_FACTOR; + tuptable->numvals = 0; +#else tuptable->alloced = tuptable->free = PART_RELS_SIZE * CHILD_FACTOR; +#endif tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); MemoryContextSwitchTo(old_mcxt); @@ -549,20 +557,34 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Form output tuple */ htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); +#if PG_VERSION_NUM >= 130000 + if (tuptable->numvals == tuptable->alloced) +#else if (tuptable->free == 0) +#endif { /* Double the size of the pointer array */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced += tuptable->alloced; +#else tuptable->free = tuptable->alloced; tuptable->alloced += tuptable->free; +#endif tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, tuptable->alloced * sizeof(HeapTuple)); } +#if PG_VERSION_NUM >= 130000 + /* Add tuple to table and increase 'numvals' */ + tuptable->vals[tuptable->numvals] = htup; + (tuptable->numvals)++; +#else /* Add tuple to table and decrement 'free' */ tuptable->vals[tuptable->alloced - tuptable->free] = htup; (tuptable->free)--; +#endif MemoryContextSwitchTo(old_mcxt); @@ -577,7 +599,7 @@ 
show_partition_list_internal(PG_FUNCTION_ARGS) heap_endscan(usercxt->pathman_config_scan); #endif UnregisterSnapshot(usercxt->snapshot); - heap_close(usercxt->pathman_config, AccessShareLock); + heap_close_compat(usercxt->pathman_config, AccessShareLock); usercxt->child_number = 0; } @@ -587,7 +609,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable = usercxt->tuptable; /* Iterate through used slots */ +#if PG_VERSION_NUM >= 130000 + if (usercxt->child_number < tuptable->numvals) +#else if (usercxt->child_number < (tuptable->alloced - tuptable->free)) +#endif { HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number++]; @@ -689,21 +715,34 @@ is_tuple_convertible(PG_FUNCTION_ARGS) { Relation rel1, rel2; +#if PG_VERSION_NUM >= 130000 + AttrMap *map; /* we don't actually need it */ +#else void *map; /* we don't actually need it */ +#endif - rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); - rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); + rel1 = heap_open_compat(PG_GETARG_OID(0), AccessShareLock); + rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); /* Try to build a conversion map */ +#if PG_VERSION_NUM >= 130000 + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2)); +#else map = convert_tuples_by_name_map(RelationGetDescr(rel1), RelationGetDescr(rel2), ERR_PART_DESC_CONVERT); +#endif /* Now free map */ +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else pfree(map); +#endif - heap_close(rel1, AccessShareLock); - heap_close(rel2, AccessShareLock); + heap_close_compat(rel1, AccessShareLock); + heap_close_compat(rel2, AccessShareLock); /* still return true to avoid changing tests */ PG_RETURN_BOOL(true); @@ -852,12 +891,12 @@ add_to_pathman_config(PG_FUNCTION_ARGS) isnull[Anum_pathman_config_expr - 1] = false; /* Insert new row into PATHMAN_CONFIG */ - pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + pathman_config = heap_open_compat(get_pathman_config_relid(false), 
RowExclusiveLock); htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); CatalogTupleInsert(pathman_config, htup); - heap_close(pathman_config, RowExclusiveLock); + heap_close_compat(pathman_config, RowExclusiveLock); /* Make changes visible */ CommandCounterIncrement(); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 27361dd3..12c247ab 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -3,7 +3,7 @@ * pl_range_funcs.c * Utility C functions for stored RANGE procedures * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -1320,12 +1320,18 @@ modify_range_constraint(Oid partition_relid, /* * Transform constraint into cstring + * + * In >=13 (5815696bc66) result type of addRangeTableEntryForRelationCompat() was changed */ static char * deparse_constraint(Oid relid, Node *expr) { Relation rel; +#if PG_VERSION_NUM >= 130000 + ParseNamespaceItem *nsitem; +#else RangeTblEntry *rte; +#endif Node *cooked_expr; ParseState *pstate; List *context; @@ -1333,12 +1339,17 @@ deparse_constraint(Oid relid, Node *expr) context = deparse_context_for(get_rel_name(relid), relid); - rel = heap_open(relid, NoLock); + rel = heap_open_compat(relid, NoLock); /* Initialize parse state */ pstate = make_parsestate(NULL); +#if PG_VERSION_NUM >= 130000 + nsitem = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); + addNSItemToQuery(pstate, nsitem, true, true, true); +#else rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); +#endif /* Transform constraint into executable expression (i.e. 
cook it) */ cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); @@ -1346,7 +1357,7 @@ deparse_constraint(Oid relid, Node *expr) /* Transform expression into string */ result = deparse_expression(cooked_expr, context, false, false); - heap_close(rel, NoLock); + heap_close_compat(rel, NoLock); return result; } diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 6fc55c7b..77a55bd3 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -3,7 +3,7 @@ * planner_tree_modification.c * Functions for query- and plan- tree modification * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -588,8 +588,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) rte->inh = false; /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); - parent_rel = heap_open(parent, NoLock); + child_rel = heap_open_compat(child, NoLock); + parent_rel = heap_open_compat(parent, NoLock); make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); @@ -611,8 +611,8 @@ handle_modification_query(Query *parse, transform_query_cxt *context) } /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); - heap_close(parent_rel, NoLock); + heap_close_compat(child_rel, NoLock); + heap_close_compat(parent_rel, NoLock); } } @@ -783,7 +783,7 @@ partition_filter_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); - lc3 = lnext(lc3); + lc3 = lnext_compat(modify_table->returningLists, lc3); } lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, @@ -849,7 +849,7 @@ partition_router_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); - lc3 = lnext(lc3); + lc3 = lnext_compat(modify_table->returningLists, lc3); } 
prouter = make_partition_router((Plan *) lfirst(lc1), diff --git a/src/rangeset.c b/src/rangeset.c index 15bb5849..9f7b2aa1 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -3,11 +3,12 @@ * rangeset.c * IndexRange functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "rangeset.h" @@ -238,25 +239,25 @@ irange_list_union(List *a, List *b) if (irange_lower(lfirst_irange(ca)) <= irange_lower(lfirst_irange(cb))) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } else { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } } /* Fetch next irange from A */ else if (ca) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } /* Fetch next irange from B */ else if (cb) { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } /* Put this irange to 'cur' if don't have it yet */ @@ -339,9 +340,9 @@ irange_list_intersection(List *a, List *b) * irange is greater (or equal) to upper bound of current. 
*/ if (irange_upper(ra) <= irange_upper(rb)) - ca = lnext(ca); + ca = lnext_compat(a, ca); if (irange_upper(ra) >= irange_upper(rb)) - cb = lnext(cb); + cb = lnext_compat(b, cb); } return result; } diff --git a/src/relation_info.c b/src/relation_info.c index 0c79b504..df60dde3 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -3,7 +3,7 @@ * relation_info.c * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -925,16 +925,26 @@ shout_if_prel_is_invalid(const Oid parent_oid, * This is a simplified version of functions that return TupleConversionMap. * It should be faster if expression uses a few fields of relation. */ +#if PG_VERSION_NUM >= 130000 +AttrMap * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc) +#else AttrNumber * PrelExpressionAttributesMap(const PartRelationInfo *prel, TupleDesc source_tupdesc, int *map_length) +#endif { Oid parent_relid = PrelParentRelid(prel); int source_natts = source_tupdesc->natts, expr_natts = 0; - AttrNumber *result, - i; +#if PG_VERSION_NUM >= 130000 + AttrMap *result; +#else + AttrNumber *result; +#endif + AttrNumber i; bool is_trivial = true; /* Get largest attribute number used in expression */ @@ -942,8 +952,12 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, while ((i = bms_next_member(prel->expr_atts, i)) >= 0) expr_natts = i; +#if PG_VERSION_NUM >= 130000 + result = make_attrmap(expr_natts); +#else /* Allocate array for map */ result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); +#endif /* Find a match for each attribute */ i = -1; @@ -964,26 +978,44 @@ PrelExpressionAttributesMap(const PartRelationInfo *prel, if (strcmp(NameStr(att->attname), attname) == 0) { +#if PG_VERSION_NUM >= 130000 + result->attnums[attnum - 1] = (AttrNumber) (j + 1); +#else result[attnum - 1] = 
(AttrNumber) (j + 1); +#endif break; } } +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] == 0) +#else if (result[attnum - 1] == 0) +#endif elog(ERROR, "cannot find column \"%s\" in child relation", attname); +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] != attnum) +#else if (result[attnum - 1] != attnum) +#endif is_trivial = false; } /* Check if map is trivial */ if (is_trivial) { +#if PG_VERSION_NUM >= 130000 + free_attrmap(result); +#else pfree(result); +#endif return NULL; } +#if PG_VERSION_NUM < 130000 *map_length = expr_natts; +#endif return result; } @@ -1330,7 +1362,7 @@ get_parent_of_partition(Oid partition) HeapTuple htup; Oid parent = InvalidOid; - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid, @@ -1359,7 +1391,7 @@ get_parent_of_partition(Oid partition) } systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); return parent; } diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 92ae3e60..601c663f 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -3,7 +3,7 @@ * runtime_merge_append.c * RuntimeMergeAppend node's function definitions and global variables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -898,9 +898,15 @@ show_sort_group_keys(PlanState *planstate, const char *qlabel, initStringInfo(&sortkeybuf); /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 + context = set_deparse_context_plan(es->deparse_cxt, + plan, + ancestors); +#else context = set_deparse_context_planstate(es->deparse_cxt, (Node *) planstate, ancestors); +#endif useprefix = (list_length(es->rtable) > 1 || 
es->verbose); for (keyno = 0; keyno < nkeys; keyno++) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 2b5a5956..c9ffbf14 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -4,7 +4,7 @@ * Override COPY TO/FROM and ALTER TABLE ... RENAME statements * for partitioned tables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -401,7 +401,7 @@ PathmanDoCopy(const CopyStmt *stmt, Assert(!stmt->query); /* Open the relation (we've locked it in is_pathman_related_copy()) */ - rel = heap_openrv(stmt->relation, NoLock); + rel = heap_openrv_compat(stmt->relation, NoLock); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; @@ -468,7 +468,7 @@ PathmanDoCopy(const CopyStmt *stmt, } /* Close the relation, but keep it locked */ - heap_close(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); + heap_close_compat(rel, (is_from ? 
NoLock : PATHMAN_COPY_READ_LOCK)); } /* diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index 7524abb5..36d76160 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -16,6 +16,11 @@ repalloc(void *pointer, Size size) return realloc(pointer, size); } +void +pfree(void *pointer) +{ + free(pointer); +} void ExceptionalCondition(const char *conditionName, diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c index 5ddce8a8..b85eed94 100644 --- a/tests/cmocka/missing_list.c +++ b/tests/cmocka/missing_list.c @@ -1,10 +1,10 @@ /*------------------------------------------------------------------------- * * list.c - * implementation for PostgreSQL generic linked list package + * implementation for PostgreSQL generic list package * * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -13,10 +13,11 @@ * *------------------------------------------------------------------------- */ -#define _GNU_SOURCE #include "postgres.h" + #include "nodes/pg_list.h" +#if PG_VERSION_NUM < 130000 #define IsPointerList(l) ((l) == NIL || IsA((l), List)) #define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) @@ -141,3 +142,306 @@ lcons(void *datum, List *list) return list; } + +#else /* PG_VERSION_NUM >= 130000 */ + +/*------------------------------------------------------------------------- + * + * This was taken from src/backend/nodes/list.c PostgreSQL-13 source code. + * We only need lappend() and lcons() and their dependencies. + * There is one change: we use palloc() instead MemoryContextAlloc() in + * enlarge_list() (see #defines). 
+ * + *------------------------------------------------------------------------- + */ +#include "port/pg_bitutils.h" +#include "utils/memdebug.h" +#include "utils/memutils.h" + +#define MemoryContextAlloc(c, s) palloc(s) +#define GetMemoryChunkContext(l) 0 + +/* + * The previous List implementation, since it used a separate palloc chunk + * for each cons cell, had the property that adding or deleting list cells + * did not move the storage of other existing cells in the list. Quite a + * bit of existing code depended on that, by retaining ListCell pointers + * across such operations on a list. There is no such guarantee in this + * implementation, so instead we have debugging support that is meant to + * help flush out now-broken assumptions. Defining DEBUG_LIST_MEMORY_USAGE + * while building this file causes the List operations to forcibly move + * all cells in a list whenever a cell is added or deleted. In combination + * with MEMORY_CONTEXT_CHECKING and/or Valgrind, this can usually expose + * broken code. It's a bit expensive though, as there's many more palloc + * cycles and a lot more data-copying than in a default build. + * + * By default, we enable this when building for Valgrind. + */ +#ifdef USE_VALGRIND +#define DEBUG_LIST_MEMORY_USAGE +#endif + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD \ + ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Macros to simplify writing assertions about the type of a list; a + * NIL list is considered to be an empty list of any type. + */ +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + +#ifdef USE_ASSERT_CHECKING +/* + * Check that the specified List is valid (so far as we can tell). 
+ */ +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->length <= list->max_length); + Assert(list->elements != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); +} +#else +#define check_list_invariants(l) ((void) 0) +#endif /* USE_ASSERT_CHECKING */ + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. + */ +static List * +new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. + * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. 
+ * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} + +/* + * Enlarge an existing non-NIL List to have room for at least min_size cells. + * + * This does *not* update list->length, as some callers would find that + * inconvenient. (list->length had better be the correct number of existing + * valid cells, though.) + */ +static void +enlarge_list(List *list, int min_size) +{ + int new_max_len; + + Assert(min_size > list->max_length); /* else we shouldn't be here */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * As above, we prefer power-of-two total allocations; but here we need + * not account for list header overhead. + */ + + /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */ + new_max_len = pg_nextpower2_32(Max(16, min_size)); + +#else + /* As above, don't allocate anything extra */ + new_max_len = min_size; +#endif + + if (list->elements == list->initial_elements) + { + /* + * Replace original in-line allocation with a separate palloc block. + * Ensure it is in the same memory context as the List header. (The + * previous List implementation did not offer any guarantees about + * keeping all list cells in the same context, but it seems reasonable + * to create such a guarantee now.) 
+ */ + list->elements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(list->elements, list->initial_elements, + list->length * sizeof(ListCell)); + + /* + * We must not move the list header, so it's unsafe to try to reclaim + * the initial_elements[] space via repalloc. In debugging builds, + * however, we can clear that space and/or mark it inaccessible. + * (wipe_mem includes VALGRIND_MAKE_MEM_NOACCESS.) + */ +#ifdef CLOBBER_FREED_MEMORY + wipe_mem(list->initial_elements, + list->max_length * sizeof(ListCell)); +#else + VALGRIND_MAKE_MEM_NOACCESS(list->initial_elements, + list->max_length * sizeof(ListCell)); +#endif + } + else + { +#ifndef DEBUG_LIST_MEMORY_USAGE + /* Normally, let repalloc deal with enlargement */ + list->elements = (ListCell *) repalloc(list->elements, + new_max_len * sizeof(ListCell)); +#else + /* + * repalloc() might enlarge the space in-place, which we don't want + * for debugging purposes, so forcibly move the data somewhere else. + */ + ListCell *newelements; + + newelements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(newelements, list->elements, + list->length * sizeof(ListCell)); + pfree(list->elements); + list->elements = newelements; +#endif + } + + list->max_length = new_max_len; +} + +/* + * Make room for a new head cell in the given (non-NIL) list. + * + * The data in the new head cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_head_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + /* Now shove the existing data over */ + memmove(&list->elements[1], &list->elements[0], + list->length * sizeof(ListCell)); + list->length++; +} + +/* + * Make room for a new tail cell in the given (non-NIL) list. 
+ * + * The data in the new tail cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_tail_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + list->length++; +} + +/* + * Append a pointer to the list. A pointer to the modified list is + * returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * first argument. + */ +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_tail_cell(list); + + lfirst(list_tail(list)) = datum; + check_list_invariants(list); + return list; +} + +/* + * Prepend a new element to the list. A pointer to the modified list + * is returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * second argument. + * + * Caution: before Postgres 8.0, the original List was unmodified and + * could be considered to retain its separate identity. This is no longer + * the case. 
+ */ +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_head_cell(list); + + lfirst(list_head(list)) = datum; + check_list_invariants(list); + return list; +} + +#endif /* PG_VERSION_NUM */ diff --git a/tests/python/Makefile b/tests/python/Makefile index fed17cf3..8311bb12 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,6 +1,6 @@ partitioning_tests: ifneq ($(CASE),) - python3 partitioning_test.py Tests.$(CASE) + python3 -u partitioning_test.py Tests.$(CASE) else - python3 partitioning_test.py + python3 -u partitioning_test.py endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 0e3d1492..ad555455 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -4,7 +4,7 @@ partitioning_test.py Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2020, Postgres Professional """ import functools @@ -21,10 +21,11 @@ import unittest from distutils.version import LooseVersion -from testgres import get_new_node, get_pg_version +from testgres import get_new_node, get_pg_version, configure_testgres -# set setup base logging config, it can be turned on by `use_logging` +# set setup base logging config, it can be turned on by `use_python_logging` # parameter on node setup +# configure_testgres(use_python_logging=True) import logging import logging.config @@ -548,7 +549,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) # Check count of returned tuples count = con.execute( @@ -601,7 +602,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) # Check tuples returned by query 
above res_tuples = con.execute( @@ -1128,4 +1129,8 @@ def make_updates(node, count): else: suite = unittest.TestLoader().loadTestsFromTestCase(Tests) - unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + configure_testgres(use_python_logging=True) + + result = unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + if not result.wasSuccessful(): + sys.exit(1) From e0171c8ef1b7d0b9143fa800364a60b083e33ef6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 27 Oct 2020 17:50:38 +0300 Subject: [PATCH 1042/1124] Fix for CVE-2020-14350. - Explicit casts to ensure exact match to pathman functions instead of pwning ones. - Explicit use of @extschema@ and pg_catalog schemas where possible (except for operators). - Replace unsafe OR REPLACE clause. This is believed to remove the possibility of malicious internal functions overloading. For more information, see the documentation: 37.17.6.2. Security Considerations for Extension Scripts (https://www.postgresql.org/docs/current/extend-extensions.html#EXTEND-EXTENSIONS-SECURITY) 5.9.6. 
Usage Patterns (https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATTERNS) --- Makefile | 3 +- README.md | 22 +++-- expected/pathman_CVE-2020-14350.out | 115 +++++++++++++++++++++ hash.sql | 22 ++--- init.sql | 148 ++++++++++++++-------------- range.sql | 100 +++++++++---------- sql/pathman_CVE-2020-14350.sql | 77 +++++++++++++++ src/partition_creation.c | 2 +- src/pathman_workers.c | 2 +- 9 files changed, 346 insertions(+), 145 deletions(-) create mode 100644 expected/pathman_CVE-2020-14350.out create mode 100644 sql/pathman_CVE-2020-14350.sql diff --git a/Makefile b/Makefile index c1281871..b198a6a1 100644 --- a/Makefile +++ b/Makefile @@ -61,7 +61,8 @@ REGRESS = pathman_array_qual \ pathman_update_triggers \ pathman_upd_del \ pathman_utility_stmt \ - pathman_views + pathman_views \ + pathman_CVE-2020-14350 EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add diff --git a/README.md b/README.md index b49c20ec..2f95a738 100644 --- a/README.md +++ b/README.md @@ -95,11 +95,19 @@ shared_preload_libraries = 'pg_pathman' It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: ```plpgsql -CREATE EXTENSION pg_pathman; +CREATE SCHEMA pathman; +GRANT USAGE ON SCHEMA pathman TO PUBLIC; +CREATE EXTENSION pg_pathman WITH SCHEMA pathman; ``` Done! Now it's time to setup your partitioning schemes. +> **Security notice**: pg_pathman is believed to be secure against +search-path-based attacks mentioned in Postgres +[documentation](https://www.postgresql.org/docs/current/sql-createextension.html). However, +if *your* calls of pathman's functions doesn't exactly match the signature, they +might be vulnerable to malicious overloading. If in doubt, install pathman to clean schema where nobody except superusers have CREATE object permission to avoid problems. + > **Windows-specific**: pg_pathman imports several symbols (e.g. 
None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. ## How to update @@ -611,7 +619,7 @@ SELECT tableoid::regclass AS partition, * FROM partitioned_table; - All running concurrent partitioning tasks can be listed using the `pathman_concurrent_part_tasks` view: ```plpgsql SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status + userid | pid | dbid | relid | processed | status --------+------+-------+-------+-----------+--------- dmitry | 7367 | 16384 | test | 472000 | working (1 row) @@ -625,7 +633,7 @@ WHERE parent = 'part_test'::regclass AND range_min::int < 500; NOTICE: 1 rows copied from part_test_11 NOTICE: 100 rows copied from part_test_1 NOTICE: 100 rows copied from part_test_2 - drop_range_partition + drop_range_partition ---------------------- dummy_test_11 dummy_test_1 @@ -780,8 +788,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. 
## Authors -[Ildar Musin](https://github.com/zilder) -Alexander Korotkov Postgres Professional Ltd., Russia -[Dmitry Ivanov](https://github.com/funbringer) -Maksim Milyutin Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) +Alexander Korotkov Postgres Professional Ltd., Russia +[Dmitry Ivanov](https://github.com/funbringer) +Maksim Milyutin Postgres Professional Ltd., Russia [Ildus Kurbangaliev](https://github.com/ildus) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out new file mode 100644 index 00000000..c91a280f --- /dev/null +++ b/expected/pathman_CVE-2020-14350.out @@ -0,0 +1,115 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. + */ +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS regress_hacker; +SET client_min_messages = 'notice'; +CREATE EXTENSION pg_pathman; +CREATE ROLE regress_hacker LOGIN; +-- Test 1 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +NOTICE: worker 
started, you can stop it with the following command: select public.stop_concurrent_part_task('test1'); + partition_table_concurrently +------------------------------ + +(1 row) + +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Test 2 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test2 values(1); +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table test1_0 +drop cascades to table test1_1 +drop cascades to table test1_2 +drop cascades to table test1_3 +drop cascades to table test1_4 +DROP TABLE test2 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to sequence test2_seq +drop cascades to table test2_1 +drop cascades to table test2_2 +DROP ROLE regress_hacker; +DROP EXTENSION pg_pathman; diff --git a/hash.sql b/hash.sql index 45c9b71d..b22fd75e 100644 --- a/hash.sql +++ b/hash.sql @@ -3,7 +3,7 @@ * hash.sql * HASH partitioning functions * 
- * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Creates hash partitions for specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( +CREATE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, expression TEXT, partitions_count INT4, @@ -53,7 +53,7 @@ SET client_min_messages = WARNING; * * lock_parent - should we take an exclusive lock? */ -CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( +CREATE FUNCTION @extschema@.replace_hash_partition( old_partition REGCLASS, new_partition REGCLASS, lock_parent BOOL DEFAULT TRUE) @@ -110,18 +110,18 @@ BEGIN /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + WHERE conrelid = old_partition AND pg_catalog.quote_ident(conname) = old_constr_name INTO old_constr_def; /* Detach old partition */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', old_partition, old_constr_name); /* Attach the new one */ - EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s %s', new_partition, @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); @@ -146,7 +146,7 @@ $$ LANGUAGE plpgsql; /* * Just create HASH partitions, called by create_hash_partitions(). 
*/ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( +CREATE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, partitions_count INT4, @@ -158,14 +158,14 @@ LANGUAGE C; /* * Calculates hash for integer value */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) +CREATE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; /* * Build hash condition for a CHECK CONSTRAINT */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( +CREATE FUNCTION @extschema@.build_hash_condition( attribute_type REGTYPE, attribute TEXT, partitions_count INT4, diff --git a/init.sql b/init.sql index 16ec0b8f..123b2a36 100644 --- a/init.sql +++ b/init.sql @@ -3,7 +3,7 @@ * init.sql * Creates config table and provides common utility functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -14,7 +14,7 @@ * to partitioning key. The function throws an error if it fails to convert * text to Datum */ -CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( +CREATE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, expr TEXT, parttype INTEGER, @@ -31,7 +31,7 @@ LANGUAGE C; * range_interval - base interval for RANGE partitioning as string * cooked_expr - cooked partitioning expression (parsed & rewritten) */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( +CREATE TABLE @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, expr TEXT NOT NULL, parttype INTEGER NOT NULL, @@ -55,7 +55,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( * * NOTE: this function is used in CHECK CONSTRAINT. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( +CREATE FUNCTION @extschema@.validate_part_callback( callback REGPROCEDURE, raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' @@ -70,7 +70,7 @@ LANGUAGE C STRICT; * init_callback - text signature of cb to be executed on partition creation * spawn_using_bgw - use background worker in order to auto create partitions */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( +CREATE TABLE @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, @@ -91,7 +91,7 @@ TO public; /* * Check if current user can alter/drop specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +CREATE FUNCTION @extschema@.check_security_policy(relation regclass) RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; /* @@ -113,7 +113,7 @@ ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; /* * Invalidate relcache every time someone changes parameters config or pathman_config */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +CREATE FUNCTION @extschema@.pathman_config_params_trigger_func() RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' LANGUAGE C; @@ -135,13 +135,13 @@ SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', /* * Add a row describing the optional parameter to pathman_config_params. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( +CREATE FUNCTION @extschema@.pathman_set_param( relation REGCLASS, param TEXT, value ANYELEMENT) RETURNS VOID AS $$ BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params + EXECUTE pg_catalog.format('INSERT INTO @extschema@.pathman_config_params (partrel, %1$s) VALUES ($1, $2) ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) USING relation, value; @@ -151,7 +151,7 @@ $$ LANGUAGE plpgsql; /* * Include\exclude parent relation in query plan. */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( +CREATE FUNCTION @extschema@.set_enable_parent( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -163,7 +163,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Enable\disable automatic partition creation. */ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( +CREATE FUNCTION @extschema@.set_auto( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -175,7 +175,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set partition creation callback */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( +CREATE FUNCTION @extschema@.set_init_callback( relation REGCLASS, callback REGPROCEDURE DEFAULT 0) RETURNS VOID AS $$ @@ -186,10 +186,10 @@ BEGIN /* Fetch schema-qualified name of callback */ IF callback != 0 THEN - SELECT quote_ident(nspname) || '.' || - quote_ident(proname) || '(' || - (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') - FROM unnest(proargtypes) AS x(argtype)) || + SELECT pg_catalog.quote_ident(nspname) || '.' 
|| + pg_catalog.quote_ident(proname) || '(' || + (SELECT pg_catalog.string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM pg_catalog.unnest(proargtypes) AS x(argtype)) || ')' FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace @@ -204,7 +204,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set 'spawn using BGW' option */ -CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( +CREATE FUNCTION @extschema@.set_spawn_using_bgw( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -216,7 +216,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set (or reset) default interval for auto created partitions */ -CREATE OR REPLACE FUNCTION @extschema@.set_interval( +CREATE FUNCTION @extschema@.set_interval( relation REGCLASS, value ANYELEMENT) RETURNS VOID AS $$ @@ -240,7 +240,7 @@ $$ LANGUAGE plpgsql; /* * Show all existing parents and partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +CREATE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( parent REGCLASS, partition REGCLASS, @@ -254,7 +254,7 @@ LANGUAGE C STRICT; /* * View for show_partition_list(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +CREATE VIEW @extschema@.pathman_partition_list AS SELECT * FROM @extschema@.show_partition_list(); GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; @@ -262,7 +262,7 @@ GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; /* * Show memory usage of pg_pathman's caches. */ -CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +CREATE FUNCTION @extschema@.show_cache_stats() RETURNS TABLE ( context TEXT, size INT8, @@ -274,13 +274,13 @@ LANGUAGE C STRICT; /* * View for show_cache_stats(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +CREATE VIEW @extschema@.pathman_cache_stats AS SELECT * FROM @extschema@.show_cache_stats(); /* * Show all existing concurrent partitioning tasks. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, @@ -294,7 +294,7 @@ LANGUAGE C STRICT; /* * View for show_concurrent_part_tasks(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +CREATE VIEW @extschema@.pathman_concurrent_part_tasks AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; @@ -302,7 +302,7 @@ GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; /* * Partition table using ConcurrentPartWorker. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( +CREATE FUNCTION @extschema@.partition_table_concurrently( relation REGCLASS, batch_size INTEGER DEFAULT 1000, sleep_time FLOAT8 DEFAULT 1.0) @@ -312,7 +312,7 @@ LANGUAGE C STRICT; /* * Stop concurrent partitioning task. */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( +CREATE FUNCTION @extschema@.stop_concurrent_part_task( relation REGCLASS) RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' LANGUAGE C STRICT; @@ -321,7 +321,7 @@ LANGUAGE C STRICT; /* * Copy rows to partitions concurrently. 
*/ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( +CREATE FUNCTION @extschema@._partition_data_concurrent( relation REGCLASS, p_min ANYELEMENT DEFAULT NULL::text, p_max ANYELEMENT DEFAULT NULL::text, @@ -341,19 +341,19 @@ BEGIN /* Format LIMIT clause if needed */ IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); + v_limit_clause := pg_catalog.format('LIMIT %s', p_limit); END IF; /* Format WHERE clause if needed */ IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', part_expr); + v_where_clause := pg_catalog.format('%1$s >= $1', part_expr); END IF; IF NOT p_max IS NULL THEN IF NOT p_min IS NULL THEN v_where_clause := v_where_clause || ' AND '; END IF; - v_where_clause := v_where_clause || format('%1$s < $2', part_expr); + v_where_clause := v_where_clause || pg_catalog.format('%1$s < $2', part_expr); END IF; IF v_where_clause != '' THEN @@ -362,12 +362,12 @@ BEGIN /* Lock rows and copy data */ RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + EXECUTE pg_catalog.format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', relation, v_where_clause, v_limit_clause) USING p_min, p_max INTO ctids; - EXECUTE format('WITH data AS ( + EXECUTE pg_catalog.format('WITH data AS ( DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) INSERT INTO %1$s SELECT * FROM data', relation) @@ -383,7 +383,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Old school way to distribute rows to partitions. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( +CREATE FUNCTION @extschema@.partition_data( parent_relid REGCLASS, OUT p_total BIGINT) AS $$ @@ -391,7 +391,7 @@ BEGIN p_total := 0; /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + EXECUTE pg_catalog.format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) INSERT INTO %1$s SELECT * FROM part_data', parent_relid::TEXT); @@ -405,7 +405,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Disable pathman partitioning for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( +CREATE FUNCTION @extschema@.disable_pathman_for( parent_relid REGCLASS) RETURNS VOID AS $$ BEGIN @@ -420,7 +420,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Check a few things and take locks before partitioning. */ -CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( +CREATE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, expression TEXT, partition_data BOOLEAN) @@ -455,7 +455,7 @@ BEGIN RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; - IF EXISTS (SELECT 1 FROM pg_inherits WHERE inhparent = parent_relid) THEN + IF EXISTS (SELECT 1 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid) THEN RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; END IF; @@ -478,7 +478,7 @@ $$ LANGUAGE plpgsql; /* * Returns relname without quotes or something. */ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( +CREATE FUNCTION @extschema@.get_plain_schema_and_relname( cls REGCLASS, OUT schema TEXT, OUT relname TEXT) @@ -494,7 +494,7 @@ $$ LANGUAGE plpgsql STRICT; /* * DDL trigger that removes entry from pathman_config table. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +CREATE FUNCTION @extschema@.pathman_ddl_trigger_func() RETURNS event_trigger AS $$ DECLARE obj RECORD; @@ -505,8 +505,8 @@ BEGIN pg_class_oid = 'pg_catalog.pg_class'::regclass; /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events + SELECT pg_catalog.array_agg(cfg.partrel) INTO relids + FROM pg_catalog.pg_event_trigger_dropped_objects() AS events JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid WHERE events.classid = pg_class_oid AND events.objsubid = 0; @@ -522,7 +522,7 @@ $$ LANGUAGE plpgsql; * Drop partitions. If delete_data set to TRUE, partitions * will be dropped with all the data. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( +CREATE FUNCTION @extschema@.drop_partitions( parent_relid REGCLASS, delete_data BOOLEAN DEFAULT FALSE) RETURNS INTEGER AS $$ @@ -552,7 +552,7 @@ BEGIN ORDER BY inhrelid ASC) LOOP IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, child::TEXT); GET DIAGNOSTICS rows_count = ROW_COUNT; @@ -571,9 +571,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF rel_kind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', child); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', child); ELSE - EXECUTE format('DROP TABLE %s', child); + EXECUTE pg_catalog.format('DROP TABLE %s', child); END IF; part_count := part_count + 1; @@ -592,7 +592,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Copy all of parent's foreign keys. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( +CREATE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, partition_relid REGCLASS) RETURNS VOID AS $$ @@ -606,7 +606,7 @@ BEGIN FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP - EXECUTE format('ALTER TABLE %s ADD %s', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD %s', partition_relid::TEXT, pg_catalog.pg_get_constraintdef(conid)); END LOOP; @@ -617,7 +617,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set new relname, schema and tablespace */ -CREATE OR REPLACE FUNCTION @extschema@.alter_partition( +CREATE FUNCTION @extschema@.alter_partition( relation REGCLASS, new_name TEXT, new_schema REGNAMESPACE, @@ -634,17 +634,17 @@ BEGIN /* Alter table name */ IF new_name != orig_name THEN - EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + EXECUTE pg_catalog.format('ALTER TABLE %s RENAME TO %s', relation, new_name); END IF; /* Alter table schema */ IF new_schema != orig_schema THEN - EXECUTE format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + EXECUTE pg_catalog.format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); END IF; /* Move to another tablespace */ IF NOT new_tablespace IS NULL THEN - EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + EXECUTE pg_catalog.format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); END IF; END $$ LANGUAGE plpgsql; @@ -661,7 +661,7 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* * Get partitioning key. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( +CREATE FUNCTION @extschema@.get_partition_key( parent_relid REGCLASS) RETURNS TEXT AS $$ @@ -674,7 +674,7 @@ LANGUAGE sql STRICT; /* * Get partitioning key type. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( +CREATE FUNCTION @extschema@.get_partition_key_type( parent_relid REGCLASS) RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; @@ -682,7 +682,7 @@ LANGUAGE C STRICT; /* * Get parsed and analyzed expression. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( +CREATE FUNCTION @extschema@.get_partition_cooked_key( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' LANGUAGE C STRICT; @@ -690,7 +690,7 @@ LANGUAGE C STRICT; /* * Get partitioning type. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( +CREATE FUNCTION @extschema@.get_partition_type( parent_relid REGCLASS) RETURNS INT4 AS $$ @@ -703,11 +703,11 @@ LANGUAGE sql STRICT; /* * Get number of partitions managed by pg_pathman. */ -CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( +CREATE FUNCTION @extschema@.get_number_of_partitions( parent_relid REGCLASS) RETURNS INT4 AS $$ - SELECT count(*)::INT4 + SELECT pg_catalog.count(*)::INT4 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid; $$ @@ -716,7 +716,7 @@ LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( +CREATE FUNCTION @extschema@.get_parent_of_partition( partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; @@ -724,7 +724,7 @@ LANGUAGE C STRICT; /* * Extract basic type of a domain. */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type( +CREATE FUNCTION @extschema@.get_base_type( typid REGTYPE) RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; @@ -732,7 +732,7 @@ LANGUAGE C STRICT; /* * Return tablespace name for specified relation. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( +CREATE FUNCTION @extschema@.get_tablespace( relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' LANGUAGE C STRICT; @@ -741,7 +741,7 @@ LANGUAGE C STRICT; /* * Check that relation exists. */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( +CREATE FUNCTION @extschema@.validate_relname( relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'validate_relname' LANGUAGE C; @@ -749,7 +749,7 @@ LANGUAGE C; /* * Check that expression is valid */ -CREATE OR REPLACE FUNCTION @extschema@.validate_expression( +CREATE FUNCTION @extschema@.validate_expression( relid REGCLASS, expression TEXT) RETURNS VOID AS 'pg_pathman', 'validate_expression' @@ -758,7 +758,7 @@ LANGUAGE C; /* * Check if regclass is date or timestamp. */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( +CREATE FUNCTION @extschema@.is_date_type( typid REGTYPE) RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' LANGUAGE C STRICT; @@ -766,7 +766,7 @@ LANGUAGE C STRICT; /* * Check if TYPE supports the specified operator. */ -CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( +CREATE FUNCTION @extschema@.is_operator_supported( type_oid REGTYPE, opname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' @@ -775,7 +775,7 @@ LANGUAGE C STRICT; /* * Check if tuple from first relation can be converted to fit the second one. */ -CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( +CREATE FUNCTION @extschema@.is_tuple_convertible( relation1 REGCLASS, relation2 REGCLASS) RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' @@ -785,7 +785,7 @@ LANGUAGE C STRICT; /* * Build check constraint name for a specified relation's column. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( +CREATE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; @@ -793,7 +793,7 @@ LANGUAGE C STRICT; /* * Add record to pathman_config (RANGE) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( +CREATE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, expression TEXT, range_interval TEXT) @@ -803,7 +803,7 @@ LANGUAGE C; /* * Add record to pathman_config (HASH) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( +CREATE FUNCTION @extschema@.add_to_pathman_config( parent_relid REGCLASS, expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' @@ -814,7 +814,7 @@ LANGUAGE C; * Lock partitioned relation to restrict concurrent * modification of partitioning scheme. */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( +CREATE FUNCTION @extschema@.prevent_part_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' LANGUAGE C STRICT; @@ -822,7 +822,7 @@ LANGUAGE C STRICT; /* * Lock relation to restrict concurrent modification of data. */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( +CREATE FUNCTION @extschema@.prevent_data_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' LANGUAGE C STRICT; @@ -831,7 +831,7 @@ LANGUAGE C STRICT; /* * Invoke init_callback on RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE, @@ -843,7 +843,7 @@ LANGUAGE C; /* * Invoke init_callback on HASH partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE) @@ -853,10 +853,10 @@ LANGUAGE C; /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +CREATE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.pathman_version() +CREATE FUNCTION @extschema@.pathman_version() RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; diff --git a/range.sql b/range.sql index ef439cee..5af17014 100644 --- a/range.sql +++ b/range.sql @@ -3,7 +3,7 @@ * range.sql * RANGE partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Check RANGE partition boundaries. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( +CREATE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -24,7 +24,7 @@ DECLARE BEGIN /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + EXECUTE pg_catalog.format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', expression, parent_relid::TEXT) INTO rows_count, min_value, max_value; @@ -49,7 +49,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on datetime attribute */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -76,7 +76,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -142,7 +142,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on numerical expression */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -169,7 +169,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -239,7 +239,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on bounds array */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, 
bounds ANYARRAY, @@ -297,7 +297,7 @@ LANGUAGE plpgsql; /* * Append new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( +CREATE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -326,7 +326,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -347,7 +347,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. */ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( +CREATE FUNCTION @extschema@.append_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -368,7 +368,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -378,13 +378,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[2], @@ -401,7 +401,7 @@ $$ LANGUAGE plpgsql; /* * Prepend new partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( +CREATE FUNCTION @extschema@.prepend_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -430,7 +430,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -451,7 +451,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( +CREATE FUNCTION @extschema@.prepend_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -472,7 +472,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -482,13 +482,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[1], @@ -505,7 +505,7 @@ $$ LANGUAGE plpgsql; /* * Add new partition */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( +CREATE FUNCTION 
@extschema@.add_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, @@ -547,7 +547,7 @@ $$ LANGUAGE plpgsql; /* * Drop range partition */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( +CREATE FUNCTION @extschema@.drop_range_partition( partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -576,7 +576,7 @@ BEGIN PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, partition_relid::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; @@ -595,9 +595,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP TABLE %s', partition_relid::TEXT); END IF; RETURN part_name; @@ -608,7 +608,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Attach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( +CREATE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, partition_relid REGCLASS, start_value ANYELEMENT, @@ -658,10 +658,10 @@ BEGIN END IF; /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid), @extschema@.build_range_condition(partition_relid, @@ -691,7 +691,7 @@ $$ LANGUAGE plpgsql; /* * Detach range partition */ -CREATE OR REPLACE 
FUNCTION @extschema@.detach_range_partition( +CREATE FUNCTION @extschema@.detach_range_partition( partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -718,12 +718,12 @@ BEGIN END IF; /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid)); @@ -735,7 +735,7 @@ $$ LANGUAGE plpgsql; /* * Create a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( +CREATE FUNCTION @extschema@.create_naming_sequence( parent_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -744,8 +744,8 @@ DECLARE BEGIN seq_name := @extschema@.build_sequence_name(parent_relid); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('CREATE SEQUENCE %s START 1', seq_name); RETURN seq_name; END @@ -755,7 +755,7 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* * Drop a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( +CREATE FUNCTION @extschema@.drop_naming_sequence( parent_relid REGCLASS) RETURNS VOID AS $$ DECLARE @@ -764,7 +764,7 @@ DECLARE BEGIN seq_name := @extschema@.build_sequence_name(parent_relid); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); END $$ LANGUAGE plpgsql SET client_min_messages = WARNING; /* mute NOTICE message */ @@ -773,7 +773,7 @@ SET client_min_messages = WARNING; /* mute NOTICE message */ /* * Split RANGE partition in two using a pivot. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( +CREATE FUNCTION @extschema@.split_range_partition( partition_relid REGCLASS, split_value ANYELEMENT, partition_name TEXT DEFAULT NULL, @@ -784,7 +784,7 @@ LANGUAGE C; /* * Merge RANGE partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( +CREATE FUNCTION @extschema@.merge_range_partitions( variadic partitions REGCLASS[]) RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; @@ -796,12 +796,12 @@ LANGUAGE C STRICT; * DROP PARTITION. In Oracle partitions only have upper bound and when * partition is dropped the next one automatically covers freed range */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( +CREATE FUNCTION @extschema@.drop_range_partition_expand_next( partition_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( +CREATE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, partition_names TEXT[], @@ -813,7 +813,7 @@ LANGUAGE C; * Creates new RANGE partition. Returns partition name. * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( +CREATE FUNCTION @extschema@.create_single_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, @@ -825,7 +825,7 @@ LANGUAGE C; /* * Construct CHECK constraint condition for a range partition. */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( +CREATE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -836,7 +836,7 @@ LANGUAGE C; /* * Generate a name for naming sequence. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( +CREATE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; @@ -844,7 +844,7 @@ LANGUAGE C STRICT; /* * Returns N-th range (as an array of two elements). */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( parent_relid REGCLASS, partition_idx INTEGER, dummy ANYELEMENT) @@ -854,7 +854,7 @@ LANGUAGE C; /* * Returns min and max values for specified RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( partition_relid REGCLASS, dummy ANYELEMENT) RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' @@ -864,7 +864,7 @@ LANGUAGE C; * Checks if range overlaps with existing partitions. * Returns TRUE if overlaps and FALSE otherwise. */ -CREATE OR REPLACE FUNCTION @extschema@.check_range_available( +CREATE FUNCTION @extschema@.check_range_available( parent_relid REGCLASS, range_min ANYELEMENT, range_max ANYELEMENT) @@ -874,14 +874,14 @@ LANGUAGE C; /* * Generate range bounds starting with 'p_start' using 'p_interval'. */ -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval INTERVAL, p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER) diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql new file mode 100644 index 00000000..877f3280 --- /dev/null +++ b/sql/pathman_CVE-2020-14350.sql @@ -0,0 +1,77 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. 
+ */ + +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS regress_hacker; +SET client_min_messages = 'notice'; + +CREATE EXTENSION pg_pathman; +CREATE ROLE regress_hacker LOGIN; + +-- Test 1 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; + +SET ROLE regress_hacker; +SHOW is_superuser; +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; + +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +SELECT pg_sleep(1); + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + +-- Test 2 +RESET ROLE; +ALTER ROLE regress_hacker NOSUPERUSER; + +SET ROLE regress_hacker; +SHOW is_superuser; +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; + +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); +INSERT INTO test2 values(1); + +-- Test result (must be 'off') +SET ROLE regress_hacker; +SHOW is_superuser; + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE 
test1 CASCADE; +DROP TABLE test2 CASCADE; +DROP ROLE regress_hacker; +DROP EXTENSION pg_pathman; + diff --git a/src/partition_creation.c b/src/partition_creation.c index cd2a7b82..c86ba7aa 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -604,7 +604,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Construct call to create_single_range_partition() */ create_sql = psprintf( - "select %s.create_single_range_partition('%s.%s', '%s'::%s, '%s'::%s, '%s.%s')", + "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", quote_identifier(get_namespace_name(get_pathman_schema())), quote_identifier(parent_nsp_name), quote_identifier(get_rel_name(parent_relid)), diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 54d62e7f..a75e912b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -523,7 +523,7 @@ bgw_main_concurrent_part(Datum main_arg) * context will be destroyed after transaction finishes */ current_mcxt = MemoryContextSwitchTo(TopPathmanContext); - sql = psprintf("SELECT %s._partition_data_concurrent($1::oid, p_limit:=$2)", + sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", get_namespace_name(get_pathman_schema())); MemoryContextSwitchTo(current_mcxt); } From 1e82fd397d7acf14d4f7791adb1eefa7f8aaa06e Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Sun, 8 Nov 2020 17:17:21 +0300 Subject: [PATCH 1043/1124] Bump 1.5.12 lib version. 
--- META.json | 4 ++-- expected/pathman_calamity.out | 2 +- expected/pathman_calamity_1.out | 2 +- expected/pathman_calamity_2.out | 2 +- src/include/init.h | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/META.json b/META.json index 6bd1607d..c32d74ba 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_pathman", "abstract": "Fast partitioning tool for PostgreSQL", "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.5.11", + "version": "1.5.12", "maintainer": [ "Arseny Sher " ], @@ -22,7 +22,7 @@ "pg_pathman": { "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.5.11", + "version": "1.5.12", "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 50bfd803..7e794a72 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 20c2ea6c..60313bfd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index 0c7757a9..e621831b 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -23,7 +23,7 @@ SELECT debug_capture(); SELECT pathman_version(); pathman_version ----------------- - 1.5.11 + 1.5.12 (1 row) set client_min_messages = NOTICE; diff --git a/src/include/init.h b/src/include/init.h index f7f3df59..f2234c8f 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -160,7 +160,7 @@ 
simplify_mcxt_name(MemoryContext mcxt) #define LOWEST_COMPATIBLE_FRONT "1.5.0" /* Current version of native C library */ -#define CURRENT_LIB_VERSION "1.5.11" +#define CURRENT_LIB_VERSION "1.5.12" void *pathman_cache_search_relid(HTAB *cache_table, From d31988d910ad84b9573d9b964f8f6b73b93adb0f Mon Sep 17 00:00:00 2001 From: Victor Wagner Date: Wed, 11 Nov 2020 16:12:24 +0300 Subject: [PATCH 1044/1124] Sinence compiler warnings found on buildfarm --- src/init.c | 2 +- src/partition_filter.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/init.c b/src/init.c index 86e96ebe..99b79f55 100644 --- a/src/init.c +++ b/src/init.c @@ -930,7 +930,7 @@ read_opexpr_const(const OpExpr *opexpr, /* Update RIGHT */ right = (Node *) constant; } - /* FALL THROUGH (no break) */ + /* FALLTHROUGH */ case T_Const: { diff --git a/src/partition_filter.c b/src/partition_filter.c index 3808dc26..b8b3b03c 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -1016,6 +1016,7 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, elog(ERROR, "FDWs other than postgres_fdw are restricted"); + break; case PF_FDW_INSERT_ANY_FDW: elog(WARNING, "unrestricted FDW mode may lead to crashes"); From c25ba927ede826a229a533d184d78f73468da7cd Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 10 Dec 2020 19:06:58 +0300 Subject: [PATCH 1045/1124] pathman_dropped_cols test fixed --- expected/pathman_dropped_cols.out | 20 ++++++++++---------- sql/pathman_dropped_cols.sql | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 79e781b2..220f6750 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -183,22 +183,22 @@ EXECUTE getbyroot(2); 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 (2 rows) -EXPLAIN EXECUTE getbyroot(2); - QUERY PLAN --------------------------------------------------------------------------------------------- - Custom Scan 
(RuntimeAppend) (cost=4.17..11.28 rows=3 width=128) +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + QUERY PLAN +---------------------------------------------------------- + Custom Scan (RuntimeAppend) Prune by: (root_dict.root_id = $1) - -> Bitmap Heap Scan on root_dict_0 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_0 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_0_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_0_root_id_idx Index Cond: (root_id = $1) - -> Bitmap Heap Scan on root_dict_1 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_1 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_1_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_1_root_id_idx Index Cond: (root_id = $1) - -> Bitmap Heap Scan on root_dict_2 root_dict (cost=4.17..11.28 rows=3 width=128) + -> Bitmap Heap Scan on root_dict_2 root_dict Recheck Cond: (root_id = $1) - -> Bitmap Index Scan on root_dict_2_root_id_idx (cost=0.00..4.17 rows=3 width=0) + -> Bitmap Index Scan on root_dict_2_root_id_idx Index Cond: (root_id = $1) (14 rows) diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index 0ae16c8a..cb6acc57 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -96,7 +96,7 @@ EXECUTE getbyroot(2); -- errors usually start here EXECUTE getbyroot(2); EXECUTE getbyroot(2); -EXPLAIN EXECUTE getbyroot(2); +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; From 6b484c2ca9d071037be83c4f3c32df3348bf867d Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 15 Dec 2020 15:57:55 +0300 Subject: [PATCH 1046/1124] Remove queries from calamity test which depend on num of entries in relcache. Autovacuum blows out relcache, so it rarely fails if it is agressive enough. 
--- expected/pathman_calamity.out | 54 ++++++++++++++++----------------- expected/pathman_calamity_1.out | 54 ++++++++++++++++----------------- expected/pathman_calamity_2.out | 54 ++++++++++++++++----------------- sql/pathman_calamity.sql | 27 +++++++++++------ 4 files changed, 99 insertions(+), 90 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7e794a72..d8b6ad96 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -816,25 +816,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 
10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE 
context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 60313bfd..2b0f98e5 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -816,25 +816,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition 
status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | 
entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT 
context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index e621831b..b6fafc83 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -816,25 +816,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -862,25 +862,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context 
| entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -908,25 +908,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -952,14 +952,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, 
entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -967,25 +967,25 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 9 partition parents cache | 9 - partition status cache | 2 -(4 rows) +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 0 partition parents cache | 0 - partition status cache | 2 -(4 rows) +(3 rows) DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index b49d061c..6ad0df0e 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -383,9 +383,11 @@ CREATE EXTENSION pg_pathman; /* check that cache loading is lazy */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM 
pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -394,9 +396,11 @@ SET pg_pathman.enable_bounds_cache = false; CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -405,19 +409,24 @@ SET pg_pathman.enable_bounds_cache = true; CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* check that 
parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; From 2062ab9538b94f2f23e14cf2ba4d9cdac2b07601 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 28 Jun 2021 15:21:49 +0300 Subject: [PATCH 1047/1124] [PGPRO-5255] fix that ALTER TABLE IF EXISTS ... RENAME TO of not existed table generate ERROR instead of NOTICE --- expected/pathman_utility_stmt.out | 7 +++++++ sql/pathman_utility_stmt.sql | 6 ++++++ src/utility_stmt_hooking.c | 5 ++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 4cc4d493..0001b2f0 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -370,4 +370,11 @@ SELECT create_hash_partitions('drop_index.test', 'val', 2); DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; NOTICE: drop cascades to 3 other objects +/* + * Test, that ALTER TABLE IF EXISTS ... 
RENAME TO of not existed table generate NOTICE instead of ERROR + */ +CREATE SCHEMA rename_nonexistent; +ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP SCHEMA rename_nonexistent CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 31232ce1..c5e940ce 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -250,6 +250,12 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; +/* + * Test, that ALTER TABLE IF EXISTS ... RENAME TO of not existed table generate NOTICE instead of ERROR + */ +CREATE SCHEMA rename_nonexistent; +ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +DROP SCHEMA rename_nonexistent CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index c9ffbf14..8b160f64 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -175,7 +175,10 @@ is_pathman_related_table_rename(Node *parsetree, /* Fetch Oid of this relation */ relation_oid = RangeVarGetRelid(rename_stmt->relation, AccessShareLock, - false); + rename_stmt->missing_ok); + /* PGPRO-5255: check ALTER TABLE IF EXISTS of non existent table */ + if (rename_stmt->missing_ok && relation_oid == InvalidOid) + return false; /* Assume it's a parent */ if (has_pathman_relation_info(relation_oid)) From 363efa969a473680f54314df0af0313c9d9dda8b Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 28 Jun 2021 18:34:11 +0300 Subject: [PATCH 1048/1124] [PGPRO-5255] corrections based on the review --- expected/pathman_declarative.out | 4 ++ expected/pathman_declarative_1.out | 4 ++ expected/pathman_utility_stmt.out | 67 ++++++++++++++++++++++++++++-- sql/pathman_declarative.sql | 3 ++ sql/pathman_utility_stmt.sql | 49 ++++++++++++++++++++-- src/declarative.c | 6 ++- src/utility_stmt_hooking.c | 9 +++- 7 files changed, 131 insertions(+), 11 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 011a0f71..c13c0010 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -94,6 +94,10 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping DROP SCHEMA test CASCADE; NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index 8ef4e556..d720d335 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -94,6 +94,10 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping DROP SCHEMA test CASCADE; NOTICE: drop cascades to 8 other objects DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 
0001b2f0..6e137b37 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -371,10 +371,69 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; NOTICE: drop cascades to 3 other objects /* - * Test, that ALTER TABLE IF EXISTS ... RENAME TO of not existed table generate NOTICE instead of ERROR + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ -CREATE SCHEMA rename_nonexistent; -ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; +CREATE SCHEMA test_nonexistance; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA rename_nonexistent CASCADE; +/* renaming existent tables already tested earlier (see rename.plain_test) */ +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; +NOTICE: column "i" of relation "existent_table" already exists, skipping +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + i + j +(2 rows) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +NOTICE: column "nonexistent_column" of relation 
"existent_table" does not exist, skipping +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +------------------------------ + ........pg.dropped.1........ +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + j +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +ERROR: schema "nonexistent_schema" does not exist +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2 CASCADE; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS 
test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +ERROR: tablespace "nonexistent_tablespace" does not exist +DROP TABLE test_nonexistance.existent_table; +DROP SCHEMA test_nonexistance CASCADE; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 864e3af8..347627a7 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -39,6 +39,9 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); \d+ test.r4; +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; + DROP SCHEMA test CASCADE; DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index c5e940ce..c0832f34 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -251,11 +251,52 @@ DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; DROP SCHEMA drop_index CASCADE; /* - * Test, that ALTER TABLE IF EXISTS ... 
RENAME TO of not existed table generate NOTICE instead of ERROR + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ -CREATE SCHEMA rename_nonexistent; -ALTER TABLE IF EXISTS rename_nonexistent.nonexistent_table RENAME TO other_table_name; -DROP SCHEMA rename_nonexistent CASCADE; +CREATE SCHEMA test_nonexistance; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +/* renaming existent tables already tested earlier (see rename.plain_test) */ + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +CREATE TABLE 
test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2 CASCADE; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +DROP TABLE test_nonexistance.existent_table; + +DROP SCHEMA test_nonexistance CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/declarative.c b/src/declarative.c index ca4fe165..367df752 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -75,7 +75,11 @@ is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) AlterTableStmt *stmt = (AlterTableStmt *) parsetree; int cnt = 0; - *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, false); + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, stmt->missing_ok); + + if (stmt->missing_ok && *parent_relid == InvalidOid) + return false; + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) return false; diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 8b160f64..1949d970 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -176,7 +176,8 @@ is_pathman_related_table_rename(Node *parsetree, relation_oid = RangeVarGetRelid(rename_stmt->relation, AccessShareLock, rename_stmt->missing_ok); - 
/* PGPRO-5255: check ALTER TABLE IF EXISTS of non existent table */ + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ if (rename_stmt->missing_ok && relation_oid == InvalidOid) return false; @@ -235,7 +236,11 @@ is_pathman_related_alter_column_type(Node *parsetree, /* Assume it's a parent, fetch its Oid */ parent_relid = RangeVarGetRelid(alter_table_stmt->relation, AccessShareLock, - false); + alter_table_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (alter_table_stmt->missing_ok && parent_relid == InvalidOid) + return false; /* Is parent partitioned? */ if ((prel = get_pathman_relation_info(parent_relid)) != NULL) From 98b1f181b442e38d2e28d463c70c66511e7b8736 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 29 Jun 2021 05:09:58 +0300 Subject: [PATCH 1049/1124] fix travis-ci build, remove deprecated options from yaml, move to travis-ci.com (from .org) --- .travis.yml | 6 ++++-- Dockerfile.tmpl | 4 ++-- README.md | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index b020780b..7f22cf8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,6 @@ -sudo: required +os: linux + +dist: focal language: c @@ -31,7 +33,7 @@ env: - PG_VERSION=9.5 LEVEL=hardcore - PG_VERSION=9.5 -matrix: +jobs: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare - env: PG_VERSION=9.6 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 85b159cf..e1e3b0e6 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -29,8 +29,8 @@ ADD . 
/pg/testdir # Grant privileges RUN chown -R postgres:postgres ${PGDATA} && \ chown -R postgres:postgres /pg/testdir && \ - chmod a+rwx /usr/local/lib/postgresql && \ - chmod a+rwx /usr/local/share/postgresql/extension + chmod a+rwx /usr/local/share/postgresql/extension && \ + find /usr/local/lib/postgresql -type d -print0 | xargs -0 chmod a+rwx COPY run_tests.sh /run.sh RUN chmod 755 /run.sh diff --git a/README.md b/README.md index 94133b32..d4b8e3bb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) +[![Build Status](https://travis-ci.com/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.com/postgrespro/pg_pathman) [![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) [![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) From 2e174bad0beb4835d2ab18675e32f3b81a7d7c4a Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 29 Jun 2021 14:54:14 +0300 Subject: [PATCH 1050/1124] [PGPRO-5255] fix tests for postgres 9.5 and 10 --- expected/pathman_declarative.out | 3 ++- expected/pathman_declarative_1.out | 3 ++- expected/pathman_utility_stmt.out | 6 ++---- sql/pathman_declarative.sql | 3 ++- sql/pathman_utility_stmt.sql | 5 ++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index c13c0010..01f924ae 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -94,7 +94,8 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index d720d335..9870a3e7 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -94,7 +94,8 @@ Check constraints: "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) Inherits: test.range_rel -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 6e137b37..7e59fa23 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -377,12 +377,10 @@ CREATE SCHEMA test_nonexistance; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; NOTICE: relation "nonexistent_table" does not exist, skipping /* renaming existent tables already tested earlier (see rename.plain_test) */ -ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; NOTICE: relation "nonexistent_table" does not exist, skipping CREATE TABLE test_nonexistance.existent_table(i INT4); -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; -NOTICE: column "i" of relation "existent_table" already exists, skipping -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; attname --------- diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index 347627a7..d89ce3ed 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -39,7 +39,8 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); \d+ test.r4; -ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz DEFAULT; +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; DROP SCHEMA test CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index c0832f34..3b99a2f3 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -258,10 +258,9 @@ CREATE SCHEMA test_nonexistance; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; /* renaming existent tables already tested earlier (see rename.plain_test) */ -ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; CREATE TABLE test_nonexistance.existent_table(i INT4); -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS i INT4; -ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN IF NOT EXISTS j INT4; +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; DROP TABLE test_nonexistance.existent_table; From 4b0252aaa1ebe1fc035ca7a48f606b2af896bb89 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Mon, 5 Jul 2021 19:26:27 +0300 Subject: [PATCH 1051/1124] [PGPRO-5306] more correct checking of b-tree search strategies --- src/pg_pathman.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index e3a46abd..f06e794e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -1159,8 +1159,14 @@ handle_array(ArrayType *array, bool elem_byval; char elem_align; - /* Check if we can work with this strategy */ - if (strategy == 0) + /* + * Check if we can work with this strategy + * We can work only with BTLessStrategyNumber, BTLessEqualStrategyNumber, + * BTEqualStrategyNumber, BTGreaterEqualStrategyNumber and BTGreaterStrategyNumber. + * If new search strategies appear in the future, then access optimizations from + * this function will not work, and the default behavior (handle_array_return:) will work. + */ + if (strategy == InvalidStrategy || strategy > BTGreaterStrategyNumber) goto handle_array_return; /* Get element's properties */ @@ -1177,8 +1183,12 @@ handle_array(ArrayType *array, List *ranges; int i; - /* This is only for paranoia's sake */ - Assert(BTMaxStrategyNumber == 5 && BTEqualStrategyNumber == 3); + /* This is only for paranoia's sake (checking correctness of following take_min calculation) */ + Assert(BTEqualStrategyNumber == 3 + && BTLessStrategyNumber < BTEqualStrategyNumber + && BTLessEqualStrategyNumber < BTEqualStrategyNumber + && BTGreaterEqualStrategyNumber > BTEqualStrategyNumber + && BTGreaterStrategyNumber > BTEqualStrategyNumber); /* Optimizations for <, <=, >=, > */ if (strategy != BTEqualStrategyNumber) From 4870f5cc5d2d40d7019d8b47e709241c8ef28252 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Oct 2021 16:42:41 +0300 Subject: [PATCH 1052/1124] Changes for gcc-11 compilation --- src/pathman_workers.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index a75e912b..1bfda8f1 
100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -57,8 +57,8 @@ extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); -static bool start_bgworker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +static bool start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown); @@ -166,8 +166,8 @@ bg_worker_load_config(const char *bgw_name) * Common function to start background worker. */ static bool -start_bgworker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown) { #define HandleError(condition, new_state) \ From cffbe81c227ad62111a19bd82f2b84d405a2c8e6 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Oct 2021 14:43:31 +0300 Subject: [PATCH 1053/1124] Change for online upgrade --- src/pathman_workers.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 1bfda8f1..7b37d7ba 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -195,6 +195,9 @@ start_bgworker(const char *bgworker_name, snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | +#if defined(PGPRO_EE) && PG_VERSION_NUM == 130000 /* FIXME: need to replace "==" to ">=" in future */ + BGWORKER_CLASS_PERSISTENT | +#endif BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; From ca078423cbf8299ad71b1ca98ff7cc6e5c74222f Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Oct 2021 17:43:12 +0300 Subject: [PATCH 1054/1124] Fixed PG_VERSION_NUM condition --- src/pathman_workers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathman_workers.c 
b/src/pathman_workers.c index 7b37d7ba..38d61622 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -195,7 +195,7 @@ start_bgworker(const char *bgworker_name, snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | -#if defined(PGPRO_EE) && PG_VERSION_NUM == 130000 /* FIXME: need to replace "==" to ">=" in future */ +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 && PG_VERSION_NUM < 140000 /* FIXME: need to remove last condition in future */ BGWORKER_CLASS_PERSISTENT | #endif BGWORKER_BACKEND_DATABASE_CONNECTION; From 7df6cdfb582c5f752304c7cb49e0e54ee51af055 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 21 Oct 2021 09:20:00 +0300 Subject: [PATCH 1055/1124] PostgreSQL v14 compatibility Porting to v14 has difficulties because the internal API has changed seriously. So this porting requires some changes in the source PostgreSQL codes and can not be applied to vanilla without patch. --- expected/pathman_calamity_3.out | 1068 ++++++++++++++++++++ expected/pathman_cte_2.out | 252 +++++ expected/pathman_join_clause_2.out | 155 +++ expected/pathman_subpartitions.out | 3 +- expected/pathman_subpartitions_1.out | 3 +- expected/pathman_views_3.out | 189 ++++ patches/REL_14_STABLE-pg_pathman-core.diff | 533 ++++++++++ sql/pathman_subpartitions.sql | 3 +- src/compat/pg_compat.c | 8 +- src/hooks.c | 41 +- src/include/compat/pg_compat.h | 89 +- src/include/hooks.h | 17 +- src/include/partition_router.h | 7 +- src/nodes_common.c | 36 + src/partition_filter.c | 99 +- src/partition_overseer.c | 42 +- src/partition_router.c | 104 +- src/planner_tree_modification.c | 53 + src/relation_info.c | 4 +- src/utility_stmt_hooking.c | 52 +- 20 files changed, 2706 insertions(+), 52 deletions(-) create mode 100644 expected/pathman_calamity_3.out create mode 100644 expected/pathman_cte_2.out create mode 100644 expected/pathman_join_clause_2.out create mode 100644 expected/pathman_views_3.out create mode 100644 
patches/REL_14_STABLE-pg_pathman-core.diff diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out new file mode 100644 index 00000000..9aec9765 --- /dev/null +++ b/expected/pathman_calamity_3.out @@ -0,0 +1,1068 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions 
+------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL 
+SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); 
+ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" 
+SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + build_hash_condition +---------------------------------------------------- + public.get_hash_part_idx(hash_record(val), 10) = 1 +(1 row) + +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function 
validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK 
*/ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: relation "1" has no partitions +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: relation "pg_class" has no partitions +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: relation "0" does not exist +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA calamity CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * 
------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view 
pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + 
Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + 
+SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is 
disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity CASCADE; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out new file mode 100644 index 00000000..455a7cad --- /dev/null +++ b/expected/pathman_cte_2.out @@ -0,0 +1,252 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. 
There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) 
+SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + Delete on cte_del_xacts_2 t_3 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Seq Scan on cte_del_xacts_2 t_3 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(13 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(11 rows) + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != 
test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP SCHEMA test_cte CASCADE; +NOTICE: drop cascades to 3 other objects +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out new file mode 100644 index 00000000..d58ff6f6 --- /dev/null +++ 
b/expected/pathman_join_clause_2.out @@ -0,0 +1,155 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Seq Scan on mytbl_0 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan 
on mytbl_4 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND 
(owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP SCHEMA test CASCADE; +NOTICE: drop cascades to 15 other objects +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman CASCADE; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index c13b4ee8..25b36492 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -417,7 +417,8 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; (4 rows) SET pg_pathman.enable_partitionrouter = ON; -UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +WITH updated AS (UPDATE 
subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; tableoid | id1 | id2 | val -----------------------+-----+-----+----- subpartitions.abc_3_4 | -1 | -1 | 1 diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out index f190f798..5ea33044 100644 --- a/expected/pathman_subpartitions_1.out +++ b/expected/pathman_subpartitions_1.out @@ -411,7 +411,8 @@ SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; (4 rows) SET pg_pathman.enable_partitionrouter = ON; -UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; tableoid | id1 | id2 | val -----------------------+-----+-----+----- subpartitions.abc_3_4 | -1 | -1 | 1 diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out new file mode 100644 index 00000000..09b5718f --- /dev/null +++ b/expected/pathman_views_3.out @@ -0,0 +1,189 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------- + Seq Scan on _abc_0 + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------- + LockRows + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN 
+-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +---------------------------------------- + Unique + -> Sort + Sort Key: _abc_8.id + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq 
Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP SCHEMA views CASCADE; +NOTICE: drop cascades to 16 other objects +DROP EXTENSION pg_pathman; diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..e3e7c549 --- /dev/null +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -0,0 +1,533 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index f27e458482..ea47c341c1 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -32,6 +32,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index ca6f6d57d3..8ab313b910 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 5483dee650..e2864e6ae9 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple for determine from ++ * which partition the touple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? 
outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index b3ce4bae53..8f2bb12542 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -824,6 +824,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2713,6 +2720,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. +diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index d328856ae5..27235ec869 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -450,7 +450,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, + * This is also a convenient place to verify that the output of an UPDATE + * matches the target table (ExecBuildUpdateProjection does that). 
+ */ +-static void ++void + ExecInitUpdateProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) + { +@@ -2363,6 +2363,7 @@ ExecModifyTable(PlanState *pstate) + PartitionTupleRouting *proute = node->mt_partition_tuple_routing; + List *relinfos = NIL; + ListCell *lc; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -2400,12 +2401,23 @@ ExecModifyTable(PlanState *pstate) + resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; + subplanstate = outerPlanState(node); + ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; ++ + /* + * Fetch rows from subplan, and execute the required table modification + * for each row. + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contains original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -2439,7 +2451,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + elog(ERROR, "tableoid is NULL"); +@@ -2458,6 +2472,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -2467,6 +2483,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -2496,7 +2513,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2526,7 +2544,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2557,8 +2576,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); +- slot = ExecInsert(node, resultRelInfo, slot, planSlot, ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); ++ slot = ExecInsert(node, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, planSlot, + estate, node->canSetTag); + break; + case CMD_UPDATE: +@@ -2566,37 +2589,45 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + +- /* +- * Make the new tuple by combining plan's output tuple with +- * the old tuple being updated. +- */ +- oldSlot = resultRelInfo->ri_oldTupleSlot; +- if (oldtuple != NULL) +- { +- /* Use the wholerow junk attr as the old tuple. */ +- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); +- } +- else ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) + { +- /* Fetch the most recent version of old tuple. */ +- Relation relation = resultRelInfo->ri_RelationDesc; +- +- Assert(tupleid != NULL); +- if (!table_tuple_fetch_row_version(relation, tupleid, +- SnapshotAny, +- oldSlot)) +- elog(ERROR, "failed to fetch tuple being updated"); ++ /* ++ * Make the new tuple by combining plan's output tuple ++ * with the old tuple being updated. ++ */ ++ oldSlot = resultRelInfo->ri_oldTupleSlot; ++ if (oldtuple != NULL) ++ { ++ /* Use the wholerow junk attr as the old tuple. */ ++ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); ++ } ++ else ++ { ++ /* Fetch the most recent version of old tuple. */ ++ Relation relation = resultRelInfo->ri_RelationDesc; ++ ++ Assert(tupleid != NULL); ++ if (!table_tuple_fetch_row_version(relation, tupleid, ++ SnapshotAny, ++ oldSlot)) ++ elog(ERROR, "failed to fetch tuple being updated"); ++ } ++ slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, ++ oldSlot); + } +- slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, +- oldSlot); + + /* Now apply the update. */ +- slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, ++ slot = ExecUpdate(node, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, slot, + planSlot, &node->mt_epqstate, estate, + node->canSetTag); + break; + case CMD_DELETE: +- slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + planSlot, &node->mt_epqstate, estate, + true, /* processReturning */ + node->canSetTag, +@@ -2613,7 +2644,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. + */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -2642,6 +2676,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -2716,6 +2751,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -2812,6 +2848,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -2884,6 +2927,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete, there will be a junk attribute + * named "tableoid" present in the subplan's targetlist. 
It will be used +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 381d9e548d..9d101c3a86 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion)0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 134f6862da..92ff475332 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern bool DefaultXactReadOnly; +-extern bool XactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY ++extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ + extern bool xact_is_sampled; +diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h +index 2b4e104bb9..80d1274efe 100644 +--- a/src/include/catalog/objectaddress.h ++++ b/src/include/catalog/objectaddress.h +@@ -28,7 +28,7 @@ typedef struct ObjectAddress + int32 objectSubId; /* Subitem within object (eg column), or 0 */ + } ObjectAddress; + +-extern const ObjectAddress InvalidObjectAddress; ++extern PGDLLIMPORT const ObjectAddress InvalidObjectAddress; + + #define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ + do { \ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 3dc03c913e..1002d97499 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -657,5 +657,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git 
a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h +index 02015efe13..2091f7f3b7 100644 +--- a/src/include/libpq/libpq-be.h ++++ b/src/include/libpq/libpq-be.h +@@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); + extern ssize_t be_gssapi_write(Port *port, void *ptr, size_t len); + #endif /* ENABLE_GSS */ + +-extern ProtocolVersion FrontendProtocol; ++extern PGDLLIMPORT ProtocolVersion FrontendProtocol; + + /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */ + +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 105180764e..2a40d2ce15 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -579,6 +579,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index de22c9ba2c..c8be5323b8 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -608,7 +620,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 05ff67e693..d169271df1 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -41,7 +41,10 @@ my @contrib_uselibpq = + my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; ++my $contrib_extraincludes = { ++ 'dblink' => ['src/backend'], ++ 'pg_pathman' => ['contrib/pg_pathman/src/include'] ++}; + my $contrib_extrasource = { + 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], + 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], +@@ -970,6 +973,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + } + elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg) +@@ -999,6 +1003,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1023,23 +1040,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 5aaea49a..7a4dc606 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -142,7 +142,8 @@ INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; SET pg_pathman.enable_partitionrouter = ON; -UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; DROP TABLE subpartitions.abc CASCADE; diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index abf71f9d..7afdd99a 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -181,7 +181,9 @@ make_restrictinfos_from_actual_clauses(PlannerInfo *root, root->hasPseudoConstantQuals = true; } - rinfo = make_restrictinfo(clause, + rinfo = make_restrictinfo_compat( + root, + clause, true, false, pseudoconstant, @@ -235,7 +237,9 @@ McxtStatsInternal(MemoryContext context, int level, 
AssertArg(MemoryContextIsValid(context)); /* Examine the context itself */ -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 140000 + (*context->methods->stats) (context, NULL, NULL, totals, true); +#elif PG_VERSION_NUM >= 110000 (*context->methods->stats) (context, NULL, NULL, totals); #else (*context->methods->stats) (context, level, false, totals); diff --git a/src/hooks.c b/src/hooks.c index e9ff1ed7..276f6cfd 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -751,12 +751,25 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) * Post parse analysis hook. It makes sure the config is loaded before executing * any statement, including utility commands. */ +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) + */ +void +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query, JumbleState *jstate) +{ + /* Invoke original hook if needed */ + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query, jstate); +#else void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { /* Invoke original hook if needed */ if (pathman_post_parse_analyze_hook_next) pathman_post_parse_analyze_hook_next(pstate, query); +#endif /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) @@ -944,7 +957,23 @@ pathman_relcache_hook(Datum arg, Oid relid) * In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct) */ void -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) + */ +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) 
+{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 130000 pathman_process_utility_hook(PlannedStmt *first_arg, const char *queryString, ProcessUtilityContext context, @@ -1068,7 +1097,15 @@ pathman_process_utility_hook(Node *first_arg, } /* Finally call process_utility_hook_next or standard_ProcessUtility */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + readOnlyTree, + context, params, queryEnv, + dest, queryCompletion); +#elif PG_VERSION_NUM >= 130000 call_process_utility_compat((pathman_process_utility_hook_next ? pathman_process_utility_hook_next : standard_ProcessUtility), diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 24a36fea..a551b7ed 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -130,7 +130,12 @@ /* * BeginCopyFrom() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), NULL, (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 100000 #define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ attnamelist, options) \ BeginCopyFrom((pstate), (rel), (filename), (is_program), \ @@ -174,7 +179,14 @@ * - in pg 10 PlannedStmt object * - in pg 9.6 and lower Node parsetree */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + readOnlyTree, context, params, query_env, \ + dest, completion_tag) \ + (process_utility)((first_arg), (query_string), readOnlyTree, \ + (context), (params), \ + (query_env), (dest), 
(completion_tag)) +#elif PG_VERSION_NUM >= 100000 #define call_process_utility_compat(process_utility, first_arg, query_string, \ context, params, query_env, dest, \ completion_tag) \ @@ -240,7 +252,11 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, -1) +#elif PG_VERSION_NUM >= 130000 /* * PGPRO-3938 made create_append_path compatible with vanilla again */ @@ -303,7 +319,12 @@ /* * create_merge_append_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 100000 #define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ required_outer) \ create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ @@ -650,7 +671,20 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 set NULL into 'queryEnv' argument */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), false, (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 100000 #define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ completionTag) \ do { \ @@ -709,6 +743,9 @@ extern void set_rel_consider_parallel(PlannerInfo *root, * in compat version the type of first argument is (Expr *) */ #if PG_VERSION_NUM >= 100000 +#if 
PG_VERSION_NUM >= 140000 /* function removed in 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr * node, List * targetlist); +#endif #define tlist_member_ignore_relabel_compat(expr, targetlist) \ tlist_member_ignore_relabel((expr), (targetlist)) #elif PG_VERSION_NUM >= 90500 @@ -961,12 +998,16 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecInsertIndexTuples. Since 12 slot contains tupleid. + * Since 14: new fields "resultRelInfo", "update". */ -#if PG_VERSION_NUM >= 120000 -#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ +#if PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) +#elif PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) #else -#define ExecInsertIndexTuplesCompat(slot, tupleid, estate, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) #endif @@ -1006,7 +1047,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * macro (and never will be, for old versions), so distinguish via macro added * by the commit. 
*/ -#ifdef QTW_DONT_COPY_DEFAULT +#if defined(QTW_DONT_COPY_DEFAULT) && (PG_VERSION_NUM < 140000) #define expression_tree_mutator_compat(node, mutator, context) \ expression_tree_mutator((node), (mutator), (context), 0) #else @@ -1101,4 +1142,34 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) #endif +/* + * raw_parser() + * In 14 new argument was added (844fe9f159a) + */ +#if PG_VERSION_NUM >= 140000 +#define raw_parser_compat(s) raw_parser((s), RAW_PARSE_DEFAULT) +#else +#define raw_parser_compat(s) raw_parser(s) +#endif + +/* + * make_restrictinfo() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#else +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#endif + +/* + * pull_varnos() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define pull_varnos_compat(r, n) pull_varnos((r), (n)) +#else +#define pull_varnos_compat(r, n) pull_varnos(n) +#endif + #endif /* PG_COMPAT_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h index 49d7e8f1..ccfe060b 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -51,14 +51,29 @@ PlannedStmt * pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams); +#if PG_VERSION_NUM >= 140000 +void pathman_post_parse_analyze_hook(ParseState *pstate, + Query *query, + JumbleState *jstate); +#else void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query); +#endif void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char 
*queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 130000 void pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, diff --git a/src/include/partition_router.h b/src/include/partition_router.h index 8240d13b..c6924609 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -32,7 +32,9 @@ typedef struct PartitionRouterState Plan *subplan; /* proxy variable to store subplan */ ExprState *constraint; /* should tuple remain in partition? */ +#if PG_VERSION_NUM < 140000 /* field removed in 86dc90056dfd */ JunkFilter *junkfilter; /* 'ctid' extraction facility */ +#endif ResultRelInfo *current_rri; /* Machinery required for EvalPlanQual */ @@ -42,6 +44,9 @@ typedef struct PartitionRouterState /* Preserved slot from last call */ bool yielded; TupleTableSlot *yielded_slot; +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *yielded_original_slot; +#endif /* Need these for a GREAT deal of hackery */ ModifyTableState *mt_state; @@ -66,8 +71,6 @@ extern CustomExecMethods partition_router_exec_methods; #define MTHackField(mt_state, field) ( (mt_state)->field ) void init_partition_router_static_data(void); -void prepare_modify_table_for_partition_router(PlanState *state, - void *context); void partition_router_begin(CustomScanState *node, EState *estate, int eflags); void partition_router_end(CustomScanState *node); void partition_router_rescan(CustomScanState *node); diff --git a/src/nodes_common.c b/src/nodes_common.c index c2a02649..b6bf24cb 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -184,6 +184,42 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) return temp_tlist; } +#if PG_VERSION_NUM >= 140000 +/* + * Function "tlist_member_ignore_relabel" was removed in vanilla (375398244168) + * Function moved to pg_pathman. 
+ */ +/* + * tlist_member_ignore_relabel + * Finds the (first) member of the given tlist whose expression is + * equal() to the given expression. Result is NULL if no such member. + * We ignore top-level RelabelType nodes + * while checking for a match. This is needed for some scenarios + * involving binary-compatible sort operations. + */ +TargetEntry * +tlist_member_ignore_relabel(Expr *node, List *targetlist) +{ + ListCell *temp; + + while (node && IsA(node, RelabelType)) + node = ((RelabelType *) node)->arg; + + foreach(temp, targetlist) + { + TargetEntry *tlentry = (TargetEntry *) lfirst(temp); + Expr *tlexpr = tlentry->expr; + + while (tlexpr && IsA(tlexpr, RelabelType)) + tlexpr = ((RelabelType *) tlexpr)->arg; + + if (equal(node, tlexpr)) + return tlentry; + } + return NULL; +} +#endif + /* Is tlist 'a' subset of tlist 'b'? (in terms of Vars) */ static bool tlist_is_var_subset(List *a, List *b) diff --git a/src/partition_filter.c b/src/partition_filter.c index b8b3b03c..5d1f4943 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -14,6 +14,7 @@ #include "pathman.h" #include "partition_creation.h" #include "partition_filter.h" +#include "partition_router.h" #include "utils.h" #include "access/htup_details.h" @@ -353,10 +354,13 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) CopyToResultRelInfo(ri_onConflictSetWhere); #endif +#if PG_VERSION_NUM < 140000 + /* field "ri_junkFilter" removed in 86dc90056dfd */ if (parts_storage->command_type != CMD_UPDATE) CopyToResultRelInfo(ri_junkFilter); else child_result_rel_info->ri_junkFilter = NULL; +#endif /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; @@ -765,6 +769,32 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) RPS_RRI_CB(NULL, NULL)); } +#if PG_VERSION_NUM >= 140000 +/* + * Re-initialization of PartitionFilterState for using new partition with new + * "current_rri" + */ +static 
void +reint_partition_filter_state(PartitionFilterState *state, ResultRelInfo *current_rri) +{ + Oid parent_relid = state->partitioned_table; + EState *estate = state->result_parts.estate; + + fini_result_parts_storage(&state->result_parts); + + state->returning_list = current_rri->ri_returningList; + + /* Init ResultRelInfo cache */ + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, + state->on_conflict_action != ONCONFLICT_NONE, + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); +} +#endif + TupleTableSlot * partition_filter_exec(CustomScanState *node) { @@ -782,6 +812,22 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; ResultRelInfo *rri; +#if PG_VERSION_NUM >= 140000 + PartitionRouterState *pr_state = linitial(node->custom_ps); + + /* + * For 14: in case UPDATE command, we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. 
+ */ + if (IsPartitionRouterState(pr_state) && + state->result_parts.base_rri != pr_state->current_rri) + { /* + * Slot switched to new partition: need to + * reinitialize some PartitionFilterState variables + */ + reint_partition_filter_state(state, pr_state->current_rri); + } +#endif /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -1112,9 +1158,18 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, NodeSetTag(&mtstate, T_ModifyTableState); mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; +#if PG_VERSION_NUM >= 140000 + /* + * Some fields ("mt_plans", "mt_nplans", "mt_whichplan") removed + * in 86dc90056dfd + */ + outerPlanState(&mtstate.ps) = pstate_ptr; + mtstate.mt_nrels = 1; +#else mtstate.mt_plans = &pstate_ptr; mtstate.mt_nplans = 1; mtstate.mt_whichplan = 0; +#endif mtstate.resultRelInfo = rri; #if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; @@ -1255,9 +1310,40 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) static int append_rri_to_estate(EState *estate, ResultRelInfo *rri) { - estate_mod_data *emd_struct = fetch_estate_mod_data(estate); - int result_rels_allocated = emd_struct->estate_alloc_result_rels; + estate_mod_data *emd_struct = fetch_estate_mod_data(estate); + int result_rels_allocated = emd_struct->estate_alloc_result_rels; +#if PG_VERSION_NUM >= 140000 /* reworked in commit a04daa97a433 */ + ResultRelInfo **rri_array = estate->es_result_relations; + + /* + * We already increased variable "estate->es_range_table_size" in previous + * call append_rte_to_estate(): see + * "estate->es_range_table_size = list_length(estate->es_range_table)" + * after "lappend(estate->es_range_table, rte)". So we should append + * new value in "estate->es_result_relations" only. 
+ */ + /* Reallocate estate->es_result_relations if needed */ + if (result_rels_allocated < estate->es_range_table_size) + { + result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; + estate->es_result_relations = palloc(result_rels_allocated * + sizeof(ResultRelInfo *)); + memcpy(estate->es_result_relations, + rri_array, + (estate->es_range_table_size - 1) * sizeof(ResultRelInfo *)); + } + + estate->es_result_relations[estate->es_range_table_size - 1] = rri; + + estate->es_opened_result_relations = lappend(estate->es_opened_result_relations, rri); + + /* Update estate_mod_data */ + emd_struct->estate_alloc_result_rels = result_rels_allocated; + emd_struct->estate_not_modified = false; + + return estate->es_range_table_size; +#else /* Reallocate estate->es_result_relations if needed */ if (result_rels_allocated <= estate->es_num_result_relations) { @@ -1284,6 +1370,7 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) emd_struct->estate_not_modified = false; return estate->es_num_result_relations++; +#endif } @@ -1318,7 +1405,15 @@ fetch_estate_mod_data(EState *estate) /* Have to create a new one */ emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); emd_struct->estate_not_modified = true; +#if PG_VERSION_NUM >= 140000 + /* + * Reworked in commit a04daa97a433: field "es_num_result_relations" + * removed + */ + emd_struct->estate_alloc_result_rels = estate->es_range_table_size; +#else emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; +#endif cb = MemoryContextAlloc(estate_mcxt, sizeof(MemoryContextCallback)); cb->func = pf_memcxt_callback; diff --git a/src/partition_overseer.c b/src/partition_overseer.c index 41590425..ffa770ba 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -68,23 +68,32 @@ partition_overseer_create_scan_state(CustomScan *node) static void set_mt_state_for_router(PlanState *state, void *context) { +#if PG_VERSION_NUM < 140000 int i; - ModifyTableState *mt_state = 
(ModifyTableState *) state; +#endif + ModifyTableState *mt_state = (ModifyTableState *) state; - if (!IsA(state, ModifyTableState)) + if (!IsA(state, ModifyTableState)) return; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_plans", "mt_nplans" removed in 86dc90056dfd */ + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else for (i = 0; i < mt_state->mt_nplans; i++) { - CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; - PartitionRouterState *pr_state; - + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif /* Check if this is a PartitionFilter + PartitionRouter combo */ - if (IsPartitionFilterState(pf_state) && - IsPartitionRouterState(pr_state = linitial(pf_state->custom_ps))) + if (IsPartitionFilterState(pf_state)) { - /* HACK: point to ModifyTable in PartitionRouter */ - pr_state->mt_state = mt_state; + PartitionRouterState *pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } } } } @@ -116,25 +125,40 @@ partition_overseer_exec(CustomScanState *node) mt_plans_new; /* Get initial signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_old = mt_state->mt_nrels; +#else mt_plans_old = mt_state->mt_nplans; +#endif restart: /* Run ModifyTable */ slot = ExecProcNode((PlanState *) mt_state); /* Get current signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_new = MTHackField(mt_state, mt_nrels); +#else mt_plans_new = MTHackField(mt_state, mt_nplans); +#endif /* Did PartitionRouter ask us to restart? 
*/ if (mt_plans_new != mt_plans_old) { /* Signal points to current plan */ +#if PG_VERSION_NUM < 140000 int state_idx = -mt_plans_new; +#endif /* HACK: partially restore ModifyTable's state */ MTHackField(mt_state, mt_done) = false; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = mt_plans_old; +#else MTHackField(mt_state, mt_nplans) = mt_plans_old; MTHackField(mt_state, mt_whichplan) = state_idx; +#endif /* Rerun ModifyTable */ goto restart; diff --git a/src/partition_router.c b/src/partition_router.c index b602347b..17013a02 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -72,9 +72,10 @@ static TupleTableSlot *router_set_slot(PartitionRouterState *state, TupleTableSlot *slot, CmdType operation); static TupleTableSlot *router_get_slot(PartitionRouterState *state, + EState *estate, bool *should_process); -static void router_lazy_init_constraint(PartitionRouterState *state); +static void router_lazy_init_constraint(PartitionRouterState *state, bool recreate); static ItemPointerData router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot); @@ -185,43 +186,97 @@ partition_router_exec(CustomScanState *node) take_next_tuple: /* Get next tuple for processing */ - slot = router_get_slot(state, &should_process); + slot = router_get_slot(state, estate, &should_process); if (should_process) { CmdType new_cmd; bool deleted; ItemPointerData ctid; + /* Variables for prepare a full "new" tuple, after 86dc90056dfd */ +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *old_slot; + ResultRelInfo *rri; +#endif + TupleTableSlot *full_slot = slot; + bool partition_changed = false; ItemPointerSetInvalid(&ctid); +#if PG_VERSION_NUM < 140000 /* Build new junkfilter if needed */ if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; +#else + if (slot->tts_tableOid == InvalidOid) + elog(ERROR, "invalid table OID in returned tuple"); + + /* + * For 
14: in case UPDATE command we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. + */ + if (RelationGetRelid(state->current_rri->ri_RelationDesc) != slot->tts_tableOid) + { + /* + * Function router_get_slot() switched to new partition: need to + * reinitialize some PartitionRouterState variables + */ + state->current_rri = ExecLookupResultRelByOid(state->mt_state, + slot->tts_tableOid, false, false); + partition_changed = true; + } +#endif - /* Build recheck constraint state lazily */ - router_lazy_init_constraint(state); + /* Build recheck constraint state lazily (and re-create constraint + * in case we start scan another relation) */ + router_lazy_init_constraint(state, partition_changed); /* Extract item pointer from current tuple */ ctid = router_extract_ctid(state, slot); + Assert(ItemPointerIsValid(&ctid)); /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = state->current_rri; +#if PG_VERSION_NUM >= 140000 /* after 86dc90056dfd */ + /* Store original slot */ + estate->es_original_tuple = slot; + /* + * "slot" contains new values of the changed columns plus row + * identity information such as CTID. + * Need to prepare a "newSlot" with full tuple for triggers in + * router_lock_or_delete_tuple(). But we should return old slot + * with CTID because this CTID is used in ExecModifyTable(). + */ + rri = state->current_rri; + + /* Initialize projection info if first time for this table. */ + if (unlikely(!rri->ri_projectNewInfoValid)) + ExecInitUpdateProjection(state->mt_state, rri); + + old_slot = rri->ri_oldTupleSlot; + /* Fetch the most recent version of old tuple. 
*/ + if (!table_tuple_fetch_row_version(rri->ri_RelationDesc, + &ctid, SnapshotAny, old_slot)) + elog(ERROR, "failed to fetch partition tuple being updated"); + + /* Build full tuple (using "old_slot" + changed from "slot"): */ + full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); +#endif + /* Lock or delete tuple from old partition */ - Assert(ItemPointerIsValid(&ctid)); - slot = router_lock_or_delete_tuple(state, slot, - &ctid, &deleted); + full_slot = router_lock_or_delete_tuple(state, full_slot, + &ctid, &deleted); /* We require a tuple (previous one has vanished) */ - if (TupIsNull(slot)) + if (TupIsNull(full_slot)) goto take_next_tuple; /* Should we use UPDATE or DELETE + INSERT? */ new_cmd = deleted ? CMD_INSERT : CMD_UPDATE; /* Alter ModifyTable's state and return */ - return router_set_slot(state, slot, new_cmd); + return router_set_slot(state, full_slot, new_cmd); } return slot; @@ -265,7 +320,12 @@ router_set_slot(PartitionRouterState *state, return slot; /* HACK: alter ModifyTable's state */ +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = -mt_state->mt_nrels; +#else MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; +#endif MTHackField(mt_state, operation) = operation; /* HACK: disable AFTER STATEMENT triggers */ @@ -273,6 +333,9 @@ router_set_slot(PartitionRouterState *state, if (!TupIsNull(slot)) { + EState *estate = mt_state->ps.state; + +#if PG_VERSION_NUM < 140000 /* field "ri_junkFilter" removed in 86dc90056dfd */ /* We should've cached junk filter already */ Assert(state->junkfilter); @@ -280,12 +343,20 @@ router_set_slot(PartitionRouterState *state, state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? state->junkfilter : NULL; +#endif /* Don't forget to set saved_slot! 
*/ - state->yielded_slot = ExecInitExtraTupleSlotCompat(mt_state->ps.state, + state->yielded_slot = ExecInitExtraTupleSlotCompat(estate, slot->tts_tupleDescriptor, &TTSOpsHeapTuple); ExecCopySlot(state->yielded_slot, slot); +#if PG_VERSION_NUM >= 140000 + Assert(estate->es_original_tuple != NULL); + state->yielded_original_slot = ExecInitExtraTupleSlotCompat(estate, + estate->es_original_tuple->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_original_slot, estate->es_original_tuple); +#endif } /* Yield */ @@ -296,6 +367,7 @@ router_set_slot(PartitionRouterState *state, /* Fetch next tuple (either fresh or yielded) */ static TupleTableSlot * router_get_slot(PartitionRouterState *state, + EState *estate, bool *should_process) { TupleTableSlot *slot; @@ -309,6 +381,10 @@ router_get_slot(PartitionRouterState *state, /* Reset saved slot */ slot = state->yielded_slot; state->yielded_slot = NULL; +#if PG_VERSION_NUM >= 140000 + estate->es_original_tuple = state->yielded_original_slot; + state->yielded_original_slot = NULL; +#endif state->yielded = false; /* We shouldn't process preserved slot... */ @@ -331,9 +407,9 @@ router_get_slot(PartitionRouterState *state, } static void -router_lazy_init_constraint(PartitionRouterState *state) +router_lazy_init_constraint(PartitionRouterState *state, bool reinit) { - if (state->constraint == NULL) + if (state->constraint == NULL || reinit) { Relation rel = state->current_rri->ri_RelationDesc; Oid relid = RelationGetRelid(rel); @@ -376,7 +452,11 @@ router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) bool ctid_isnull; ctid_datum = ExecGetJunkAttribute(slot, +#if PG_VERSION_NUM >= 140000 /* field "junkfilter" removed in 86dc90056dfd */ + state->current_rri->ri_RowIdAttNo, +#else state->junkfilter->jf_junkAttNo, +#endif &ctid_isnull); /* shouldn't ever get a null result... 
*/ diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 77a55bd3..2477cc7f 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -185,8 +185,12 @@ plan_tree_visitor(Plan *plan, break; case T_ModifyTable: +#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ + plan_tree_visitor(outerPlan(plan), visitor, context); +#else foreach (l, ((ModifyTable *) plan)->plans) plan_tree_visitor((Plan *) lfirst(l), visitor, context); +#endif break; case T_Append: @@ -248,9 +252,13 @@ state_tree_visitor(PlanState *state, break; case T_ModifyTable: +#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ + visitor(outerPlanState(state), context); +#else state_visit_members(((ModifyTableState *) state)->mt_plans, ((ModifyTableState *) state)->mt_nplans, visitor, context); +#endif break; case T_Append: @@ -757,9 +765,19 @@ partition_filter_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else ListCell *lc1, *lc2, *lc3; +#endif /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) @@ -768,8 +786,12 @@ partition_filter_visitor(Plan *plan, void *context) Assert(rtable && IsA(rtable, List)); lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) +#endif { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable); @@ -786,11 +808,19 @@ partition_filter_visitor(Plan *plan, void *context) lc3 = lnext_compat(modify_table->returningLists, lc3); } +#if 
PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + outerPlan(modify_table) = make_partition_filter(subplan, relid, + modify_table->nominalRelation, + modify_table->onConflictAction, + modify_table->operation, + returning_list); +#else lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, modify_table->nominalRelation, modify_table->onConflictAction, modify_table->operation, returning_list); +#endif } } @@ -807,9 +837,19 @@ partition_router_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else ListCell *lc1, *lc2, *lc3; +#endif bool changed = false; /* Skip if not ModifyTable with 'UPDATE' command */ @@ -827,8 +867,12 @@ partition_router_visitor(Plan *plan, void *context) } lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) +#endif { Index rindex = lfirst_int(lc2); Oid relid = getrelid(rindex, rtable), @@ -852,8 +896,13 @@ partition_router_visitor(Plan *plan, void *context) lc3 = lnext_compat(modify_table->returningLists, lc3); } +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + prouter = make_partition_router(subplan, + modify_table->epqParam); +#else prouter = make_partition_router((Plan *) lfirst(lc1), modify_table->epqParam); +#endif pfilter = make_partition_filter((Plan *) prouter, relid, modify_table->nominalRelation, @@ -861,7 +910,11 @@ partition_router_visitor(Plan *plan, void *context) CMD_UPDATE, returning_list); +#if PG_VERSION_NUM >= 140000 /* for changes in 86dc90056dfd */ + outerPlan(modify_table) = pfilter; +#else lfirst(lc1) = pfilter; +#endif 
changed = true; } } diff --git a/src/relation_info.c b/src/relation_info.c index df60dde3..64c04c2f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1444,7 +1444,7 @@ parse_partitioning_expression(const Oid relid, PG_TRY(); { - parsetree_list = raw_parser(query_string); + parsetree_list = raw_parser_compat(query_string); } PG_CATCH(); { @@ -1555,7 +1555,7 @@ cook_partitioning_expression(const Oid relid, " must be marked IMMUTABLE"))); /* Sanity check #5 */ - expr_varnos = pull_varnos(expr); + expr_varnos = pull_varnos_compat(NULL, expr); if (bms_num_members(expr_varnos) != 1 || relid != ((RangeTblEntry *) linitial(query->rtable))->relid) { diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 1949d970..89649e0d 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -67,7 +67,12 @@ ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock -static uint64 PathmanCopyFrom(CopyState cstate, +static uint64 PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif Relation parent_rel, List *range_table, bool old_protocol); @@ -230,7 +235,11 @@ is_pathman_related_alter_column_type(Node *parsetree, return false; /* Are we going to modify some table? 
*/ +#if PG_VERSION_NUM >= 140000 + if (alter_table_stmt->objtype != OBJECT_TABLE) +#else if (alter_table_stmt->relkind != OBJECT_TABLE) +#endif return false; /* Assume it's a parent, fetch its Oid */ @@ -284,7 +293,7 @@ is_pathman_related_alter_column_type(Node *parsetree, } /* - * CopyGetAttnums - build an integer list of attnums to be copied + * PathmanCopyGetAttnums - build an integer list of attnums to be copied * * The input attnamelist is either the user-specified column list, * or NIL if there was none (in which case we want all the non-dropped @@ -293,7 +302,7 @@ is_pathman_related_alter_column_type(Node *parsetree, * rel can be NULL ... it's only used for error reports. */ static List * -CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) +PathmanCopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) { List *attnums = NIL; @@ -372,7 +381,11 @@ PathmanDoCopy(const CopyStmt *stmt, int stmt_len, uint64 *processed) { +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate; +#else CopyState cstate; +#endif ParseState *pstate; Relation rel; List *range_table = NIL; @@ -419,7 +432,7 @@ PathmanDoCopy(const CopyStmt *stmt, range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); - attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); + attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); foreach(cur, attnums) { int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; @@ -483,7 +496,13 @@ PathmanDoCopy(const CopyStmt *stmt, * Copy FROM file to relation. 
*/ static uint64 -PathmanCopyFrom(CopyState cstate, Relation parent_rel, +PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif + Relation parent_rel, List *range_table, bool old_protocol) { HeapTuple tuple; @@ -510,6 +529,23 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, 0); ExecOpenIndices(parent_rri, false); +#if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ + /* + * Call ExecInitRangeTable() should be first because in 14 it initializes + * field "estate->es_result_relations": + */ + ExecInitRangeTable(estate, range_table); + estate->es_result_relations = + (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); + estate->es_result_relations[0] = parent_rri; + /* + * Saving in the list allows to avoid needlessly traversing the whole + * array when only a few of its entries are possibly non-NULL. + */ + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, parent_rri); + estate->es_result_relation_info = parent_rri; +#else estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; estate->es_result_relation_info = parent_rri; @@ -518,7 +554,7 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, #else estate->es_range_table = range_table; #endif - +#endif /* Initialize ResultPartsStorage */ init_result_parts_storage(&parts_storage, parent_relid, parent_rri, @@ -669,8 +705,8 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, /* ... 
and create index entries for it */ if (child_rri->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuplesCompat(slot, &(tuple->t_self), - estate, false, NULL, NIL); + recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, + slot, &(tuple->t_self), estate, false, false, NULL, NIL); } #ifdef PG_SHARDMAN /* Handle foreign tables */ From 90e90e9912b9366e2ce89819c2c399edc7add39d Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 9 Nov 2021 00:47:23 +0300 Subject: [PATCH 1056/1124] Changes for PostgreSQL v15 --- src/hooks.c | 4 ++++ src/partition_creation.c | 49 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 276f6cfd..f376e4a0 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -293,7 +293,11 @@ pathman_join_pathlist_hook(PlannerInfo *root, * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. */ +#if PG_VERSION_NUM >= 150000 /* reason: commit 18fea737b5e4 */ + nest_path->jpath.path.rows = +#else nest_path->path.rows = +#endif get_parameterized_joinrel_size_compat(root, joinrel, outer, inner, extra->sjinfo, diff --git a/src/partition_creation.c b/src/partition_creation.c index 65335c65..2154bc8a 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -92,8 +92,13 @@ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_rel static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -static Value make_string_value_struct(char *str); +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +static String make_string_value_struct(char *str); +static Integer make_int_value_struct(int int_val); +#else +static Value make_string_value_struct(char* str); static Value make_int_value_struct(int int_val); +#endif static Node *build_partitioning_expression(Oid parent_relid, Oid *expr_type, @@ -1356,12 
+1361,21 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val.sval = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) +#else #define BuildConstExpr(node, value, value_type) \ do { \ (node)->val = make_string_value_struct( \ datum_to_cstring((value), (value_type))); \ (node)->location = -1; \ } while (0) +#endif #define BuildCmpExpr(node, opname, expr, c) \ do { \ @@ -1554,11 +1568,19 @@ build_raw_hash_check_tree(Node *raw_expression, hash_proc = tce->hash_proc; /* Total amount of partitions */ +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ + part_count_c->val.ival = make_int_value_struct(part_count); +#else part_count_c->val = make_int_value_struct(part_count); +#endif part_count_c->location = -1; /* Index of this partition (hash % total amount) */ +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ + part_idx_c->val.ival = make_int_value_struct(part_idx); +#else part_idx_c->val = make_int_value_struct(part_idx); +#endif part_idx_c->location = -1; /* Call hash_proc() */ @@ -1649,6 +1671,29 @@ make_constraint_common(char *name, Node *raw_expr) return constraint; } +#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +static String +make_string_value_struct(char* str) +{ + String val; + + val.type = T_String; + val.val = str; + + return val; +} + +static Integer +make_int_value_struct(int int_val) +{ + Integer val; + + val.type = T_Integer; + val.val = int_val; + + return val; +} +#else static Value make_string_value_struct(char *str) { @@ -1670,7 +1715,7 @@ make_int_value_struct(int int_val) return val; } - +#endif /* PG_VERSION_NUM >= 150000 */ /* * --------------------- From 23122aba6efd3feeee032636c89a100f9940d812 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 9 Nov 2021 
14:43:27 +0300 Subject: [PATCH 1057/1124] Corrected test after REVOKE PUBLIC CREATE (see commit https://git.postgresql.org/gitweb/?p=postgresql.git&a=commitdiff&h=b073c3ccd06e4cb845e121387a43faa8c68a7b62) --- expected/pathman_CVE-2020-14350.out | 1 + sql/pathman_CVE-2020-14350.sql | 1 + 2 files changed, 2 insertions(+) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out index c91a280f..c4250097 100644 --- a/expected/pathman_CVE-2020-14350.out +++ b/expected/pathman_CVE-2020-14350.out @@ -9,6 +9,7 @@ DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; DROP ROLE IF EXISTS regress_hacker; SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; CREATE ROLE regress_hacker LOGIN; -- Test 1 diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql index 877f3280..e3730744 100644 --- a/sql/pathman_CVE-2020-14350.sql +++ b/sql/pathman_CVE-2020-14350.sql @@ -10,6 +10,7 @@ DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; DROP ROLE IF EXISTS regress_hacker; SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; CREATE ROLE regress_hacker LOGIN; From 6e155ce29feb1be64643ef44c8acac1e82bf7e83 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 18 Oct 2021 10:15:48 +0300 Subject: [PATCH 1058/1124] [PGPRO-5113] Added 'tuple map' for prevent addition extra columns values into partitions --- expected/pathman_rebuild_updates.out | 39 ++++++++ expected/pathman_rebuild_updates_1.out | 39 ++++++++ sql/pathman_rebuild_updates.sql | 19 ++++ src/include/partition_filter.h | 5 + src/partition_filter.c | 125 ++++++++++++++++++++++--- 5 files changed, 215 insertions(+), 12 deletions(-) diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index eb078303..dfa4a5ce 100644 --- a/expected/pathman_rebuild_updates.out +++ 
b/expected/pathman_rebuild_updates.out @@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE -1 | 105 | test_updates.test_13 (1 row) +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index 10ec256e..5bda15ce 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -155,6 +155,45 @@ UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::RE -1 | 105 | test_updates.test_13 (1 row) +/* basic check for 'ALTER TABLE ... 
ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects DROP SCHEMA test_updates CASCADE; NOTICE: drop cascades to 15 other objects DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index f4229d09..01757c2c 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -79,6 +79,25 @@ UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::RE UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; +/* basic check for 'ALTER TABLE ... 
ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + DROP SCHEMA test_updates CASCADE; DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 233054b7..0c912abe 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -48,6 +48,7 @@ typedef struct Oid partid; /* partition's relid */ ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + TupleConversionMap *tuple_map_child; /* tuple mapping (child => child), for exclude 'ctid' */ PartRelationInfo *prel; /* this child might be a parent... */ ExprState *prel_expr_state; /* and have its own part. 
expression */ @@ -173,6 +174,10 @@ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storag TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); +TupleConversionMap * build_part_tuple_map_child(Relation child_rel); + +void destroy_tuple_map(TupleConversionMap *tuple_map); + List * pfilter_build_tlist(Plan *subplan); diff --git a/src/partition_filter.c b/src/partition_filter.c index 5d1f4943..44f021c4 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -239,13 +239,9 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) } /* Free conversion-related stuff */ - if (rri_holder->tuple_map) - { - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); + destroy_tuple_map(rri_holder->tuple_map); - free_conversion_map(rri_holder->tuple_map); - } + destroy_tuple_map(rri_holder->tuple_map_child); /* Don't forget to close 'prel'! */ if (rri_holder->prel) @@ -381,6 +377,13 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) */ rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); + /* + * Field for child->child tuple transformation map. We need to + * convert tuples because child TupleDesc might have extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = NULL; + /* Default values */ rri_holder->prel = NULL; rri_holder->prel_expr_state = NULL; @@ -468,6 +471,73 @@ build_part_tuple_map(Relation base_rel, Relation child_rel) return tuple_map; } +/* + * Build tuple conversion map (e.g. partition tuple has extra column(s)). + * We create a special tuple map (tuple_map_child), which, when applied to the + * tuple of partition, translates the tuple attributes into the tuple + * attributes of the same partition, discarding service attributes like "ctid" + * (i.e. working like junkFilter). 
+ */ +TupleConversionMap * +build_part_tuple_map_child(Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc1; + TupleDesc child_tupdesc2; + int n; +#if PG_VERSION_NUM >= 130000 + AttrMap *attrMap; +#else + AttrNumber *attrMap; +#endif + + child_tupdesc1 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc1->tdtypeid = InvalidOid; + + child_tupdesc2 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc2->tdtypeid = InvalidOid; + + /* Generate tuple transformation map */ +#if PG_VERSION_NUM >= 130000 + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); +#else + attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, + ERR_PART_DESC_CONVERT); +#endif + + /* Prepare the map structure */ + tuple_map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap)); + tuple_map->indesc = child_tupdesc1; + tuple_map->outdesc = child_tupdesc2; + tuple_map->attrMap = attrMap; + + /* preallocate workspace for Datum arrays */ + n = child_tupdesc1->natts; + tuple_map->outvalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->outisnull = (bool *) palloc(n * sizeof(bool)); + + n = child_tupdesc1->natts + 1; /* +1 for NULL */ + tuple_map->invalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->inisnull = (bool *) palloc(n * sizeof(bool)); + + tuple_map->invalues[0] = (Datum) 0; /* set up the NULL entry */ + tuple_map->inisnull[0] = true; + + return tuple_map; +} + +/* Destroy tuple conversion map */ +void +destroy_tuple_map(TupleConversionMap *tuple_map) +{ + if (tuple_map) + { + FreeTupleDesc(tuple_map->indesc); + FreeTupleDesc(tuple_map->outdesc); + + free_conversion_map(tuple_map); + } +} /* * ----------------------------------- @@ -812,6 +882,7 @@ partition_filter_exec(CustomScanState *node) MemoryContext old_mcxt; ResultRelInfoHolder *rri_holder; ResultRelInfo *rri; + JunkFilter *junkfilter = NULL; #if PG_VERSION_NUM >= 140000 PartitionRouterState *pr_state = linitial(node->custom_ps); @@ 
-827,6 +898,8 @@ partition_filter_exec(CustomScanState *node) */ reint_partition_filter_state(state, pr_state->current_rri); } +#else + junkfilter = estate->es_result_relation_info->ri_junkFilter; #endif /* Switch to per-tuple context */ @@ -844,18 +917,46 @@ partition_filter_exec(CustomScanState *node) /* Magic: replace parent's ResultRelInfo with ours */ estate->es_result_relation_info = rri; + /* + * Besides 'transform map' we should process two cases: + * 1) CMD_UPDATE, row moved to other partition, junkfilter == NULL + * (filled in router_set_slot() for SELECT + INSERT); + * we should clear attribute 'ctid' (do not insert it into database); + * 2) CMD_INSERT/CMD_UPDATE operations for partitions with deleted column(s), + * junkfilter == NULL. + */ /* If there's a transform map, rebuild the tuple */ - if (rri_holder->tuple_map) + if (rri_holder->tuple_map || + (!junkfilter && + (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */))) { - Relation child_rel = rri->ri_RelationDesc; - - /* xxx why old code decided to materialize it? */ #if PG_VERSION_NUM < 120000 HeapTuple htup_old, htup_new; +#endif + Relation child_rel = rri->ri_RelationDesc; + TupleConversionMap *tuple_map; + if (rri_holder->tuple_map) + tuple_map = rri_holder->tuple_map; + else + { + if (!rri_holder->tuple_map_child) + { /* + * Generate child->child tuple transformation map. We need to + * convert tuples because child TupleDesc has extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel); + } + tuple_map = rri_holder->tuple_map_child; + } + + /* xxx why old code decided to materialize it? 
*/ +#if PG_VERSION_NUM < 120000 htup_old = ExecMaterializeSlot(slot); - htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + htup_new = do_convert_tuple(htup_old, tuple_map); ExecClearTuple(slot); #endif @@ -872,7 +973,7 @@ partition_filter_exec(CustomScanState *node) /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); #if PG_VERSION_NUM >= 120000 - slot = execute_attr_map_slot(rri_holder->tuple_map->attrMap, slot, state->tup_convert_slot); + slot = execute_attr_map_slot(tuple_map->attrMap, slot, state->tup_convert_slot); #else slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); #endif From c16f7468167df5cb7655ca46b192b61619ab0930 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 12 Nov 2021 02:31:41 +0300 Subject: [PATCH 1059/1124] [PGPRO-5113] Added extra conditions for using tuple map (v9.6 - v11) --- src/partition_filter.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/partition_filter.c b/src/partition_filter.c index 44f021c4..0ef84e61 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -929,7 +929,19 @@ partition_filter_exec(CustomScanState *node) if (rri_holder->tuple_map || (!junkfilter && (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && - (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */))) + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */ +#if PG_VERSION_NUM < 120000 + /* + * If we have a regular physical tuple 'slot->tts_tuple' and + * it's locally palloc'd => we will use this tuple in + * ExecMaterializeSlot() instead of materialize the slot, so + * need to check number of attributes for this tuple: + */ + || (slot->tts_tuple && slot->tts_shouldFree && + HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) > + rri->ri_RelationDesc->rd_att->natts /* extra fields */) 
+#endif + ))) { #if PG_VERSION_NUM < 120000 HeapTuple htup_old, From e4faa9030c99a08ca587e036239b30ef9ca888c4 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Sun, 31 Oct 2021 14:51:35 +0300 Subject: [PATCH 1060/1124] Changed some regression tests + results 1) 'DROP SCHEMA ... CASCADE' replaced to 'DROP SCHEMA ...'; 2) pathman_column_type.sql: from results removed row 'partition status cache'; 3) pathman_mergejoin.sql: added GUC's for fixate strategy of queries --- expected/pathman_array_qual.out | 5 ++-- expected/pathman_array_qual_1.out | 5 ++-- expected/pathman_basic.out | 21 ++++++++++++++--- expected/pathman_basic_1.out | 21 ++++++++++++++--- expected/pathman_basic_2.out | 21 ++++++++++++++--- expected/pathman_bgw.out | 2 +- expected/pathman_calamity.out | 16 +++++++++---- expected/pathman_calamity_1.out | 16 +++++++++---- expected/pathman_calamity_2.out | 16 +++++++++---- expected/pathman_calamity_3.out | 16 +++++++++---- expected/pathman_callbacks.out | 6 +++-- expected/pathman_column_type.out | 32 +++++++++++++------------- expected/pathman_column_type_1.out | 32 +++++++++++++------------- expected/pathman_cte.out | 5 ++-- expected/pathman_cte_1.out | 5 ++-- expected/pathman_cte_2.out | 5 ++-- expected/pathman_declarative.out | 8 ++++--- expected/pathman_declarative_1.out | 8 ++++--- expected/pathman_domains.out | 6 +++-- expected/pathman_dropped_cols.out | 2 +- expected/pathman_expressions.out | 10 ++++++-- expected/pathman_expressions_1.out | 10 ++++++-- expected/pathman_expressions_2.out | 10 ++++++-- expected/pathman_foreign_keys.out | 5 ++-- expected/pathman_gaps.out | 11 +++++++-- expected/pathman_gaps_1.out | 11 +++++++-- expected/pathman_hashjoin.out | 9 +++++--- expected/pathman_hashjoin_1.out | 9 +++++--- expected/pathman_hashjoin_2.out | 9 +++++--- expected/pathman_hashjoin_3.out | 9 +++++--- expected/pathman_hashjoin_4.out | 9 +++++--- expected/pathman_hashjoin_5.out | 9 +++++--- expected/pathman_inserts.out | 8 +++++-- 
expected/pathman_inserts_1.out | 8 +++++-- expected/pathman_inserts_2.out | 8 +++++-- expected/pathman_interval.out | 2 +- expected/pathman_join_clause.out | 12 +++++++--- expected/pathman_join_clause_1.out | 12 +++++++--- expected/pathman_join_clause_2.out | 12 +++++++--- expected/pathman_lateral.out | 5 ++-- expected/pathman_lateral_1.out | 5 ++-- expected/pathman_lateral_2.out | 5 ++-- expected/pathman_lateral_3.out | 5 ++-- expected/pathman_mergejoin.out | 12 +++++++--- expected/pathman_mergejoin_1.out | 12 +++++++--- expected/pathman_mergejoin_2.out | 12 +++++++--- expected/pathman_mergejoin_3.out | 12 +++++++--- expected/pathman_mergejoin_4.out | 12 +++++++--- expected/pathman_mergejoin_5.out | 12 +++++++--- expected/pathman_only.out | 5 ++-- expected/pathman_only_1.out | 5 ++-- expected/pathman_only_2.out | 5 ++-- expected/pathman_param_upd_del.out | 5 ++-- expected/pathman_permissions.out | 2 +- expected/pathman_rebuild_deletes.out | 5 ++-- expected/pathman_rebuild_deletes_1.out | 5 ++-- expected/pathman_rebuild_updates.out | 5 ++-- expected/pathman_rebuild_updates_1.out | 5 ++-- expected/pathman_rowmarks.out | 10 ++++---- expected/pathman_rowmarks_1.out | 10 ++++---- expected/pathman_rowmarks_2.out | 10 ++++---- expected/pathman_rowmarks_3.out | 10 ++++---- expected/pathman_runtime_nodes.out | 24 ++++++++++++++++--- expected/pathman_subpartitions.out | 4 ++-- expected/pathman_subpartitions_1.out | 4 ++-- expected/pathman_upd_del.out | 10 +++++--- expected/pathman_upd_del_1.out | 10 +++++--- expected/pathman_upd_del_2.out | 10 +++++--- expected/pathman_update_node.out | 7 ++++-- expected/pathman_update_triggers.out | 6 +++-- expected/pathman_utility_stmt.out | 27 +++++++++++++++------- expected/pathman_views.out | 7 ++++-- expected/pathman_views_1.out | 7 ++++-- expected/pathman_views_2.out | 7 ++++-- expected/pathman_views_3.out | 7 ++++-- sql/pathman_array_qual.sql | 3 ++- sql/pathman_basic.sql | 15 ++++++++++-- sql/pathman_bgw.sql | 2 +- 
sql/pathman_calamity.sql | 11 ++++++--- sql/pathman_callbacks.sql | 5 +++- sql/pathman_column_type.sql | 17 +++++++++----- sql/pathman_cte.sql | 3 ++- sql/pathman_declarative.sql | 6 +++-- sql/pathman_domains.sql | 4 +++- sql/pathman_dropped_cols.sql | 2 +- sql/pathman_expressions.sql | 6 ++++- sql/pathman_foreign_keys.sql | 4 +++- sql/pathman_gaps.sql | 6 ++++- sql/pathman_hashjoin.sql | 6 +++-- sql/pathman_inserts.sql | 6 ++++- sql/pathman_interval.sql | 2 +- sql/pathman_join_clause.sql | 9 ++++++-- sql/pathman_lateral.sql | 3 ++- sql/pathman_mergejoin.sql | 10 ++++++-- sql/pathman_only.sql | 3 ++- sql/pathman_param_upd_del.sql | 3 ++- sql/pathman_permissions.sql | 2 +- sql/pathman_rebuild_deletes.sql | 3 ++- sql/pathman_rebuild_updates.sql | 3 ++- sql/pathman_rowmarks.sql | 4 +++- sql/pathman_runtime_nodes.sql | 19 +++++++++++++-- sql/pathman_subpartitions.sql | 3 ++- sql/pathman_upd_del.sql | 7 ++++-- sql/pathman_update_node.sql | 4 +++- sql/pathman_update_triggers.sql | 4 +++- sql/pathman_utility_stmt.sql | 18 +++++++++++---- sql/pathman_views.sql | 5 +++- 107 files changed, 649 insertions(+), 274 deletions(-) diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 49dca03a..0587a1c8 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -2402,6 +2402,7 @@ EXECUTE q(100); (1 row) DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out index 6c8def94..dd7d2485 100644 --- a/expected/pathman_array_qual_1.out +++ b/expected/pathman_array_qual_1.out @@ -2392,6 +2392,7 @@ EXECUTE q(100); (1 row) DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects 
+DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 4117a00c..3afde299 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1830,7 +1830,22 @@ SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out index 702f9027..92a86727 100644 --- a/expected/pathman_basic_1.out +++ b/expected/pathman_basic_1.out @@ -1813,7 +1813,22 @@ SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong 
CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out index 28e46c14..7cfde8a6 100644 --- a/expected/pathman_basic_2.out +++ b/expected/pathman_basic_2.out @@ -1813,7 +1813,22 @@ SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; ERROR: could not expand partitioned table "mixinh_child1" -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 32 other objects +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 5d5d2b21..4f2ad6b8 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -242,5 +242,5 @@ SELECT count(*) FROM test_bgw.conc_part; DROP TABLE test_bgw.conc_part CASCADE; NOTICE: drop cascades to 5 other objects -DROP SCHEMA test_bgw CASCADE; +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 
d8b6ad96..7226e7b9 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 2b0f98e5..62050cfd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop 
cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index b6fafc83..f647e788 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -779,8 +779,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -987,7 +995,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1060,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT 
* FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index 9aec9765..f64a5f8b 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -783,8 +783,16 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------- @@ -991,7 +999,7 @@ SELECT context, entries FROM pathman_cache_stats partition parents cache | 0 (3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1064,5 +1072,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 3eea2049..8427dae7 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -411,6 +411,8 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 5 other objects -DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 2 other objects 
+DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 4e2f3ff6..c77acbb2 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -23,14 +23,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* * Get parsed and analyzed expression. @@ -84,14 +84,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -135,14 +135,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; @@ -153,14 +153,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT 
context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; @@ -170,14 +170,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -199,5 +199,5 @@ NOTICE: 0 rows copied from test_column_type.test_4 (1 row) DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out index d169719d..06b61387 100644 --- a/expected/pathman_column_type_1.out +++ b/expected/pathman_column_type_1.out @@ -23,14 +23,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* * Get parsed and analyzed expression. 
@@ -84,14 +84,14 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 10 partition parents cache | 10 - partition status cache | 3 -(4 rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -135,14 +135,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; @@ -153,14 +153,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 rows) +(3 rows) /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; @@ -170,14 +170,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; context | entries -------------------------+--------- maintenance | 0 partition bounds cache | 5 partition parents cache | 5 - partition status cache | 3 -(4 
rows) +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -199,5 +199,5 @@ NOTICE: 0 rows copied from test_column_type.test_4 (1 row) DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index ce818a36..33821ac0 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -271,6 +271,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out index 70a9ee88..5e30e188 100644 --- a/expected/pathman_cte_1.out +++ b/expected/pathman_cte_1.out @@ -260,6 +260,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out index 455a7cad..6b64ad42 100644 --- a/expected/pathman_cte_2.out +++ b/expected/pathman_cte_2.out @@ -247,6 +247,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out index 01f924ae..2915ecfb 100644 --- a/expected/pathman_declarative.out +++ b/expected/pathman_declarative.out @@ -99,7 +99,9 @@ ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS 
test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 8 other objects +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out index 9870a3e7..dede4941 100644 --- a/expected/pathman_declarative_1.out +++ b/expected/pathman_declarative_1.out @@ -99,7 +99,9 @@ ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN NOTICE: relation "nonexistent_table" does not exist, skipping ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; NOTICE: relation "nonexistent_table" does not exist, skipping -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 8 other objects +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index 41c8bfbb..cc32ce0c 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -124,6 +124,8 @@ ORDER BY "partition"::TEXT; domains.dom_table | domains.dom_table_4 | 1 | val | | (5 rows) -DROP SCHEMA domains CASCADE; -NOTICE: drop cascades to 7 other objects +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out index 220f6750..826931d3 100644 --- a/expected/pathman_dropped_cols.out +++ b/expected/pathman_dropped_cols.out @@ -205,5 +205,5 @@ EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict 
CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA dropped_cols CASCADE; +DROP SCHEMA dropped_cols; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 1db38acb..cd629b8e 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -430,6 +430,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out index 126534a0..66e3ea75 100644 --- a/expected/pathman_expressions_1.out +++ b/expected/pathman_expressions_1.out @@ -434,6 +434,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out index 83b0c7b0..89bf24ef 100644 --- a/expected/pathman_expressions_2.out +++ b/expected/pathman_expressions_2.out @@ 
-425,6 +425,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (2 rows) -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 2ff12279..34fc75ad 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -90,6 +90,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; DROP TABLE fkeys.messages, fkeys.replies CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA fkeys CASCADE; -NOTICE: drop cascades to 2 other objects +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out index 1d9b1f33..530beca9 100644 --- a/expected/pathman_gaps.out +++ b/expected/pathman_gaps.out @@ -822,6 +822,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -> Seq Scan on test_4_11 (7 rows) -DROP SCHEMA gaps CASCADE; -NOTICE: drop cascades to 30 other objects +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out index d6e1973d..b1c0ac34 100644 --- 
a/expected/pathman_gaps_1.out +++ b/expected/pathman_gaps_1.out @@ -807,6 +807,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -> Seq Scan on test_4_11 (7 rows) -DROP SCHEMA gaps CASCADE; -NOTICE: drop cascades to 30 other objects +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out index 779efe3d..f5ebabdd 100644 --- a/expected/pathman_hashjoin.out +++ b/expected/pathman_hashjoin.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out index ae1edda6..df6c0174 100644 --- a/expected/pathman_hashjoin_1.out +++ b/expected/pathman_hashjoin_1.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out 
b/expected/pathman_hashjoin_2.out index 21cd1883..69ea5762 100644 --- a/expected/pathman_hashjoin_2.out +++ b/expected/pathman_hashjoin_2.out @@ -68,7 +68,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (13 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out index 106e8c0e..e2c8903a 100644 --- a/expected/pathman_hashjoin_3.out +++ b/expected/pathman_hashjoin_3.out @@ -67,7 +67,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (12 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out index ad4b5651..ef8dfc29 100644 --- a/expected/pathman_hashjoin_4.out +++ b/expected/pathman_hashjoin_4.out @@ -75,7 +75,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 (20 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_5.out 
b/expected/pathman_hashjoin_5.out index 7bbea061..a8f3b6e7 100644 --- a/expected/pathman_hashjoin_5.out +++ b/expected/pathman_hashjoin_5.out @@ -67,7 +67,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Filter: (id IS NOT NULL) (12 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 225604c5..16656f18 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out index a6634edd..3479c12d 100644 --- a/expected/pathman_inserts_1.out +++ b/expected/pathman_inserts_1.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION 
test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out index 9a439010..91f05753 100644 --- a/expected/pathman_inserts_2.out +++ b/expected/pathman_inserts_2.out @@ -1066,6 +1066,10 @@ SELECT count(*) FROM test_inserts.special_2; DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 72dc4e01..e4741522 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -271,5 +271,5 @@ SELECT set_interval('test_interval.abc', NULL::INTEGER); ERROR: table "test_interval.abc" is not partitioned by RANGE DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index ed822543..7654d4ca 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -171,7 +171,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE 
test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out index 09b9a00c..d65131c7 100644 --- a/expected/pathman_join_clause_1.out +++ b/expected/pathman_join_clause_1.out @@ -170,7 +170,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out index d58ff6f6..a1fae839 100644 --- a/expected/pathman_join_clause_2.out +++ b/expected/pathman_join_clause_2.out @@ -149,7 +149,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 0cb1a864..53edc3d2 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -122,6 +122,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects 
+DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out index 1dc67fe2..12995290 100644 --- a/expected/pathman_lateral_1.out +++ b/expected/pathman_lateral_1.out @@ -116,6 +116,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out index 5ee4104c..df5292f8 100644 --- a/expected/pathman_lateral_2.out +++ b/expected/pathman_lateral_2.out @@ -122,6 +122,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out index dd64819d..4bc385de 100644 --- a/expected/pathman_lateral_3.out +++ b/expected/pathman_lateral_3.out @@ -121,6 +121,7 @@ select * from set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out index ca3a3d9d..d8a14371 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 
on j2.id = j1.id @@ -80,7 +82,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index 31da465a..bcd6c272 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -78,7 +80,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out index 4b614ad6..aed697d2 100644 --- a/expected/pathman_mergejoin_2.out +++ b/expected/pathman_mergejoin_2.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 
JOIN test.range_rel j2 on j2.id = j1.id @@ -71,7 +73,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out index 7003205f..85414544 100644 --- a/expected/pathman_mergejoin_3.out +++ b/expected/pathman_mergejoin_3.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -69,7 +71,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out index 185aa3d1..e2affa74 100644 --- a/expected/pathman_mergejoin_4.out +++ b/expected/pathman_mergejoin_4.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * 
FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -78,7 +80,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out index 6ffe89cd..7b607435 100644 --- a/expected/pathman_mergejoin_5.out +++ b/expected/pathman_mergejoin_5.out @@ -47,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -69,7 +71,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 83425632..1b9f6a6b 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -272,6 +272,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other 
objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index da913e54..b92a8eaf 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -275,6 +275,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index 39b8f199..63638012 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -275,6 +275,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out index ad935579..28fa616d 100644 --- a/expected/pathman_param_upd_del.out +++ b/expected/pathman_param_upd_del.out @@ -185,6 +185,7 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); (3 rows) DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE param_upd_del.test CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index d03588c7..04b1112d 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -259,5 +259,5 @@ DROP OWNED BY user1; DROP OWNED BY user2; DROP USER user1; DROP USER user2; -DROP SCHEMA permissions CASCADE; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes.out 
b/expected/pathman_rebuild_deletes.out index b19d700a..a5edc242 100644 --- a/expected/pathman_rebuild_deletes.out +++ b/expected/pathman_rebuild_deletes.out @@ -100,6 +100,7 @@ RETURNING *, tableoid::REGCLASS; (3 rows) DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; -NOTICE: drop cascades to 13 other objects +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out index d1c4b69e..eb2f5001 100644 --- a/expected/pathman_rebuild_deletes_1.out +++ b/expected/pathman_rebuild_deletes_1.out @@ -100,6 +100,7 @@ RETURNING *, tableoid::REGCLASS; (3 rows) DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; -NOTICE: drop cascades to 13 other objects +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index dfa4a5ce..40c5b048 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -194,6 +194,7 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out index 5bda15ce..57b3297a 100644 --- a/expected/pathman_rebuild_updates_1.out +++ b/expected/pathman_rebuild_updates_1.out @@ -194,6 +194,7 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_updates CASCADE; -NOTICE: drop 
cascades to 15 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index f9ef8114..ea047c9e 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -381,13 +381,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index e0877333..256b8637 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -436,13 +436,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git 
a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out index 7436b081..06fb88ac 100644 --- a/expected/pathman_rowmarks_2.out +++ b/expected/pathman_rowmarks_2.out @@ -378,13 +378,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out index 6179ff94..c2539d76 100644 --- a/expected/pathman_rowmarks_3.out +++ b/expected/pathman_rowmarks_3.out @@ -378,13 +378,13 @@ WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = SET enable_hashjoin = t; SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index f364cfb4..17905e59 100644 --- a/expected/pathman_runtime_nodes.out +++ 
b/expected/pathman_runtime_nodes.out @@ -444,7 +444,25 @@ where id = any (select generate_series(-10, -1)); /* should be empty */ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 37 other objects +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out index 25b36492..3a6a19eb 100644 --- a/expected/pathman_subpartitions.out +++ b/expected/pathman_subpartitions.out @@ -462,6 +462,6 @@ SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDAT DROP TABLE subpartitions.a2 CASCADE; NOTICE: drop cascades to 4 other objects DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out index 5ea33044..d620cde9 100644 --- a/expected/pathman_subpartitions_1.out +++ b/expected/pathman_subpartitions_1.out @@ -456,6 +456,6 @@ SELECT 
a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDAT DROP TABLE subpartitions.a2 CASCADE; NOTICE: drop cascades to 4 other objects DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; -NOTICE: drop cascades to function subpartitions.partitions_tree(regclass,text) +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 2cc19239..44bb34fc 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -460,7 +460,11 @@ WITH q AS (SELECT id FROM test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index 5cd5ac9f..0a7e91e9 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -460,7 +460,11 @@ WITH q AS (SELECT id FROM test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out index 2aeb6702..80325d7e 100644 --- a/expected/pathman_upd_del_2.out +++ b/expected/pathman_upd_del_2.out @@ -452,7 +452,11 @@ WITH q AS (SELECT id FROM 
test.tmp2 WHERE id < 3) DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); ROLLBACK; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 27 other objects +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out index 120b42c4..9fc1d07f 100644 --- a/expected/pathman_update_node.out +++ b/expected/pathman_update_node.out @@ -446,6 +446,9 @@ SELECT count(*) FROM test_update_node.test_hash; 10 (1 row) -DROP SCHEMA test_update_node CASCADE; -NOTICE: drop cascades to 17 other objects +DROP TABLE test_update_node.test_hash CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_update_node; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out index d5c92b9f..40c6a19c 100644 --- a/expected/pathman_update_triggers.out +++ b/expected/pathman_update_triggers.out @@ -184,6 +184,8 @@ select count(distinct val) from test_update_triggers.test; 1 (1 row) -DROP SCHEMA test_update_triggers CASCADE; -NOTICE: drop cascades to 4 other objects +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 7e59fa23..1a8b969e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -214,8 +214,11 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; 1 (1 row) -DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 797 other objects +DROP 
TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE copy_stmt_hooking.test2 CASCADE; +NOTICE: drop cascades to 790 other objects +DROP SCHEMA copy_stmt_hooking; /* * Test auto check constraint renaming */ @@ -353,8 +356,15 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; pathman_plain_test_renamed_check | CHECK (a < 100) (1 row) -DROP SCHEMA rename CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +NOTICE: drop cascades to table rename.test_inh_one +DROP TABLE rename.parent CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE rename.test CASCADE; +NOTICE: drop cascades to 3 other objects +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; /* * Test DROP INDEX CONCURRENTLY (test snapshots) */ @@ -368,8 +378,9 @@ SELECT create_hash_partitions('drop_index.test', 'val', 2); (1 row) DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; -DROP SCHEMA drop_index CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE drop_index.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA drop_index; /* * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla */ @@ -426,12 +437,12 @@ ERROR: schema "nonexistent_schema" does not exist CREATE SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; DROP TABLE test_nonexistance2.existent_table; -DROP SCHEMA test_nonexistance2 CASCADE; +DROP SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; NOTICE: relation "nonexistent_table" does not exist, skipping CREATE TABLE test_nonexistance.existent_table(i INT4); ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; ERROR: tablespace "nonexistent_tablespace" does not exist DROP TABLE 
test_nonexistance.existent_table; -DROP SCHEMA test_nonexistance CASCADE; +DROP SCHEMA test_nonexistance; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views.out b/expected/pathman_views.out index 78589970..64b8425d 100644 --- a/expected/pathman_views.out +++ b/expected/pathman_views.out @@ -186,6 +186,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out index ea390d84..e6bb45f5 100644 --- a/expected/pathman_views_1.out +++ b/expected/pathman_views_1.out @@ -242,6 +242,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out index 15770ec0..45ea3eb4 100644 --- a/expected/pathman_views_2.out +++ b/expected/pathman_views_2.out @@ -183,6 +183,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out index 09b5718f..cf5ca58e 100644 --- a/expected/pathman_views_3.out +++ b/expected/pathman_views_3.out @@ -184,6 
+184,9 @@ explain (costs off) select * from views.abc_union_all where id = 5; Filter: (id = 5) (5 rows) -DROP SCHEMA views CASCADE; -NOTICE: drop cascades to 16 other objects +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 84327359..9f1b0c1e 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -427,5 +427,6 @@ DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; +DROP TABLE array_qual.test CASCADE; +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 403424f5..478935c5 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -563,6 +563,17 @@ INSERT INTO test.mixinh_child1 VALUES (1); SELECT * FROM test.mixinh_child1; SELECT * FROM test.mixinh_parent; -DROP SCHEMA test CASCADE; +DROP TABLE test.hash_rel CASCADE; +DROP TABLE test.index_on_childs CASCADE; +DROP TABLE test.mixinh_child1 CASCADE; +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 28f922e6..74239e99 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ -145,5 +145,5 @@ DROP TABLE test_bgw.conc_part CASCADE; -DROP SCHEMA test_bgw CASCADE; +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 6ad0df0e..ecc2c30f 100644 --- a/sql/pathman_calamity.sql +++ 
b/sql/pathman_calamity.sql @@ -345,7 +345,12 @@ SELECT merge_range_partitions('calamity.merge_test_a_1', DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; -DROP SCHEMA calamity CASCADE; +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +DROP TABLE calamity.part_ok CASCADE; +DROP TABLE calamity.hash_two_times CASCADE; +DROP TABLE calamity.to_be_disabled CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; @@ -428,7 +433,7 @@ DROP TABLE calamity.test_pathman_cache_stats CASCADE; SELECT context, entries FROM pathman_cache_stats WHERE context != 'partition status cache' ORDER BY context; /* OK */ -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; @@ -467,5 +472,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 65b729d9..096a55ad 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -144,5 +144,8 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; -DROP SCHEMA callbacks CASCADE; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 685643fd..d3f16107 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -20,7 +20,8 @@ SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* * Get parsed and analyzed expression. 
@@ -45,7 +46,8 @@ DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -65,21 +67,24 @@ SELECT create_hash_partitions('test_column_type.test', 'id', 5); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -89,5 +94,5 @@ SELECT drop_partitions('test_column_type.test'); DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql index 5a695cbb..594c6db7 100644 --- a/sql/pathman_cte.sql +++ b/sql/pathman_cte.sql @@ -157,5 +157,6 @@ SELECT * FROM test; -DROP SCHEMA 
test_cte CASCADE; +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql index d89ce3ed..eb12c295 100644 --- a/sql/pathman_declarative.sql +++ b/sql/pathman_declarative.sql @@ -43,6 +43,8 @@ CREATE TABLE test.r4 PARTITION OF test.range_rel ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; -DROP SCHEMA test CASCADE; +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index 4793c6f8..105b2399 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -41,5 +41,7 @@ SELECT * FROM pathman_partition_list ORDER BY "partition"::TEXT; -DROP SCHEMA domains CASCADE; +DROP TABLE domains.dom_table CASCADE; +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql index cb6acc57..2a128df2 100644 --- a/sql/pathman_dropped_cols.sql +++ b/sql/pathman_dropped_cols.sql @@ -100,5 +100,5 @@ EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); DEALLOCATE getbyroot; DROP TABLE root_dict CASCADE; -DROP SCHEMA dropped_cols CASCADE; +DROP SCHEMA dropped_cols; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index ed05be79..bf29f896 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -178,5 +178,9 @@ INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('as SELECT COUNT(*) FROM test_exprs.range_rel_6; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; -DROP SCHEMA test_exprs CASCADE; +DROP TABLE test_exprs.canary CASCADE; 
+DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +DROP TABLE test_exprs.hash_rel CASCADE; +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index 1ec1b766..74dee25f 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -52,5 +52,7 @@ DROP TABLE fkeys.messages, fkeys.replies CASCADE; -DROP SCHEMA fkeys CASCADE; +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql index 55c9a16d..129b210c 100644 --- a/sql/pathman_gaps.sql +++ b/sql/pathman_gaps.sql @@ -137,5 +137,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; -DROP SCHEMA gaps CASCADE; +DROP TABLE gaps.test_1 CASCADE; +DROP TABLE gaps.test_2 CASCADE; +DROP TABLE gaps.test_3 CASCADE; +DROP TABLE gaps.test_4 CASCADE; +DROP SCHEMA gaps; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql index 2c3654d4..620dee5f 100644 --- a/sql/pathman_hashjoin.sql +++ b/sql/pathman_hashjoin.sql @@ -49,6 +49,8 @@ JOIN test.range_rel j2 on j2.id = j1.id JOIN test.num_range_rel j3 on j3.id = j1.id WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -DROP SCHEMA test CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index c8c6439d..aa5b6c1c 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -223,5 +223,9 @@ DROP TABLE test_inserts.special_2; DROP TABLE test_inserts.test_special_only CASCADE; -DROP SCHEMA test_inserts CASCADE; +DROP TABLE test_inserts.storage CASCADE; +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP 
FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index f2933ab0..3a457e7a 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -168,5 +168,5 @@ DROP TABLE test_interval.abc CASCADE; -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 3a0a655f..aa30b0b8 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -106,6 +106,11 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); -DROP SCHEMA test CASCADE; +DROP TABLE test.child CASCADE; +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index d287c051..d5def38c 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -45,5 +45,6 @@ set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; +DROP TABLE test_lateral.data CASCADE; +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 05de4ba2..d1084375 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -48,6 +48,9 @@ SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -56,7 +59,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; +SET enable_seqscan = ON; -DROP SCHEMA test CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; 
DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 53ef6a9a..88f4e88a 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -74,5 +74,6 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test -DROP SCHEMA test_only CASCADE; +DROP TABLE test_only.from_only_test CASCADE; +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql index f4e42a41..0f3030e7 100644 --- a/sql/pathman_param_upd_del.sql +++ b/sql/pathman_param_upd_del.sql @@ -45,5 +45,6 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; +DROP TABLE param_upd_del.test CASCADE; +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 3a234676..49e1fc18 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -174,5 +174,5 @@ DROP USER user1; DROP USER user2; -DROP SCHEMA permissions CASCADE; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql index 28a09916..1af6b61a 100644 --- a/sql/pathman_rebuild_deletes.sql +++ b/sql/pathman_rebuild_deletes.sql @@ -60,5 +60,6 @@ DROP TABLE test_deletes.test_dummy; -DROP SCHEMA test_deletes CASCADE; +DROP TABLE test_deletes.test CASCADE; +DROP SCHEMA test_deletes; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index 01757c2c..fbbbcbba 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -99,5 +99,6 @@ select * from test_updates.test_5113 where val = 11; drop table test_updates.test_5113 cascade; -DROP SCHEMA test_updates CASCADE; +DROP TABLE test_updates.test CASCADE; +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 
ab7f24ac..bb7719ea 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -135,5 +135,7 @@ SET enable_mergejoin = t; -DROP SCHEMA rowmarks CASCADE; +DROP TABLE rowmarks.first CASCADE; +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index e0b50e9b..81c046db 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -331,7 +331,22 @@ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +DROP TABLE test.runtime_test_2 CASCADE; +DROP TABLE test.runtime_test_3 CASCADE; +DROP TABLE test.runtime_test_4 CASCADE; +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql index 7a4dc606..5515874c 100644 --- a/sql/pathman_subpartitions.sql +++ b/sql/pathman_subpartitions.sql @@ -164,5 +164,6 @@ DROP TABLE subpartitions.a2 CASCADE; DROP TABLE subpartitions.a1; -DROP SCHEMA subpartitions CASCADE; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index a6cab581..a034c14a 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -274,6 +274,9 @@ ROLLBACK; -DROP SCHEMA test CASCADE; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +DROP TABLE test.range_rel CASCADE; 
+DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql index 2c7e97f7..e70f60f4 100644 --- a/sql/pathman_update_node.sql +++ b/sql/pathman_update_node.sql @@ -214,5 +214,7 @@ SELECT count(*) FROM test_update_node.test_hash; -DROP SCHEMA test_update_node CASCADE; +DROP TABLE test_update_node.test_hash CASCADE; +DROP TABLE test_update_node.test_range CASCADE; +DROP SCHEMA test_update_node; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql index e8405acb..646afe65 100644 --- a/sql/pathman_update_triggers.sql +++ b/sql/pathman_update_triggers.sql @@ -140,5 +140,7 @@ update test_update_triggers.test set val = val + 1 returning *, tableoid::regcla select count(distinct val) from test_update_triggers.test; -DROP SCHEMA test_update_triggers CASCADE; +DROP TABLE test_update_triggers.test CASCADE; +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 3b99a2f3..08992835 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -154,7 +154,9 @@ COPY copy_stmt_hooking.test2(t) FROM stdin; \. 
SELECT COUNT(*) FROM copy_stmt_hooking.test2; -DROP SCHEMA copy_stmt_hooking CASCADE; +DROP TABLE copy_stmt_hooking.test CASCADE; +DROP TABLE copy_stmt_hooking.test2 CASCADE; +DROP SCHEMA copy_stmt_hooking; @@ -234,7 +236,12 @@ FROM pg_constraint r WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; -DROP SCHEMA rename CASCADE; +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +DROP TABLE rename.parent CASCADE; +DROP TABLE rename.test CASCADE; +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; @@ -248,7 +255,8 @@ CREATE INDEX ON drop_index.test (val); SELECT create_hash_partitions('drop_index.test', 'val', 2); DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; -DROP SCHEMA drop_index CASCADE; +DROP TABLE drop_index.test CASCADE; +DROP SCHEMA drop_index; /* * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla @@ -288,14 +296,14 @@ ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_sc CREATE SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; DROP TABLE test_nonexistance2.existent_table; -DROP SCHEMA test_nonexistance2 CASCADE; +DROP SCHEMA test_nonexistance2; ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; CREATE TABLE test_nonexistance.existent_table(i INT4); ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; DROP TABLE test_nonexistance.existent_table; -DROP SCHEMA test_nonexistance CASCADE; +DROP SCHEMA test_nonexistance; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql index 65e64149..36baa5c5 100644 --- a/sql/pathman_views.sql +++ b/sql/pathman_views.sql @@ -79,5 +79,8 @@ explain (costs off) select * from views.abc_union_all where id = 5; -DROP SCHEMA views CASCADE; +DROP TABLE views._abc CASCADE; +DROP TABLE views._abc_add CASCADE; 
+DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; DROP EXTENSION pg_pathman; From a5356299da96093cd2afe2623629b74640759d20 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Oct 2021 14:05:11 +0300 Subject: [PATCH 1061/1124] [PGPRO-5614] Reset cache at start and at finish ATX transaction --- src/pg_pathman.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index f06e794e..24b22eb2 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -4,7 +4,7 @@ * This module sets planner hooks, handles SELECT queries and produces * paths for partitioned tables * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2021, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -281,6 +281,32 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) else return 1.0; } +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +/* + * Reset cache at start and at finish ATX transaction + */ +static void +pathman_xact_cb(XactEvent event, void *arg) +{ + if (getNestLevelATX() > 0) + { + /* + * For each ATX transaction start/finish: need to reset pg_pathman + * cache because we shouldn't see uncommitted data in autonomous + * transaction and data of autonomous transaction in main transaction + */ + if ((event == XACT_EVENT_START /* start */) || + (event == XACT_EVENT_ABORT || + event == XACT_EVENT_PARALLEL_ABORT || + event == XACT_EVENT_COMMIT || + event == XACT_EVENT_PARALLEL_COMMIT || + event == XACT_EVENT_PREPARE /* finish */)) + { + pathman_relcache_hook(PointerGetDatum(NULL), InvalidOid); + } + } +} +#endif /* * ------------------- @@ -330,6 +356,11 @@ _PG_init(void) init_partition_filter_static_data(); init_partition_router_static_data(); init_partition_overseer_static_data(); + +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 + /* Callbacks for reload relcache for ATX transactions */ + 
RegisterXactCallback(pathman_xact_cb, NULL); +#endif } /* Get cached PATHMAN_CONFIG relation Oid */ From 8ffc7224187cc59e91d99f6edeaa19982f9372b7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 6 Dec 2021 15:30:36 +0300 Subject: [PATCH 1062/1124] [PGPRO-5902] Reset cache at start and at finish ATX transaction (for v10-v12) --- src/pg_pathman.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 24b22eb2..35ad28dd 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -281,7 +281,7 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) else return 1.0; } -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 /* * Reset cache at start and at finish ATX transaction */ @@ -357,7 +357,7 @@ _PG_init(void) init_partition_router_static_data(); init_partition_overseer_static_data(); -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 /* Callbacks for reload relcache for ATX transactions */ RegisterXactCallback(pathman_xact_cb, NULL); #endif From 1daee0c503ded92d1504b5ec8c99401cee2994ed Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 20 Apr 2022 21:14:25 +0300 Subject: [PATCH 1063/1124] [PGPRO-6538] Changed lock order The parent table is locked first and then are locked the partitions. 
--- src/pl_range_funcs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 12c247ab..4465d36e 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -683,9 +683,6 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Extract partition Oids from array */ parts[i] = DatumGetObjectId(datums[i]); - /* Prevent modification of partitions */ - LockRelationOid(parts[i], AccessExclusiveLock); - /* Check if all partitions are from the same parent */ cur_parent = get_parent_of_partition(parts[i]); @@ -708,6 +705,10 @@ merge_range_partitions(PG_FUNCTION_ARGS) /* Prevent changes in partitioning scheme */ LockRelationOid(parent, ShareUpdateExclusiveLock); + /* Prevent modification of partitions */ + for (i = 0; i < nparts; i++) + LockRelationOid(parts[i], AccessExclusiveLock); + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); From 66543e768f7b9a7de6844f9bb0780a253ddc2823 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 21 Apr 2022 15:25:13 +0300 Subject: [PATCH 1064/1124] [PGPRO-6538] Skip non-existing relations for Citus compatibility (issue #247) --- src/utility_stmt_hooking.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 89649e0d..35786092 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -114,7 +114,11 @@ is_pathman_related_copy(Node *parsetree) (copy_stmt->is_from ? 
PATHMAN_COPY_WRITE_LOCK : PATHMAN_COPY_READ_LOCK), - false); + true); + + /* Skip relation if it does not exist (for Citus compatibility) */ + if (!OidIsValid(parent_relid)) + return false; /* Check that relation is partitioned */ if (has_pathman_relation_info(parent_relid)) From 52260faa84a09c81bb9c4b3709bf6e723d83ff24 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 18 May 2022 18:17:03 +0300 Subject: [PATCH 1065/1124] [PGPRO-6644] Corrected memory allocation using double pointer --- src/utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils.c b/src/utils.c index ddf10bae..15552f56 100644 --- a/src/utils.c +++ b/src/utils.c @@ -515,7 +515,7 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) /* Convert partition names into RangeVars */ if (relnames) { - rangevars = palloc(sizeof(RangeVar) * nrelnames); + rangevars = palloc(sizeof(RangeVar *) * nrelnames); for (i = 0; i < nrelnames; i++) { List *nl = stringToQualifiedNameList(relnames[i]); From 31f101220a83d6609fc269c6217ff6be4934317a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 14 Jun 2022 10:39:41 +0300 Subject: [PATCH 1066/1124] [PGPRO-6764] Fix build errors after merging 1C_master into STD_master --- src/partition_creation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index 2154bc8a..a89f8f68 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -1671,14 +1671,14 @@ make_constraint_common(char *name, Node *raw_expr) return constraint; } -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commits 639a86e36aae, c4cc2850f4d1 */ static String make_string_value_struct(char* str) { String val; val.type = T_String; - val.val = str; + val.sval = str; return val; } @@ -1689,7 +1689,7 @@ make_int_value_struct(int int_val) Integer val; val.type = T_Integer; - val.val = int_val; + val.ival = int_val; return val; } 
From 33b4d47a904cdb0f608c3e2c26e77919e351c41b Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 29 Jun 2022 21:12:21 +0300 Subject: [PATCH 1067/1124] PGPRO-6857: fix build for PostgreSQL 15 - In the commit 791b1b71da35d9d4264f72a87e4078b85a2fcfb4 the functions parse_analyze and pg_analyze_and_rewrite were renamed to parse_analyze_fixedparams and pg_analyze_and_rewrite_fixedparams respectively. - The commit 7103ebb7aae8ab8076b7e85f335ceb8fe799097c added a new argument tmfd to the function ExecBRUpdateTriggers. - The commit ba9a7e392171c83eb3332a757279e7088487f9a2 added a new argmument is_crosspart_update to the function ExecARDeleteTriggers. --- src/include/compat/pg_compat.h | 35 +++++++++++++++++++++++++++++++--- src/partition_router.c | 2 +- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index a551b7ed..80a76d60 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -635,7 +635,12 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ query_env) \ parse_analyze((RawStmt *) (parse_tree), (query_string), (param_types), \ @@ -653,7 +658,12 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), 
\ + (param_types), (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ nparams, query_env) \ pg_analyze_and_rewrite((RawStmt *) (parsetree), (query_string), \ @@ -766,6 +776,20 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, #include "access/tupconvert.h" #endif +/* + * ExecBRUpdateTriggers() + */ +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL) +#else +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot)) +#endif /* * ExecARInsertTriggers() @@ -801,7 +825,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecARDeleteTriggers() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture), false) +#elif PG_VERSION_NUM >= 100000 #define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ fdw_trigtuple, transition_capture) \ ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ diff --git a/src/partition_router.c b/src/partition_router.c index 17013a02..90727c00 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -523,7 +523,7 @@ router_lock_or_delete_tuple(PartitionRouterState *state, rri->ri_TrigDesc->trig_update_before_row) { #if PG_VERSION_NUM >= 120000 - if (!ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot)) + if (!ExecBRUpdateTriggersCompat(estate, epqstate, rri, tupleid, NULL, slot)) return NULL; #else slot = 
ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); From 677e7913bc7473d075d5e0777dfa18a98ada758f Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 11 Jul 2022 23:12:16 +0300 Subject: [PATCH 1068/1124] [PGPRO-5360] Fix for freeze (Valgrind and compilation with -Og option) --- src/pathman_workers.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 38d61622..7b64017b 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -458,8 +458,8 @@ bgw_main_concurrent_part(Datum main_arg) ConcurrentPartSlot *part_slot; char *sql = NULL; int64 rows; - bool failed; - int failures_count = 0; + volatile bool failed; + volatile int failures_count = 0; LOCKMODE lockmode = RowExclusiveLock; /* Update concurrent part slot */ @@ -497,7 +497,7 @@ bgw_main_concurrent_part(Datum main_arg) Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool rel_locked = false; + volatile bool rel_locked = false; /* Reset loop variables */ failed = false; From f1350909c8071c7f4393e37aa452576e11a09db1 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 14 Jul 2022 14:00:56 +0300 Subject: [PATCH 1069/1124] Revert "hide false positives found by clang analyzer" This reverts commit 6b00d812b9396353fff72d42181278c4bd19b68f. 
--- src/pathman_workers.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 7b64017b..eca9ee52 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -545,14 +545,12 @@ bgw_main_concurrent_part(Datum main_arg) /* Great, now relation is locked */ rel_locked = true; - (void) rel_locked; /* mute clang analyzer */ /* Make sure that relation exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation %u does not exist", part_slot->relid); } @@ -562,7 +560,6 @@ bgw_main_concurrent_part(Datum main_arg) { /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - (void) failures_count; /* mute clang analyzer */ elog(ERROR, "relation \"%s\" is not partitioned", get_rel_name(part_slot->relid)); From ff2942add4eb0c53936fac9205a40375db911a68 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Wed, 20 Jul 2022 13:21:59 +0300 Subject: [PATCH 1070/1124] adapt pg_pathman for upcoming PostgreSQL 15 Only call RequestAddinShmemSpace from within our implementation of shmem_request_hook (as required after commit 4f2400cb3 in PostgreSQL 15). 
--- src/pg_pathman.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 35ad28dd..b6b5d815 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -314,6 +314,11 @@ pathman_xact_cb(XactEvent event, void *arg) * ------------------- */ +#if PG_VERSION_NUM >= 150000 +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void pg_pathman_shmem_request(void); +#endif + /* Set initial values for all Postmaster's forks */ void _PG_init(void) @@ -326,7 +331,12 @@ _PG_init(void) } /* Request additional shared resources */ +#if PG_VERSION_NUM >= 150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = pg_pathman_shmem_request; +#else RequestAddinShmemSpace(estimate_pathman_shmem_size()); +#endif /* Assign pg_pathman's initial state */ pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; @@ -363,6 +373,17 @@ _PG_init(void) #endif } +#if PG_VERSION_NUM >= 150000 +static void +pg_pathman_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); + + RequestAddinShmemSpace(estimate_pathman_shmem_size()); +} +#endif + /* Get cached PATHMAN_CONFIG relation Oid */ Oid get_pathman_config_relid(bool invalid_is_ok) From 0b54f70915da8ca919ddb1216863ab8ebf819b46 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 7 Jun 2022 17:56:39 +0300 Subject: [PATCH 1071/1124] [PGPRO-6734] PostgreSQL v15 compatibility --- .travis.yml | 7 +- Dockerfile.tmpl | 2 +- patches/REL_14_STABLE-pg_pathman-core.diff | 8 +- patches/REL_15_STABLE-pg_pathman-core.diff | 506 +++++++++++++++++++++ run_tests.sh | 50 +- src/hooks.c | 2 +- src/partition_creation.c | 8 +- src/partition_router.c | 4 +- src/pg_pathman.c | 6 +- src/planner_tree_modification.c | 14 + tests/python/partitioning_test.py | 82 ++-- 11 files changed, 620 insertions(+), 69 deletions(-) create mode 100644 patches/REL_15_STABLE-pg_pathman-core.diff diff --git a/.travis.yml b/.travis.yml index 7f22cf8e..67a5f2ee 
100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,8 @@ notifications: on_failure: always env: + - PG_VERSION=14 LEVEL=hardcore + - PG_VERSION=14 - PG_VERSION=13 LEVEL=hardcore - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore @@ -28,12 +30,7 @@ env: - PG_VERSION=11 - PG_VERSION=10 LEVEL=hardcore - PG_VERSION=10 - - PG_VERSION=9.6 LEVEL=hardcore - - PG_VERSION=9.6 - - PG_VERSION=9.5 LEVEL=hardcore - - PG_VERSION=9.5 jobs: allow_failures: - env: PG_VERSION=10 LEVEL=nightmare - - env: PG_VERSION=9.6 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index e1e3b0e6..0a25ad14 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -2,7 +2,7 @@ FROM postgres:${PG_VERSION}-alpine # Install dependencies RUN apk add --no-cache \ - openssl curl \ + openssl curl git patch \ cmocka-dev \ perl perl-ipc-run \ python3 python3-dev py3-virtualenv \ diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index e3e7c549..751095aa 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -33,8 +33,8 @@ index 5483dee650..e2864e6ae9 100644 out: + + /* -+ * pg_pathman: pass 'tts_tableOid' to result tuple for determine from -+ * which partition the touple was read ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read + */ + if (resultslot) + { @@ -111,7 +111,7 @@ index d328856ae5..27235ec869 100644 for (;;) { + /* -+ * "es_original_tuple" should contains original modified tuple (new ++ * "es_original_tuple" should contain original modified tuple (new + * values of the changed columns plus row identity information such as + * CTID) in case tuple planSlot is replaced in pg_pathman to new value + * in call "ExecProcNode(subplanstate)". 
@@ -312,7 +312,7 @@ index 381d9e548d..9d101c3a86 100644 -ProtocolVersion FrontendProtocol; -+ProtocolVersion FrontendProtocol = (ProtocolVersion)0; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b30b0230 --- /dev/null +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -0,0 +1,506 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 47d80b0d25..6689776769 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index e44ad68cda..b9ba79e756 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1831,6 +1831,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? 
outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index ef2fd46092..8551733c55 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2811,6 +2818,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. +diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index a49c3da5b6..2c0b32e2df 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -551,7 +551,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, + * This is also a convenient place to verify that the output of an UPDATE + * matches the target table (ExecBuildUpdateProjection does that). 
+ */ +-static void ++void + ExecInitUpdateProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) + { +@@ -3460,6 +3460,7 @@ ExecModifyTable(PlanState *pstate) + PartitionTupleRouting *proute = node->mt_partition_tuple_routing; + List *relinfos = NIL; + ListCell *lc; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3501,6 +3502,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3508,6 +3511,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3541,7 +3552,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3578,6 +3591,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3587,6 +3602,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3617,7 +3633,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3665,7 +3682,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -3696,9 +3714,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3706,38 +3727,46 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + +- /* +- * Make the new tuple by combining plan's output tuple with +- * the old tuple being updated. +- */ +- oldSlot = resultRelInfo->ri_oldTupleSlot; +- if (oldtuple != NULL) +- { +- /* Use the wholerow junk attr as the old tuple. */ +- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); +- } +- else ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) + { +- /* Fetch the most recent version of old tuple. */ +- Relation relation = resultRelInfo->ri_RelationDesc; ++ /* ++ * Make the new tuple by combining plan's output tuple ++ * with the old tuple being updated. ++ */ ++ oldSlot = resultRelInfo->ri_oldTupleSlot; ++ if (oldtuple != NULL) ++ { ++ /* Use the wholerow junk attr as the old tuple. */ ++ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); ++ } ++ else ++ { ++ /* Fetch the most recent version of old tuple. */ ++ Relation relation = resultRelInfo->ri_RelationDesc; + +- if (!table_tuple_fetch_row_version(relation, tupleid, +- SnapshotAny, +- oldSlot)) +- elog(ERROR, "failed to fetch tuple being updated"); ++ if (!table_tuple_fetch_row_version(relation, tupleid, ++ SnapshotAny, ++ oldSlot)) ++ elog(ERROR, "failed to fetch tuple being updated"); ++ } ++ slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, ++ oldSlot, NULL); ++ context.GetUpdateNewTuple = internalGetUpdateNewTuple; ++ context.relaction = NULL; + } +- slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, +- oldSlot, NULL); +- context.GetUpdateNewTuple = internalGetUpdateNewTuple; +- context.relaction = NULL; + + /* Now apply the update. 
*/ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL); + break; + +@@ -3755,7 +3784,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. + */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3784,6 +3816,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3858,6 +3891,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -3958,6 +3992,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4040,6 +4081,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 1a5d29ac9b..c70e3ff8b8 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 4794941df3..483050268e 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index d68a6b9d28..a96eb93316 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -661,5 +661,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 5728801379..ec5496afff 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -611,6 +611,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory 
es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 8de79c618c..c9226ba5ad 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -609,7 +621,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index e4feda10fd..74a0a0a062 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -39,8 +39,8 @@ my $contrib_defines = {}; + my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); +-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -964,6 +964,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1067,6 +1068,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # 
Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1091,23 +1105,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/run_tests.sh b/run_tests.sh index 8f06d39c..2e2edc6f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -17,9 +17,26 @@ status=0 export PGPORT=55435 export VIRTUAL_ENV_DISABLE_PROMPT=1 -# rebuild PostgreSQL with cassert + valgrind support +PATHMAN_DIR=$PWD + +# indicator of using cassert + valgrind support +USE_ASSERT_VALGRIND=false if [ "$LEVEL" = "hardcore" ] || \ [ "$LEVEL" = "nightmare" ]; then + USE_ASSERT_VALGRIND=true +fi + +# indicator of using special patch for vanilla +if [ "$(printf '%s\n' "14" "$PG_VERSION" | sort -V | head -n1)" = "$PG_VERSION" ]; then + USE_PATH=false +else + #patch version 14 and newer + USE_PATH=true +fi + +# rebuild PostgreSQL with cassert + valgrind support +if [ "$USE_ASSERT_VALGRIND" = true ] || \ + [ "$USE_PATH" = true ]; then set -e @@ -40,15 +57,28 @@ if [ "$LEVEL" = "hardcore" ] || \ cd $CUSTOM_PG_SRC - # enable Valgrind support - sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h - - # enable additional options - ./configure \ - CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ - --enable-cassert \ - --prefix=$CUSTOM_PG_BIN \ - --quiet + if 
[ "$USE_PATH" = true ]; then + # apply the patch + patch -p1 < $PATHMAN_DIR/patches/REL_${PG_VERSION%.*}_STABLE-pg_pathman-core.diff + fi + + if [ "$USE_ASSERT_VALGRIND" = true ]; then + # enable Valgrind support + sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h + + # enable additional options + ./configure \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + else + # without additional options + ./configure \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + fi # build & install PG time make -s -j$(nproc) && make -s install diff --git a/src/hooks.c b/src/hooks.c index f376e4a0..46204d5c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -293,7 +293,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. */ -#if PG_VERSION_NUM >= 150000 /* reason: commit 18fea737b5e4 */ +#if PG_VERSION_NUM >= 150000 /* for commit 18fea737b5e4 */ nest_path->jpath.path.rows = #else nest_path->path.rows = diff --git a/src/partition_creation.c b/src/partition_creation.c index a89f8f68..b98163d7 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -92,7 +92,7 @@ static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_rel static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ static String make_string_value_struct(char *str); static Integer make_int_value_struct(int int_val); #else @@ -1361,7 +1361,7 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ #define BuildConstExpr(node, 
value, value_type) \ do { \ (node)->val.sval = make_string_value_struct( \ @@ -1568,7 +1568,7 @@ build_raw_hash_check_tree(Node *raw_expression, hash_proc = tce->hash_proc; /* Total amount of partitions */ -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ part_count_c->val.ival = make_int_value_struct(part_count); #else part_count_c->val = make_int_value_struct(part_count); @@ -1576,7 +1576,7 @@ build_raw_hash_check_tree(Node *raw_expression, part_count_c->location = -1; /* Index of this partition (hash % total amount) */ -#if PG_VERSION_NUM >= 150000 /* reason: commit 639a86e36aae */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ part_idx_c->val.ival = make_int_value_struct(part_idx); #else part_idx_c->val = make_int_value_struct(part_idx); diff --git a/src/partition_router.c b/src/partition_router.c index 90727c00..54f6e25e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -198,12 +198,14 @@ partition_router_exec(CustomScanState *node) TupleTableSlot *old_slot; ResultRelInfo *rri; #endif - TupleTableSlot *full_slot = slot; + TupleTableSlot *full_slot; bool partition_changed = false; ItemPointerSetInvalid(&ctid); #if PG_VERSION_NUM < 140000 + full_slot = slot; + /* Build new junkfilter if needed */ if (state->junkfilter == NULL) state->junkfilter = state->current_rri->ri_junkFilter; diff --git a/src/pg_pathman.c b/src/pg_pathman.c index b6b5d815..3b99a7e7 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -314,7 +314,7 @@ pathman_xact_cb(XactEvent event, void *arg) * ------------------- */ -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ static shmem_request_hook_type prev_shmem_request_hook = NULL; static void pg_pathman_shmem_request(void); #endif @@ -331,7 +331,7 @@ _PG_init(void) } /* Request additional shared resources */ -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ 
prev_shmem_request_hook = shmem_request_hook; shmem_request_hook = pg_pathman_shmem_request; #else @@ -373,7 +373,7 @@ _PG_init(void) #endif } -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ static void pg_pathman_shmem_request(void) { diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 2477cc7f..b321d9e6 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -495,7 +495,17 @@ disable_standard_inheritance(Query *parse, transform_query_cxt *context) if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION || parse->resultRelation == current_rti) /* is it a result relation? */ + { +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ + if (parse->commandType == CMD_MERGE && + (rte->rtekind == RTE_RELATION || + rte->relkind == RELKIND_RELATION) && + rte->inh && has_pathman_relation_info(rte->relid)) + elog(ERROR, "pg_pathman doesn't support MERGE command yet"); +#endif + continue; + } /* Table may be partitioned */ if (rte->inh) @@ -805,7 +815,9 @@ partition_filter_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif } #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ @@ -893,7 +905,9 @@ partition_router_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif } #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index ad555455..152b8b19 100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -549,7 +549,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans 
Removed', 'Async Capable']), ordered(expected)) # Check count of returned tuples count = con.execute( @@ -602,7 +602,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan, skip_keys=['Subplans Removed']), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) # Check tuples returned by query above res_tuples = con.execute( @@ -625,7 +625,7 @@ def test_parallel_nodes(self): } ] """) - self.assertEqual(ordered(plan), ordered(expected)) + self.assertEqual(ordered(plan, skip_keys=['Async Capable']), ordered(expected)) # Remove all objects for testing node.psql('drop table range_partitioned cascade') @@ -665,13 +665,13 @@ def con2_thread(): res = con2.execute(""" explain (analyze, costs off, timing off) select * from drop_test - where val = any (select generate_series(1, 40, 34)) - """) # query selects from drop_test_1 and drop_test_4 + where val = any (select generate_series(22, 40, 13)) + """) # query selects from drop_test_3 and drop_test_4 con2.commit() has_runtime_append = False - has_drop_test_1 = False + has_drop_test_3 = False has_drop_test_4 = False for row in res: @@ -679,8 +679,8 @@ def con2_thread(): has_runtime_append = True continue - if row[0].find('drop_test_1') >= 0: - has_drop_test_1 = True + if row[0].find('drop_test_3') >= 0: + has_drop_test_3 = True continue if row[0].find('drop_test_4') >= 0: @@ -688,7 +688,7 @@ def con2_thread(): continue # return all values in tuple - queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4)) + queue.put((has_runtime_append, has_drop_test_3, has_drop_test_4)) # Step 1: cache partitioned table in con1 con1.begin() @@ -702,7 +702,7 @@ def con2_thread(): # Step 3: drop first partition of 'drop_test' con1.begin() - con1.execute('drop table drop_test_1') + con1.execute('drop table drop_test_3') # Step 4: try executing select (RuntimeAppend) t = threading.Thread(target=con2_thread) @@ -734,9 +734,9 @@ def con2_thread(): 
self.assertEqual(len(rows), 99) # check RuntimeAppend + selected partitions - (has_runtime_append, has_drop_test_1, has_drop_test_4) = queue.get() + (has_runtime_append, has_drop_test_3, has_drop_test_4) = queue.get() self.assertTrue(has_runtime_append) - self.assertFalse(has_drop_test_1) + self.assertFalse(has_drop_test_3) self.assertTrue(has_drop_test_4) def test_conc_part_creation_insert(self): @@ -1044,34 +1044,36 @@ def test_update_node_plan1(self): self.assertEqual(plan["Relation Name"], "test_range") self.assertEqual(len(plan["Target Tables"]), 11) - expected_format = ''' - { - "Plans": [ - { - "Plans": [ - { - "Filter": "(comment = '15'::text)", - "Node Type": "Seq Scan", - "Relation Name": "test_range%s", - "Parent Relationship": "child" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "child", - "Custom Plan Provider": "PartitionRouter" - } - ], - "Node Type": "Custom Scan", - "Parent Relationship": "Member", - "Custom Plan Provider": "PartitionFilter" - } - ''' - - for i, f in enumerate([''] + list(map(str, range(1, 10)))): - num = '_' + f if f else '' - expected = json.loads(expected_format % num) - p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) - self.assertEqual(p, ordered(expected)) + # Plan was seriously changed in vanilla since v14 + if version < LooseVersion('14'): + expected_format = ''' + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionRouter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PartitionFilter" + } + ''' + + for i, f in enumerate([''] + list(map(str, range(1, 10)))): + num = '_' + f if f else '' + expected = json.loads(expected_format % num) + p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) + 
self.assertEqual(p, ordered(expected)) node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') From 02010f9888545169d029ab58d9f02940fe408683 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Mon, 22 Aug 2022 19:18:00 +0300 Subject: [PATCH 1072/1124] PGPRO-6148, PGPRO-7080: Use common macro for all PG versions instead of add_vars_to_targetlist() function. Reason: b3ff6c74 --- src/compat/rowmarks_fix.c | 2 +- src/include/compat/rowmarks_fix.h | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 4dd1c20a..35eea44b 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -47,7 +47,7 @@ append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) root->processed_tlist = lappend(root->processed_tlist, tle); - add_vars_to_targetlist(root, list_make1(var), bms_make_singleton(0), true); + add_vars_to_targetlist_compat(root, list_make1(var), bms_make_singleton(0)); } diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 09e5fbef..c94504c3 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -45,5 +45,17 @@ void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); #endif +/* + * add_vars_to_targetlist() + * In >=16 last argument was removed (b3ff6c742f6c) + */ +#if PG_VERSION_NUM >= 160000 +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed)); +#else +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed), true); +#endif + #endif /* ROWMARKS_FIX_H */ From 19d6a1a00b62ccecf81223e6f7795460e2590354 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 13 Oct 2022 16:38:32 +0300 Subject: [PATCH 1073/1124] travis-ci for v15 --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/.travis.yml b/.travis.yml index 67a5f2ee..dd63d98f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,8 @@ notifications: on_failure: always env: + - PG_VERSION=15 LEVEL=hardcore + - PG_VERSION=15 - PG_VERSION=14 LEVEL=hardcore - PG_VERSION=14 - PG_VERSION=13 LEVEL=hardcore From c76321ac5b87063b6b4e35901a4958341e58a66a Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Thu, 20 Oct 2022 12:43:31 +0300 Subject: [PATCH 1074/1124] PGPRO-7123: unexport ExecInitUpdateProjection for timescaledb In vanilla PostgreSQL ExecInitUpdateProjection is a static function. However, pgpro does export that function due to pg_pathman needs. Starting with 15th version, we rename exported function, adding Pgpro prefix to avoid compatibility issues with timescaledb. --- src/partition_router.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/partition_router.c b/src/partition_router.c index 54f6e25e..b551158e 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -254,7 +254,11 @@ partition_router_exec(CustomScanState *node) /* Initialize projection info if first time for this table. */ if (unlikely(!rri->ri_projectNewInfoValid)) +#if PG_VERSION_NUM >= 150000 /* after PGPRO-7123 */ + PgproExecInitUpdateProjection(state->mt_state, rri); +#else ExecInitUpdateProjection(state->mt_state, rri); +#endif /* PG_VERSION_NUM >= 150000 ... else */ old_slot = rri->ri_oldTupleSlot; /* Fetch the most recent version of old tuple. 
*/ @@ -264,7 +268,7 @@ partition_router_exec(CustomScanState *node) /* Build full tuple (using "old_slot" + changed from "slot"): */ full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); -#endif +#endif /* PG_VERSION_NUM >= 140000 */ /* Lock or delete tuple from old partition */ full_slot = router_lock_or_delete_tuple(state, full_slot, From eabe2a886f564c5cc3a8f1b9bb29e35d8e5108c8 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Mon, 24 Oct 2022 15:11:44 +0300 Subject: [PATCH 1075/1124] Use proper ifdef to get ExecInitUpdateProjection's name updating the previous commit. --- src/partition_router.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/partition_router.c b/src/partition_router.c index b551158e..eefc44bf 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -254,11 +254,11 @@ partition_router_exec(CustomScanState *node) /* Initialize projection info if first time for this table. */ if (unlikely(!rri->ri_projectNewInfoValid)) -#if PG_VERSION_NUM >= 150000 /* after PGPRO-7123 */ +#ifdef PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION PgproExecInitUpdateProjection(state->mt_state, rri); #else ExecInitUpdateProjection(state->mt_state, rri); -#endif /* PG_VERSION_NUM >= 150000 ... else */ +#endif /* !PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION */ old_slot = rri->ri_oldTupleSlot; /* Fetch the most recent version of old tuple. 
*/ From 7a244036d5db177e5fc89530fc643a91f64b7502 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Tue, 25 Oct 2022 14:22:16 +0300 Subject: [PATCH 1076/1124] update .patch for REL_15_STABLE to match current code --- patches/REL_15_STABLE-pg_pathman-core.diff | 91 +++++++++++++--------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index b30b0230..e0eb9a62 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -1,5 +1,5 @@ diff --git a/contrib/Makefile b/contrib/Makefile -index bbf220407b..9a82a2db04 100644 +index bbf220407b0..9a82a2db046 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -34,6 +34,7 @@ SUBDIRS = \ @@ -11,7 +11,7 @@ index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 47d80b0d25..6689776769 100644 +index 594d8da2cdc..a2049e70e95 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,10 +24,10 @@ index 47d80b0d25..6689776769 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index e44ad68cda..b9ba79e756 100644 +index ef0f9577ab1..95858960d50 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c -@@ -1831,6 +1831,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) +@@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) } out: @@ -45,7 +45,7 @@ index e44ad68cda..b9ba79e756 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092..8551733c55 100644 +index ef2fd46092e..8551733c55d 100644 --- 
a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -77,19 +77,24 @@ index ef2fd46092..8551733c55 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index a49c3da5b6..2c0b32e2df 100644 +index 04454ad6e60..6a52e86b782 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -551,7 +551,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, - * This is also a convenient place to verify that the output of an UPDATE - * matches the target table (ExecBuildUpdateProjection does that). - */ --static void +@@ -603,6 +603,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + +void - ExecInitUpdateProjection(ModifyTableState *mtstate, - ResultRelInfo *resultRelInfo) - { -@@ -3460,6 +3460,7 @@ ExecModifyTable(PlanState *pstate) ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3461,6 +3468,7 @@ ExecModifyTable(PlanState *pstate) PartitionTupleRouting *proute = node->mt_partition_tuple_routing; List *relinfos = NIL; ListCell *lc; @@ -97,7 +102,7 @@ index a49c3da5b6..2c0b32e2df 100644 CHECK_FOR_INTERRUPTS(); -@@ -3501,6 +3502,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3502,6 +3510,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -106,7 +111,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3508,6 +3511,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3509,6 +3519,14 @@ 
ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -121,7 +126,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly -@@ -3541,7 +3552,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3542,7 +3560,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +137,7 @@ index a49c3da5b6..2c0b32e2df 100644 &isNull); if (isNull) { -@@ -3578,6 +3591,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3579,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +146,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3587,6 +3602,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3588,6 +3610,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -149,7 +154,7 @@ index a49c3da5b6..2c0b32e2df 100644 return slot; } -@@ -3617,7 +3633,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3618,7 +3641,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +164,7 @@ index a49c3da5b6..2c0b32e2df 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3665,7 +3682,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3666,7 +3690,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +174,7 @@ index a49c3da5b6..2c0b32e2df 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3696,9 +3714,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3697,9 +3722,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -185,7 +190,7 @@ index a49c3da5b6..2c0b32e2df 100644 break; case CMD_UPDATE: -@@ -3706,38 +3727,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3707,38 +3735,46 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -255,7 +260,7 @@ index a49c3da5b6..2c0b32e2df 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3755,7 +3784,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3756,7 +3792,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -266,7 +271,7 @@ index a49c3da5b6..2c0b32e2df 100644 } /* -@@ -3784,6 +3816,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3785,6 +3824,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -274,7 +279,7 @@ index a49c3da5b6..2c0b32e2df 100644 return NULL; } -@@ -3858,6 +3891,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3859,6 +3899,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -282,7 +287,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -3958,6 +3992,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3959,6 +4000,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -296,7 +301,7 @@ index a49c3da5b6..2c0b32e2df 100644 /* * Now we may initialize the subplan. 
*/ -@@ -4040,6 +4081,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4041,6 +4089,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } } @@ -306,7 +311,7 @@ index a49c3da5b6..2c0b32e2df 100644 * If this is an inherited update/delete/merge, there will be a junk * attribute named "tableoid" present in the subplan's targetlist. It diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c -index 1a5d29ac9b..c70e3ff8b8 100644 +index 1a5d29ac9ba..aadca8ea474 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -25,7 +25,7 @@ @@ -319,7 +324,7 @@ index 1a5d29ac9b..c70e3ff8b8 100644 volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/src/include/access/xact.h b/src/include/access/xact.h -index 4794941df3..483050268e 100644 +index 65616ca2f79..965eb544217 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; @@ -332,19 +337,29 @@ index 4794941df3..483050268e 100644 /* flag for logging statements in this transaction */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index d68a6b9d28..a96eb93316 100644 +index 82925b4b633..de23622ca24 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h -@@ -661,5 +661,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, +@@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache); -+extern void ExecInitUpdateProjection(ModifyTableState *mtstate, -+ ResultRelInfo *resultRelInfo); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. 
++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. ++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index 5728801379..ec5496afff 100644 +index 57288013795..ec5496afffa 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -611,6 +611,12 @@ typedef struct EState @@ -361,7 +376,7 @@ index 5728801379..ec5496afff 100644 /* diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm -index 8de79c618c..c9226ba5ad 100644 +index 8de79c618cb..c9226ba5ad4 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -30,6 +30,18 @@ my @client_program_files = ( @@ -393,7 +408,7 @@ index 8de79c618c..c9226ba5ad 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index e4feda10fd..74a0a0a062 100644 +index e4feda10fd8..74a0a0a062b 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -39,8 +39,8 @@ my $contrib_defines = {}; From 2680eee6d84f2a8955e0e6ad50f3f4f4db43a4ba Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Fri, 18 Nov 2022 07:47:55 +0300 Subject: [PATCH 1077/1124] Fix build due to new checks in PostgreSQL 16 Due to the commit c8b2ef05f481ef06326d7b9f3eb14b303f215c7e in PostgreSQL 16: - The macro CStringGetTextDatum returns a Datum, so use the more appropriate macro PG_RETURN_DATUM instead of PG_RETURN_TEXT_P. - The input to the macro TextDatumGetCString must be of type Datum, so use the more appropriate macro PG_GETARG_DATUM instead of PG_GETARG_TEXT_P. 
--- src/pl_funcs.c | 10 +++++----- src/pl_hash_funcs.c | 2 +- src/pl_range_funcs.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 76ecbe3d..b638fc47 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -179,7 +179,7 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS) pfree(expr_cstr); pfree(expr); - PG_RETURN_TEXT_P(CStringGetTextDatum(cooked_cstr)); + PG_RETURN_DATUM(CStringGetTextDatum(cooked_cstr)); } /* @@ -199,7 +199,7 @@ get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) res = CStringGetTextDatum(nodeToString(prel->expr)); close_pathman_relation_info(prel); - PG_RETURN_TEXT_P(res); + PG_RETURN_DATUM(res); } /* @@ -688,7 +688,7 @@ validate_expression(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -818,7 +818,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -1203,7 +1203,7 @@ is_operator_supported(PG_FUNCTION_ARGS) { Oid opid, typid = PG_GETARG_OID(0); - char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *opname = TextDatumGetCString(PG_GETARG_DATUM(1)); opid = compatible_oper_opid(list_make1(makeString(opname)), typid, typid, true); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index f4a44b71..ddaaa8c0 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -119,7 +119,7 @@ Datum build_hash_condition(PG_FUNCTION_ARGS) { Oid expr_type = PG_GETARG_OID(0); - char *expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 part_count = PG_GETARG_UINT32(2), 
part_idx = PG_GETARG_UINT32(3); diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 4465d36e..b2a8dc3d 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -156,7 +156,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch 'tablespace' */ if (!PG_ARGISNULL(4)) { - tablespace = TextDatumGetCString(PG_GETARG_TEXT_P(4)); + tablespace = TextDatumGetCString(PG_GETARG_DATUM(4)); } else tablespace = NULL; /* default */ @@ -429,7 +429,7 @@ validate_interval_value(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); } - else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); + else expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(ARG_EXPRESSION)); if (PG_ARGISNULL(ARG_PARTTYPE)) { @@ -1086,7 +1086,7 @@ build_range_condition(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL")));; From 12594a31a25f3ca34d7b1331889c740870bff765 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Mon, 21 Nov 2022 15:44:24 +0300 Subject: [PATCH 1078/1124] Fix compiler warnings due to new checks in PostgreSQL 16 See the commit 0fe954c28584169938e5c0738cfaa9930ce77577 (Add -Wshadow=compatible-local to the standard compilation flags) in PostgreSQL 16. 
--- src/partition_creation.c | 6 ++---- src/relation_info.c | 18 +++++++++--------- src/runtime_merge_append.c | 6 ++++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index b98163d7..b42372b3 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -2027,11 +2027,9 @@ build_partitioning_expression(Oid parent_relid, /* We need expression type for hash functions */ if (expr_type) { - Node *expr; - expr = cook_partitioning_expression(parent_relid, expr_cstr, NULL); - /* Finally return expression type */ - *expr_type = exprType(expr); + *expr_type = exprType( + cook_partitioning_expression(parent_relid, expr_cstr, NULL)); } if (columns) diff --git a/src/relation_info.c b/src/relation_info.c index 64c04c2f..90e30d0e 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -71,34 +71,34 @@ int prel_resowner_line = 0; #define LeakTrackerAdd(prel) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + MemoryContext leak_tracker_add_old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ (prel)->owners = \ list_append_unique( \ (prel)->owners, \ list_make2(makeString((char *) prel_resowner_function), \ makeInteger(prel_resowner_line))); \ - MemoryContextSwitchTo(old_mcxt); \ + MemoryContextSwitchTo(leak_tracker_add_old_mcxt); \ \ (prel)->access_total++; \ } while (0) #define LeakTrackerPrint(prel) \ do { \ - ListCell *lc; \ - foreach (lc, (prel)->owners) \ + ListCell *leak_tracker_print_lc; \ + foreach (leak_tracker_print_lc, (prel)->owners) \ { \ - char *fun = strVal(linitial(lfirst(lc))); \ - int line = intVal(lsecond(lfirst(lc))); \ + char *fun = strVal(linitial(lfirst(leak_tracker_print_lc))); \ + int line = intVal(lsecond(lfirst(leak_tracker_print_lc))); \ elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ } \ } while (0) #define LeakTrackerFree(prel) \ do { \ - ListCell *lc; \ - foreach (lc, (prel)->owners) \ + ListCell *leak_tracker_free_lc; \ + 
foreach (leak_tracker_free_lc, (prel)->owners) \ { \ - list_free_deep(lfirst(lc)); \ + list_free_deep(lfirst(leak_tracker_free_lc)); \ } \ list_free((prel)->owners); \ (prel)->owners = NIL; \ diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 601c663f..5edd803c 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -374,7 +374,8 @@ fetch_next_tuple(CustomScanState *node) for (i = 0; i < scan_state->rstate.ncur_plans; i++) { ChildScanCommon child = scan_state->rstate.cur_plans[i]; - PlanState *ps = child->content.plan_state; + + ps = child->content.plan_state; Assert(child->content_type == CHILD_PLAN_STATE); @@ -721,10 +722,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, foreach(j, ec->ec_members) { - EquivalenceMember *em = (EquivalenceMember *) lfirst(j); List *exprvars; ListCell *k; + em = (EquivalenceMember *) lfirst(j); + /* * We shouldn't be trying to sort by an equivalence class that * contains a constant, so no need to consider such cases any From a9e82f4cee1d675af19f26aeaca035e5d3ba6c65 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 16 May 2022 22:13:54 +0300 Subject: [PATCH 1079/1124] [PGPRO-4997] Integrated two vanilla commits for EXPLAIN correction (v13+) 1) ce76c0ba - Add a reverse-translation column number array to struct AppendRelInfo. 2) 55a1954d - Fix EXPLAIN's column alias output for mismatched child tables. 
--- expected/pathman_array_qual_2.out | 2398 ++++++++++++++++++++++++++ expected/pathman_basic_2.out | 364 ++-- expected/pathman_calamity_2.out | 48 +- expected/pathman_calamity_3.out | 48 +- expected/pathman_cte_2.out | 10 +- expected/pathman_cte_3.out | 266 +++ expected/pathman_domains_1.out | 131 ++ expected/pathman_expressions_3.out | 436 +++++ expected/pathman_gaps_2.out | 819 +++++++++ expected/pathman_hashjoin_4.out | 6 +- expected/pathman_hashjoin_5.out | 2 +- expected/pathman_inserts_2.out | 114 +- expected/pathman_join_clause_2.out | 4 +- expected/pathman_join_clause_3.out | 182 ++ expected/pathman_lateral_2.out | 32 +- expected/pathman_mergejoin_4.out | 10 +- expected/pathman_mergejoin_5.out | 2 +- expected/pathman_only_2.out | 94 +- expected/pathman_rowmarks_3.out | 120 +- expected/pathman_subpartitions_2.out | 461 +++++ expected/pathman_upd_del_3.out | 462 +++++ expected/pathman_views_3.out | 98 +- expected/pathman_views_4.out | 191 ++ src/include/pathman.h | 3 +- src/partition_creation.c | 2 +- src/partition_filter.c | 2 +- src/pg_pathman.c | 91 +- src/planner_tree_modification.c | 2 +- 28 files changed, 5916 insertions(+), 482 deletions(-) create mode 100644 expected/pathman_array_qual_2.out create mode 100644 expected/pathman_cte_3.out create mode 100644 expected/pathman_domains_1.out create mode 100644 expected/pathman_expressions_3.out create mode 100644 expected/pathman_gaps_2.out create mode 100644 expected/pathman_join_clause_3.out create mode 100644 expected/pathman_subpartitions_2.out create mode 100644 expected/pathman_upd_del_3.out create mode 100644 expected/pathman_views_4.out diff --git a/expected/pathman_array_qual_2.out b/expected/pathman_array_qual_2.out new file mode 100644 index 00000000..ab504858 --- /dev/null +++ b/expected/pathman_array_qual_2.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY 
('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: 
false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 
+ Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY 
('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT 
* FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 
test + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > 500) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test 
WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_2 test_1 + Filter: (a > 101) + -> Seq Scan on test_3 test_2 + -> Seq Scan on test_4 test_3 + -> Seq Scan on test_5 test_4 + -> Seq Scan on test_6 test_5 + -> Seq Scan on test_7 test_6 + -> Seq Scan on test_8 test_7 + -> Seq Scan on test_9 test_8 + -> Seq Scan on test_10 test_9 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on 
test_6 test_1 + Filter: (a > 550) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > 700) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan 
(RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on 
test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 
+ Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY 
(ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + 
Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- 
+ Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on 
test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on 
test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + 
QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) 
EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 
test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE 
q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + 
DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out index 7cfde8a6..ec180fdb 100644 --- a/expected/pathman_basic_2.out +++ b/expected/pathman_basic_2.out @@ -36,13 
+36,13 @@ SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_dat (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN -------------------------------------------- + QUERY PLAN +----------------------------------------- Append -> Seq Scan on hash_rel hash_rel_1 - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 hash_rel_1_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 (5 rows) SELECT * FROM test.hash_rel; @@ -60,12 +60,12 @@ SELECT pathman.set_enable_parent('test.hash_rel', false); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) SELECT * FROM test.hash_rel; @@ -80,13 +80,13 @@ SELECT pathman.set_enable_parent('test.hash_rel', true); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN -------------------------------------------- + QUERY PLAN +----------------------------------------- Append -> Seq Scan on hash_rel hash_rel_1 - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 hash_rel_1_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 (5 rows) SELECT * FROM test.hash_rel; @@ -224,12 +224,12 @@ SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +------------------------------------------------------ Append 
-> Seq Scan on improved_dummy_1 Filter: ((id = 5) AND (name = 'ib'::text)) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_2 Filter: (id = 101) (5 rows) @@ -245,9 +245,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A Append -> Seq Scan on improved_dummy improved_dummy_1 Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) - -> Seq Scan on improved_dummy_1 improved_dummy_1_1 + -> Seq Scan on improved_dummy_1 improved_dummy_2 Filter: ((id = 5) AND (name = 'ib'::text)) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_3 Filter: (id = 101) (7 rows) @@ -259,9 +259,9 @@ SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable paren ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; - QUERY PLAN -------------------------------- - Seq Scan on improved_dummy_11 + QUERY PLAN +---------------------------------------------- + Seq Scan on improved_dummy_11 improved_dummy Filter: (id = 101) (2 rows) @@ -277,7 +277,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 A Append -> Seq Scan on improved_dummy improved_dummy_1 Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) - -> Seq Scan on improved_dummy_11 + -> Seq Scan on improved_dummy_11 improved_dummy_2 Filter: (id = 101) (5 rows) @@ -389,16 +389,16 @@ EXPLAIN (COSTS OFF) INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select WHERE val <= 80; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- Insert on insert_into_select_copy -> Append -> Seq Scan on insert_into_select insert_into_select_1 Filter: (val <= 80) - -> Seq Scan on insert_into_select_1 insert_into_select_1_1 - -> 
Seq Scan on insert_into_select_2 - -> Seq Scan on insert_into_select_3 - -> Seq Scan on insert_into_select_4 + -> Seq Scan on insert_into_select_1 insert_into_select_2 + -> Seq Scan on insert_into_select_2 insert_into_select_3 + -> Seq Scan on insert_into_select_3 insert_into_select_4 + -> Seq Scan on insert_into_select_4 insert_into_select_5 Filter: (val <= 80) (9 rows) @@ -418,12 +418,12 @@ SET enable_indexscan = OFF; SET enable_bitmapscan = OFF; SET enable_seqscan = ON; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; @@ -441,16 +441,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (2 = value) (2 rows) @@ -465,45 +465,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_3 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_3 num_range_rel Filter: (2500 = id) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test 
commutator */ - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_1 Filter: (2500 < id) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_1 Filter: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_2 num_range_rel_1 Filter: (id >= 1500) - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_3 num_range_rel_2 Filter: (id < 2500) (5 rows) @@ -524,35 +524,35 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 
rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------- - Seq Scan on range_rel_2 + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN --------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_2 range_rel_1 Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_3 range_rel_2 Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -573,12 +573,12 @@ SET enable_indexscan = ON; SET enable_bitmapscan = OFF; SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------- + QUERY PLAN +----------------------------------------- Append - -> Seq Scan on hash_rel_0 - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; @@ -596,16 +596,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + 
QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (2 = value) (2 rows) @@ -620,45 +620,45 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ - QUERY PLAN ----------------------------------------------------------- - Index Scan using num_range_rel_3_pkey on num_range_rel_3 + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel Index Cond: (id = 2500) (2 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 Index Cond: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 Index Cond: (id > 2500) - -> Seq Scan on num_range_rel_4 + -> Seq Scan on num_range_rel_4 num_range_rel_2 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 
1000 AND id < 3000; - QUERY PLAN ------------------------------------ + QUERY PLAN +--------------------------------------------------- Append - -> Seq Scan on num_range_rel_2 - -> Seq Scan on num_range_rel_3 + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 num_range_rel_1 Index Cond: (id >= 1500) - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_2 Index Cond: (id < 2500) (5 rows) @@ -699,35 +699,35 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; QUERY PLAN ------------------------------------------------------------------------------------ Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ QUERY PLAN ------------------------------------------------------------------------------------ Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 
range_rel_3 (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; - QUERY PLAN -------------------------- - Seq Scan on range_rel_2 + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) - -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 range_rel_2 Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -774,7 +774,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER B QUERY PLAN ------------------------------------- Sort - Sort Key: range_rel_1.dt + Sort Key: range_rel.dt -> Append -> Seq Scan on range_rel_1 -> Seq Scan on range_rel_2 @@ -823,18 +823,18 @@ CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $ select * from test.sql_inline where id = i_id limit 1; $$ LANGUAGE sql STABLE; EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); - QUERY PLAN --------------------------------- + QUERY PLAN +------------------------------------------- Limit - -> Seq Scan on sql_inline_0 + -> Seq Scan on sql_inline_0 sql_inline Filter: (id = 5) (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); - QUERY PLAN --------------------------------- + QUERY PLAN +------------------------------------------- Limit - -> Seq Scan on sql_inline_2 + -> Seq Scan on sql_inline_2 sql_inline Filter: (id = 1) (3 rows) @@ -876,12 +876,12 @@ SELECT pathman.split_range_partition('test.num_range_rel_1', 500); (1 row) EXPLAIN (COSTS OFF) SELECT * 
FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------- Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 Index Cond: (id >= 100) - -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 num_range_rel_2 Index Cond: (id <= 700) (5 rows) @@ -907,9 +907,9 @@ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_re (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - QUERY PLAN ----------------------------------------------------------- - Index Scan using num_range_rel_1_pkey on num_range_rel_1 + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_1_pkey on num_range_rel_1 num_range_rel Index Cond: ((id >= 100) AND (id <= 700)) (2 rows) @@ -927,9 +927,9 @@ SELECT pathman.append_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_6 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_6 num_range_rel (1 row) SELECT pathman.prepend_range_partition('test.num_range_rel'); @@ -939,9 +939,9 @@ SELECT pathman.prepend_range_partition('test.num_range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; - QUERY PLAN ------------------------------ - Seq Scan on num_range_rel_7 + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_7 num_range_rel (1 row) SELECT pathman.drop_range_partition('test.num_range_rel_7'); @@ -995,9 +995,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A QUERY PLAN 
------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_7_dt_idx on range_rel_7 + -> Index Scan using range_rel_7_dt_idx on range_rel_7 range_rel_1 Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -1010,7 +1010,7 @@ SELECT pathman.drop_range_partition('test.range_rel_7'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; QUERY PLAN ------------------------------------------------------------------------------- - Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (2 rows) @@ -1026,9 +1026,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_8_dt_idx on range_rel_8 + -> Index Scan using range_rel_8_dt_idx on range_rel_8 range_rel_1 Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (5 rows) @@ -1045,10 +1045,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive range_rel_1 Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp 
without time zone) - -> Seq Scan on range_rel_8 - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_3 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (6 rows) @@ -1062,8 +1062,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A QUERY PLAN ------------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_8 - -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Seq Scan on range_rel_8 range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) (4 rows) @@ -1120,19 +1120,19 @@ SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::RE INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; - QUERY PLAN --------------------------------------------- + QUERY PLAN +-------------------------------------------------------- Append - -> Seq Scan on range_rel_minus_infinity - -> Seq Scan on range_rel_8 + -> Seq Scan on range_rel_minus_infinity range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; - QUERY PLAN -------------------------------------------- + QUERY PLAN +------------------------------------------------------- Append - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_plus_infinity + -> Seq Scan on range_rel_6 range_rel_1 + -> Seq Scan on range_rel_plus_infinity range_rel_2 (3 rows) /* @@ -1199,12 +1199,12 @@ SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern') /* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ EXPLAIN(COSTS OFF) SELECT * FROM 
test.hash_rel; - QUERY PLAN ------------------------------------ + QUERY PLAN +---------------------------------------------- Append - -> Seq Scan on hash_rel_extern - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) SELECT parent, partition, parttype @@ -1247,12 +1247,12 @@ CREATE TABLE test.hash_rel_wrong( SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); ERROR: column "value" in child table must be marked NOT NULL EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; - QUERY PLAN ------------------------------------ + QUERY PLAN +---------------------------------------------- Append - -> Seq Scan on hash_rel_extern - -> Seq Scan on hash_rel_1 - -> Seq Scan on hash_rel_2 + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 (4 rows) /* @@ -1350,7 +1350,7 @@ SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; QUERY PLAN -------------------------------------------------------------------------- - Seq Scan on range_rel_14 + Seq Scan on range_rel_14 range_rel Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) (2 rows) @@ -1363,7 +1363,7 @@ SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; QUERY PLAN -------------------------------------------------------------------------- - Seq Scan on range_rel_8 + Seq Scan on range_rel_8 range_rel Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (2 rows) @@ -1532,14 +1532,14 @@ SELECT create_hash_partitions('test.hash_rel', 'value', 3); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; - QUERY PLAN ------------------------------------------------------- + QUERY PLAN 
+----------------------------------------------------------------- Append - -> Index Scan using hash_rel_0_pkey on hash_rel_0 + -> Index Scan using hash_rel_0_pkey on hash_rel_0 hash_rel_1 Index Cond: (id = 1234) - -> Index Scan using hash_rel_1_pkey on hash_rel_1 + -> Index Scan using hash_rel_1_pkey on hash_rel_1 hash_rel_2 Index Cond: (id = 1234) - -> Index Scan using hash_rel_2_pkey on hash_rel_2 + -> Index Scan using hash_rel_2_pkey on hash_rel_2 hash_rel_3 Index Cond: (id = 1234) (7 rows) @@ -1580,21 +1580,21 @@ SELECT prepend_range_partition('test.range_rel'); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; - QUERY PLAN --------------------------------- + QUERY PLAN +-------------------------------------------- Append - -> Seq Scan on range_rel_15 - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_13 + -> Seq Scan on range_rel_15 range_rel_1 + -> Seq Scan on range_rel_1 range_rel_2 + -> Seq Scan on range_rel_13 range_rel_3 (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; QUERY PLAN -------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_12 + -> Seq Scan on range_rel_12 range_rel_1 Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) - -> Seq Scan on range_rel_14 + -> Seq Scan on range_rel_14 range_rel_2 (4 rows) /* Create range partitions from whole range */ @@ -1682,14 +1682,14 @@ SELECT set_enable_parent('test.special_case_1_ind_o_s', true); (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; - QUERY PLAN --------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- Append -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 Filter: ((val < 75) AND (comment = 'a'::text)) - -> 
Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_1_1 + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_2 Filter: (comment = 'a'::text) - -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 special_case_1_ind_o_s_3 Index Cond: ((val < 75) AND (comment = 'a'::text)) (7 rows) @@ -1757,18 +1757,18 @@ SELECT set_enable_parent('test.index_on_childs', true); VACUUM ANALYZE test.index_on_childs; EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------ Append -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 Index Cond: (c2 = 500) Filter: ((c1 > 100) AND (c1 < 2500)) - -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k index_on_childs_2 Index Cond: (c2 = 500) Filter: (c1 > 100) - -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k index_on_childs_3 Index Cond: (c2 = 500) - -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k index_on_childs_4 Index Cond: (c2 = 500) Filter: (c1 < 2500) (12 rows) diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index f647e788..5bb1053f 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -603,25 +603,25 @@ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition @@ -630,13 +630,13 @@ CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; @@ -646,13 +646,13 @@ CHECK (val >= 10 AND val = 2); /* 
wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index f64a5f8b..bfb3b63c 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -607,25 +607,25 @@ NOTICE: merging column "val" with inherited definition SELECT add_to_pathman_config('calamity.part_test', 'val'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on 
part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition @@ -634,13 +634,13 @@ CHECK (val = 1 OR val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; @@ -650,13 +650,13 @@ CHECK (val >= 10 AND val = 2); /* wrong constraint */ SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); ERROR: wrong constraint format for RANGE partition "wrong_partition" EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ - QUERY PLAN ------------------------------ + QUERY PLAN +--------------------------------------- Append - -> Seq Scan on part_ok_0 - -> Seq Scan on part_ok_1 - -> Seq Scan on part_ok_2 - -> Seq Scan on part_ok_3 + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 (5 rows) ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out index 6b64ad42..b9bf8730 100644 --- a/expected/pathman_cte_2.out +++ b/expected/pathman_cte_2.out @@ -29,8 +29,8 @@ SELECT * FROM ttt; QUERY PLAN 
-------------------------------------------------------------------------------- Append - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) (4 rows) @@ -52,9 +52,9 @@ SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); EXPLAIN (COSTS OFF) WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) SELECT * FROM ttt; - QUERY PLAN ------------------------- - Seq Scan on hash_rel_1 + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel Filter: (value = 2) (2 rows) diff --git a/expected/pathman_cte_3.out b/expected/pathman_cte_3.out new file mode 100644 index 00000000..a7f3acd0 --- /dev/null +++ b/expected/pathman_cte_3.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE 
TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT 
create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_domains_1.out b/expected/pathman_domains_1.out new file mode 100644 index 00000000..aaa0867f --- /dev/null +++ b/expected/pathman_domains_1.out @@ -0,0 +1,131 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); +CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 250; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_2 + -> Seq Scan on dom_table_3 + Filter: ((val)::numeric < '250'::numeric) +(5 rows) + +INSERT INTO domains.dom_table VALUES(1500); +ERROR: value for domain domains.dom_test violates check constraint 
"dom_test_check" +INSERT INTO domains.dom_table VALUES(-10); +SELECT append_range_partition('domains.dom_table'); + append_range_partition +------------------------ + domains.dom_table_12 +(1 row) + +SELECT prepend_range_partition('domains.dom_table'); + prepend_range_partition +------------------------- + domains.dom_table_13 +(1 row) + +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); + merge_range_partitions +------------------------ + domains.dom_table_1 +(1 row) + +SELECT split_range_partition('domains.dom_table_1', 50); + split_range_partition +----------------------- + domains.dom_table_14 +(1 row) + +INSERT INTO domains.dom_table VALUES(1101); +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_13 dom_table_1 + -> Seq Scan on dom_table_11 dom_table_2 + -> Seq Scan on dom_table_1 dom_table_3 + -> Seq Scan on dom_table_14 dom_table_4 + -> Seq Scan on dom_table_3 dom_table_5 + -> Seq Scan on dom_table_4 dom_table_6 + -> Seq Scan on dom_table_5 dom_table_7 + Filter: ((val)::numeric < '450'::numeric) +(9 rows) + +SELECT * FROM pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + parent | partition | parttype | expr | range_min | range_max +-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + 
domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 +(14 rows) + +SELECT drop_partitions('domains.dom_table'); +NOTICE: 49 rows copied from domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 +(1 row) + +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +SELECT * FROM pathman_partition_list +ORDER BY "partition"::TEXT; + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | +(5 rows) + +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_expressions_3.out 
b/expected/pathman_expressions_3.out new file mode 100644 index 00000000..eacb1009 --- /dev/null +++ b/expected/pathman_expressions_3.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + 
add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Seq Scan on canon_1 canon +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. 
+ */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on 
composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL 
statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function 
prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 hash_rel_3 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 hash_rel_4 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 hash_rel + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO 
test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + 
create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 range_rel + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_2.out b/expected/pathman_gaps_2.out new file mode 100644 index 00000000..b229be66 --- /dev/null +++ b/expected/pathman_gaps_2.out @@ -0,0 +1,819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + 
gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 test_1_2 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN 
+----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +----------------------------- + Seq Scan on test_2_4 test_2 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + -> Seq Scan on test_2_5 test_2_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + Filter: (val > 11) + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + Filter: (val > 31) + -> Seq Scan on test_2_5 test_2_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +----------------------------- + Seq Scan on test_3_5 test_3 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + 
QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + -> Seq Scan on test_3_6 test_3_5 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + Filter: (val > 21) + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN 
+------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + Filter: (val > 41) + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE 
val = 51; + QUERY PLAN +----------------------------- + Seq Scan on test_4_6 test_4 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * 
FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + -> Seq Scan on test_4_7 test_4_5 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + Filter: (val > 21) + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 
31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq 
Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + Filter: (val > 51) + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out index ef8dfc29..e827628f 100644 --- a/expected/pathman_hashjoin_4.out +++ b/expected/pathman_hashjoin_4.out @@ -54,11 +54,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; QUERY PLAN --------------------------------------------------------------------------------------- Sort - Sort Key: j2_1.dt + Sort Key: j2.dt -> Hash Join - Hash Cond: (j1_1.id = j2_1.id) + Hash Cond: (j1.id = j2.id) -> Hash Join - Hash Cond: (j3_1.id = j1_1.id) + 
Hash Cond: (j3.id = j1.id) -> Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out index a8f3b6e7..c66a9306 100644 --- a/expected/pathman_hashjoin_5.out +++ b/expected/pathman_hashjoin_5.out @@ -56,7 +56,7 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Sort Sort Key: j2.dt -> Hash Join - Hash Cond: (j3_1.id = j2.id) + Hash Cond: (j3.id = j2.id) -> Append -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out index 91f05753..3c31fc53 100644 --- a/expected/pathman_inserts_2.out +++ b/expected/pathman_inserts_2.out @@ -902,124 +902,124 @@ FROM generate_series(1, 10) i; EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e FROM test_inserts.storage; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e -> Result - Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, storage_1.e -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b, storage_11.d, storage_11.e - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b, storage_1_1.d, storage_1_1.e - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b, storage_2.d, storage_2.e - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: 
storage_3.b, storage_3.d, storage_3.e - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b, storage_4.d, storage_4.e - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b, storage_5.d, storage_5.e - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b, storage_6.d, storage_6.e - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b, storage_7.d, storage_7.e - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b, storage_8.d, storage_8.e - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b, storage_9.d, storage_9.e - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b, storage_10.d, storage_10.e - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b, storage_12.d, storage_12.e - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b, storage_13.d, storage_13.e - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 storage_14 Output: storage_14.b, storage_14.d, storage_14.e + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d, storage_15.e (34 rows) EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b, d) SELECT b, d FROM test_inserts.storage; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> 
Custom Scan (PartitionFilter) Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint -> Result - Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, NULL::bigint -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b, storage_11.d - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b, storage_1_1.d - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b, storage_2.d - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: storage_3.b, storage_3.d - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b, storage_4.d - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b, storage_5.d - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b, storage_6.d - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b, storage_7.d - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b, storage_8.d - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b, storage_9.d - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b, storage_10.d - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b, storage_12.d - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b, storage_13.d - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 
storage_14 Output: storage_14.b, storage_14.d + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d (34 rows) EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO test_inserts.storage (b) SELECT b FROM test_inserts.storage; - QUERY PLAN --------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------- Insert on test_inserts.storage -> Custom Scan (PartitionFilter) Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint -> Result - Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + Output: NULL::integer, storage_1.b, NULL::integer, NULL::text, NULL::bigint -> Append - -> Seq Scan on test_inserts.storage_11 - Output: storage_11.b - -> Seq Scan on test_inserts.storage_1 storage_1_1 - Output: storage_1_1.b - -> Seq Scan on test_inserts.storage_2 + -> Seq Scan on test_inserts.storage_11 storage_2 Output: storage_2.b - -> Seq Scan on test_inserts.storage_3 + -> Seq Scan on test_inserts.storage_1 storage_3 Output: storage_3.b - -> Seq Scan on test_inserts.storage_4 + -> Seq Scan on test_inserts.storage_2 storage_4 Output: storage_4.b - -> Seq Scan on test_inserts.storage_5 + -> Seq Scan on test_inserts.storage_3 storage_5 Output: storage_5.b - -> Seq Scan on test_inserts.storage_6 + -> Seq Scan on test_inserts.storage_4 storage_6 Output: storage_6.b - -> Seq Scan on test_inserts.storage_7 + -> Seq Scan on test_inserts.storage_5 storage_7 Output: storage_7.b - -> Seq Scan on test_inserts.storage_8 + -> Seq Scan on test_inserts.storage_6 storage_8 Output: storage_8.b - -> Seq Scan on test_inserts.storage_9 + -> Seq Scan on test_inserts.storage_7 storage_9 Output: storage_9.b - -> Seq Scan on test_inserts.storage_10 + -> Seq Scan on test_inserts.storage_8 storage_10 Output: storage_10.b - -> Seq Scan on test_inserts.storage_12 + -> Seq Scan on test_inserts.storage_9 
storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_10 storage_12 Output: storage_12.b - -> Seq Scan on test_inserts.storage_13 + -> Seq Scan on test_inserts.storage_12 storage_13 Output: storage_13.b - -> Seq Scan on test_inserts.storage_14 + -> Seq Scan on test_inserts.storage_13 storage_14 Output: storage_14.b + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b (34 rows) /* test gap case (missing partition in between) */ diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out index a1fae839..df2ea0a5 100644 --- a/expected/pathman_join_clause_2.out +++ b/expected/pathman_join_clause_2.out @@ -132,10 +132,10 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); QUERY PLAN ---------------------------------------------------------------------- Nested Loop Left Join - Join Filter: (child_1.parent_id = parent.id) + Join Filter: (child.parent_id = parent.id) -> Seq Scan on parent Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) - -> Seq Scan on child_1 + -> Seq Scan on child_1 child Filter: (owner_id = 3) (6 rows) diff --git a/expected/pathman_join_clause_3.out b/expected/pathman_join_clause_3.out new file mode 100644 index 00000000..80b8de4c --- /dev/null +++ b/expected/pathman_join_clause_3.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: 
((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT 
* FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP 
TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out index df5292f8..e4a64a56 100644 --- a/expected/pathman_lateral_2.out +++ b/expected/pathman_lateral_2.out @@ -32,13 +32,13 @@ select * from t1.id > t2.id and exists(select * from test_lateral.data t where t1.id = t2.id and t.id = t3.id); - QUERY PLAN --------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Nested Loop -> Nested Loop - Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + Join Filter: ((t2.id + t1.id) = t.id) -> HashAggregate - Group Key: t_1.id + Group Key: t.id -> Append -> Seq Scan on data_0 t_1 -> Seq Scan on data_1 t_2 @@ -52,7 +52,7 @@ select * from -> Seq Scan on data_9 t_10 -> Materialize -> Nested Loop - Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) -> Append -> Seq Scan on data_0 t2_1 Filter: ((id >= 2) AND (id <= 299)) @@ -97,27 +97,27 @@ select * from -> Seq Scan on data_9 t1_10 Filter: ((id >= 1) AND (id <= 100)) -> Custom Scan (RuntimeAppend) - Prune by: (t_1.id = t3.id) + Prune by: (t.id = t3.id) -> Seq Scan on data_0 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_1 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_2 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_3 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_4 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_5 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_6 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_7 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_8 
t3 - Filter: (t_1.id = id) + Filter: (t.id = id) -> Seq Scan on data_9 t3 - Filter: (t_1.id = id) + Filter: (t.id = id) (84 rows) set enable_hashjoin = on; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out index e2affa74..fc9bc95f 100644 --- a/expected/pathman_mergejoin_4.out +++ b/expected/pathman_mergejoin_4.out @@ -57,17 +57,17 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; QUERY PLAN --------------------------------------------------------------------------------- Sort - Sort Key: j2_1.dt + Sort Key: j2.dt -> Merge Join - Merge Cond: (j2_1.id = j3_1.id) + Merge Cond: (j2.id = j3.id) -> Merge Join - Merge Cond: (j1_1.id = j2_1.id) + Merge Cond: (j1.id = j2.id) -> Merge Append - Sort Key: j1_1.id + Sort Key: j1.id -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 -> Merge Append - Sort Key: j2_1.id + Sort Key: j2.id -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out index 7b607435..b99e40db 100644 --- a/expected/pathman_mergejoin_5.out +++ b/expected/pathman_mergejoin_5.out @@ -59,7 +59,7 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; Sort Sort Key: j2.dt -> Merge Join - Merge Cond: (j2.id = j3_1.id) + Merge Cond: (j2.id = j3.id) -> Index Scan using range_rel_2_pkey on range_rel_2 j2 Index Cond: (id IS NOT NULL) -> Append diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index 63638012..c37dd5f4 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -36,16 +36,16 @@ UNION SELECT * FROM test_only.from_only_test; -> Append -> Seq Scan on from_only_test -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - 
-> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 (15 rows) /* should be OK */ @@ -55,7 +55,7 @@ UNION SELECT * FROM ONLY test_only.from_only_test; QUERY PLAN ---------------------------------------------------------- HashAggregate - Group Key: from_only_test_1.val + Group Key: from_only_test.val -> Append -> Append -> Seq Scan on from_only_test_1 @@ -76,10 +76,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_only.from_only_test UNION SELECT * FROM test_only.from_only_test UNION SELECT * FROM ONLY test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- HashAggregate - Group Key: from_only_test_1.val + Group Key: from_only_test.val -> Append -> Append -> Seq Scan on from_only_test_1 @@ -93,17 +93,17 @@ UNION SELECT * FROM ONLY test_only.from_only_test; -> Seq Scan on from_only_test_9 -> Seq Scan on from_only_test_10 -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq 
Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 - -> Seq Scan on from_only_test from_only_test_12 + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 (26 rows) /* should be OK */ @@ -111,34 +111,34 @@ EXPLAIN (COSTS OFF) SELECT * FROM ONLY test_only.from_only_test UNION SELECT * FROM test_only.from_only_test UNION SELECT * FROM test_only.from_only_test; - QUERY PLAN ---------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------- HashAggregate Group Key: from_only_test.val -> Append -> Seq Scan on from_only_test -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 
from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 -> Append - -> Seq Scan on from_only_test_1 from_only_test_1_2 - -> Seq Scan on from_only_test_2 from_only_test_2_1 - -> Seq Scan on from_only_test_3 from_only_test_3_1 - -> Seq Scan on from_only_test_4 from_only_test_4_1 - -> Seq Scan on from_only_test_5 from_only_test_5_1 - -> Seq Scan on from_only_test_6 from_only_test_6_1 - -> Seq Scan on from_only_test_7 from_only_test_7_1 - -> Seq Scan on from_only_test_8 from_only_test_8_1 - -> Seq Scan on from_only_test_9 from_only_test_9_1 - -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 (26 rows) /* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out index c2539d76..af61e5f7 100644 --- a/expected/pathman_rowmarks_3.out +++ b/expected/pathman_rowmarks_3.out @@ -42,17 +42,17 @@ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; /* Simple case (plan) */ EXPLAIN (COSTS OFF) SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; - QUERY PLAN ---------------------------------------- + QUERY PLAN +----------------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq 
Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 (9 rows) /* Simple case (execution) */ @@ -98,20 +98,20 @@ WHERE id = (SELECT id FROM rowmarks.first OFFSET 10 LIMIT 1 FOR UPDATE) FOR SHARE; - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------- LockRows InitPlan 1 (returns $1) -> Limit -> LockRows -> Sort - Sort Key: first_0.id + Sort Key: first_1.id -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 first_1_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 -> Custom Scan (RuntimeAppend) Prune by: (first.id = $1) -> Seq Scan on first_0 first @@ -187,19 +187,19 @@ SELECT * FROM rowmarks.first JOIN rowmarks.second USING(id) ORDER BY id FOR UPDATE; - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------------- LockRows -> Sort - Sort Key: first_0.id + Sort Key: first.id -> Hash Join - Hash Cond: (first_0.id = second.id) + Hash Cond: (first.id = second.id) -> Append - -> Seq Scan on first_0 - -> Seq Scan on first_1 - -> Seq Scan on first_2 - -> Seq Scan on first_3 - -> Seq Scan on first_4 + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 -> Hash -> Seq Scan on second (13 rows) @@ -244,53 +244,53 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------- + QUERY PLAN 
+--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 + -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Update on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id < 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id < 1) - -> Seq Scan on first_2 + -> Seq Scan on first_2 first_3 Filter: (id < 1) - -> Seq Scan on first_3 + -> Seq Scan on first_3 first_4 Filter: (id < 1) - -> Seq Scan on first_4 + -> Seq Scan on first_4 first_5 Filter: (id < 1) (16 rows) EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Update on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id = 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id = 2) (10 rows) @@ -298,13 +298,13 @@ EXPLAIN (COSTS OFF) UPDATE rowmarks.second SET id = 2 WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) RETURNING *, tableoid::regclass; - QUERY PLAN ---------------------------------- + QUERY PLAN +--------------------------------------- Update on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 
+ -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) @@ -326,53 +326,53 @@ SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); - QUERY PLAN ---------------------------------- + QUERY PLAN +--------------------------------------- Delete on second -> Nested Loop Semi Join -> Seq Scan on second Filter: (id = 1) - -> Seq Scan on first_0 + -> Seq Scan on first_0 first Filter: (id = 1) (6 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Delete on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id < 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id < 1) - -> Seq Scan on first_2 + -> Seq Scan on first_2 first_3 Filter: (id < 1) - -> Seq Scan on first_3 + -> Seq Scan on first_3 first_4 Filter: (id < 1) - -> Seq Scan on first_4 + -> Seq Scan on first_4 first_5 Filter: (id < 1) (16 rows) EXPLAIN (COSTS OFF) DELETE FROM rowmarks.second WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +----------------------------------------------------- Delete on second -> Nested Loop Semi Join - Join Filter: (second.id = first_0.id) + Join Filter: (second.id = first.id) -> Seq Scan on second -> Materialize -> Append - -> Seq Scan on first_0 + -> Seq Scan on first_0 first_1 Filter: (id = 1) - -> Seq Scan on first_1 + -> Seq Scan on first_1 first_2 Filter: (id = 2) (10 rows) diff --git a/expected/pathman_subpartitions_2.out 
b/expected/pathman_subpartitions_2.out new file mode 100644 index 00000000..26eae913 --- /dev/null +++ b/expected/pathman_subpartitions_2.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + 
subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + -> Seq Scan on abc_1_1 abc_3 + -> Seq Scan on abc_1_2 abc_4 + -> Append + -> Seq Scan on abc_2_0 abc_6 + Filter: (a < 150) + -> Seq Scan on abc_2_1 abc_7 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------------- + 
Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + Filter: (b = 215) + -> Seq Scan on abc_1_1 abc_3 + Filter: (b = 215) + -> Seq Scan on abc_1_2 abc_4 + Filter: (b = 215) + -> Seq Scan on abc_2_1 abc_5 + Filter: (b = 215) + -> Seq Scan on abc_3_2 abc_6 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 abc + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +------------------------- + Seq Scan on abc_3_2 abc + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions 
+------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 
25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + 
subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT 
prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions 
+------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out new file mode 100644 index 00000000..70b41e7d --- /dev/null +++ b/expected/pathman_upd_del_3.out @@ -0,0 +1,462 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on 
range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS 
OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM 
test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + 
-> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> 
Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN 
+--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out index cf5ca58e..ae50bcb3 100644 --- a/expected/pathman_views_3.out +++ b/expected/pathman_views_3.out @@ -39,33 +39,33 @@ on views.abc for each row execute procedure views.disable_modification(); /* Test SELECT */ explain (costs off) select * from views.abc; - QUERY PLAN --------------------------- + QUERY PLAN +---------------------------------- Append - -> Seq Scan on _abc_0 - -> Seq Scan on _abc_1 - -> Seq Scan on _abc_2 - -> Seq Scan on _abc_3 - -> Seq Scan on _abc_4 - -> Seq Scan on _abc_5 - -> Seq Scan on _abc_6 - -> Seq Scan on _abc_7 - -> Seq Scan on _abc_8 - -> Seq Scan on _abc_9 + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq 
Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 (11 rows) explain (costs off) select * from views.abc where id = 1; - QUERY PLAN --------------------- - Seq Scan on _abc_0 + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc Filter: (id = 1) (2 rows) explain (costs off) select * from views.abc where id = 1 for update; - QUERY PLAN --------------------------- + QUERY PLAN +------------------------------- LockRows - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc Filter: (id = 1) (3 rows) @@ -93,14 +93,14 @@ insert into views.abc values (1); ERROR: INSERT /* Test UPDATE */ explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Update on abc -> Result -> Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc_1 Filter: (id = 1) - -> Seq Scan on _abc_6 + -> Seq Scan on _abc_6 _abc_2 Filter: (id = 2) (7 rows) @@ -108,14 +108,14 @@ update views.abc set id = 2 where id = 1 or id = 2; ERROR: UPDATE /* Test DELETE */ explain (costs off) delete from views.abc where id = 1 or id = 2; - QUERY PLAN --------------------------------------- + QUERY PLAN +--------------------------------------------- Delete on abc -> Result -> Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc_1 Filter: (id = 1) - -> Seq Scan on _abc_6 + -> Seq Scan on _abc_6 _abc_2 Filter: (id = 2) (7 rows) @@ -125,43 +125,43 @@ ERROR: DELETE create view views.abc_union as table views._abc union table views._abc_add; create view views.abc_union_all as table views._abc union all table views._abc_add; explain (costs off) table views.abc_union; - QUERY PLAN --------------------------------------- + QUERY PLAN +---------------------------------------------- HashAggregate - Group Key: _abc_0.id + Group Key: _abc.id -> Append -> Append 
- -> Seq Scan on _abc_0 - -> Seq Scan on _abc_1 - -> Seq Scan on _abc_2 - -> Seq Scan on _abc_3 - -> Seq Scan on _abc_4 - -> Seq Scan on _abc_5 - -> Seq Scan on _abc_6 - -> Seq Scan on _abc_7 - -> Seq Scan on _abc_8 - -> Seq Scan on _abc_9 + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 -> Seq Scan on _abc_add (15 rows) explain (costs off) select * from views.abc_union where id = 5; - QUERY PLAN ----------------------------------------- + QUERY PLAN +------------------------------------------- Unique -> Sort - Sort Key: _abc_8.id + Sort Key: _abc.id -> Append - -> Seq Scan on _abc_8 + -> Seq Scan on _abc_8 _abc Filter: (id = 5) -> Seq Scan on _abc_add Filter: (id = 5) (8 rows) explain (costs off) table views.abc_union_all; - QUERY PLAN ----------------------------- + QUERY PLAN +------------------------------- Append - -> Seq Scan on _abc_0 + -> Seq Scan on _abc_0 _abc -> Seq Scan on _abc_1 -> Seq Scan on _abc_2 -> Seq Scan on _abc_3 @@ -175,10 +175,10 @@ explain (costs off) table views.abc_union_all; (12 rows) explain (costs off) select * from views.abc_union_all where id = 5; - QUERY PLAN ----------------------------- + QUERY PLAN +------------------------------- Append - -> Seq Scan on _abc_8 + -> Seq Scan on _abc_8 _abc Filter: (id = 5) -> Seq Scan on _abc_add Filter: (id = 5) diff --git a/expected/pathman_views_4.out b/expected/pathman_views_4.out new file mode 100644 index 00000000..8fde5770 --- /dev/null +++ b/expected/pathman_views_4.out @@ -0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing 
different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into 
views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 
+ -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/src/include/pathman.h b/src/include/pathman.h index b9acfe59..28f6ef30 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -118,7 +118,8 @@ Index append_child_relation(PlannerInfo *root, * Copied from PostgreSQL (prepunion.c) */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars); + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo); Bitmapset *translate_col_privs(const Bitmapset *parent_privs, List *translated_vars); diff --git a/src/partition_creation.c b/src/partition_creation.c index b42372b3..b2d94794 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -995,7 +995,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) parent_rel = heap_open_compat(parent_relid, NoLock); partition_rel = heap_open_compat(partition_relid, NoLock); - make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); + make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars, NULL); heap_close_compat(parent_rel, NoLock); heap_close_compat(partition_rel, NoLock); diff --git a/src/partition_filter.c b/src/partition_filter.c index 0ef84e61..3a72a70d 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -307,7 +307,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) child_rel = heap_open_compat(partid, NoLock); /* Build 
Var translation list for 'inserted_cols' */ - make_inh_translation_list(base_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3b99a7e7..0f150bba 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -507,6 +507,11 @@ append_child_relation(PlannerInfo *root, ListCell *lc1, *lc2; LOCKMODE lockmode; +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + TupleDesc child_tupdesc; + List *parent_colnames; + List *child_colnames; +#endif /* Choose a correct lock mode */ if (parent_rti == root->parse->resultRelation) @@ -538,7 +543,12 @@ append_child_relation(PlannerInfo *root, child_relation = heap_open_compat(child_oid, NoLock); /* Create RangeTblEntry for child relation */ +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + child_rte = makeNode(RangeTblEntry); + memcpy(child_rte, parent_rte, sizeof(RangeTblEntry)); +#else child_rte = copyObject(parent_rte); +#endif child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; child_rte->requiredPerms = 0; /* perform all checks on parent */ @@ -560,7 +570,56 @@ append_child_relation(PlannerInfo *root, appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; make_inh_translation_list(parent_relation, child_relation, child_rti, - &appinfo->translated_vars); + &appinfo->translated_vars, appinfo); + +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + /* tablesample is probably null, but copy it */ + child_rte->tablesample = copyObject(parent_rte->tablesample); + + /* + * Construct an alias clause for the child, which we can also use as eref. + * This is important so that EXPLAIN will print the right column aliases + * for child-table columns. 
(Since ruleutils.c doesn't have any easy way + * to reassociate parent and child columns, we must get the child column + * aliases right to start with. Note that setting childrte->alias forces + * ruleutils.c to use these column names, which it otherwise would not.) + */ + child_tupdesc = RelationGetDescr(child_relation); + parent_colnames = parent_rte->eref->colnames; + child_colnames = NIL; + for (int cattno = 0; cattno < child_tupdesc->natts; cattno++) + { + Form_pg_attribute att = TupleDescAttr(child_tupdesc, cattno); + const char *attname; + + if (att->attisdropped) + { + /* Always insert an empty string for a dropped column */ + attname = ""; + } + else if (appinfo->parent_colnos[cattno] > 0 && + appinfo->parent_colnos[cattno] <= list_length(parent_colnames)) + { + /* Duplicate the query-assigned name for the parent column */ + attname = strVal(list_nth(parent_colnames, + appinfo->parent_colnos[cattno] - 1)); + } + else + { + /* New column, just use its real name */ + attname = NameStr(att->attname); + } + child_colnames = lappend(child_colnames, makeString(pstrdup(attname))); + } + + /* + * We just duplicate the parent's table alias name for each child. If the + * plan gets printed, ruleutils.c has to sort out unique table aliases to + * use, which it can handle. 
+ */ + child_rte->alias = child_rte->eref = makeAlias(parent_rte->eref->aliasname, + child_colnames); +#endif /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); @@ -627,6 +686,14 @@ append_child_relation(PlannerInfo *root, child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, appinfo->translated_vars); } +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + else + { + child_rte->selectedCols = bms_copy(parent_rte->selectedCols); + child_rte->insertedCols = bms_copy(parent_rte->insertedCols); + child_rte->updatedCols = bms_copy(parent_rte->updatedCols); + } +#endif /* Here and below we assume that parent RelOptInfo exists */ AssertState(parent_rel); @@ -1945,7 +2012,8 @@ translate_col_privs(const Bitmapset *parent_privs, */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars) + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo) { List *vars = NIL; TupleDesc old_tupdesc = RelationGetDescr(oldrelation); @@ -1953,6 +2021,17 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, int oldnatts = old_tupdesc->natts; int newnatts = new_tupdesc->natts; int old_attno; +#if PG_VERSION_NUM >= 130000 /* see commit ce76c0ba */ + AttrNumber *pcolnos = NULL; + + if (appinfo) + { + /* Initialize reverse-translation array with all entries zero */ + appinfo->num_child_cols = newnatts; + appinfo->parent_colnos = pcolnos = + (AttrNumber *) palloc0(newnatts * sizeof(AttrNumber)); + } +#endif for (old_attno = 0; old_attno < oldnatts; old_attno++) { @@ -1987,6 +2066,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[old_attno] = old_attno + 1; +#endif continue; } @@ -2044,6 +2127,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if 
PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[new_attno] = old_attno + 1; +#endif } *translated_vars = vars; diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index b321d9e6..027fd4e1 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -609,7 +609,7 @@ handle_modification_query(Query *parse, transform_query_cxt *context) child_rel = heap_open_compat(child, NoLock); parent_rel = heap_open_compat(parent, NoLock); - make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars, NULL); /* Perform some additional adjustments */ if (!inh_translation_list_is_trivial(translated_vars)) From 2b286c48f7e43f8e637a4828b1b809546368db3f Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:07:16 +0300 Subject: [PATCH 1080/1124] Remove AssertArg and AssertState See the commit b1099eca8f38ff5cfaf0901bb91cb6a22f909bc6 (Remove AssertArg and AssertState) in PostgreSQL 16. 
--- src/compat/pg_compat.c | 2 +- src/init.c | 12 ++++++------ src/nodes_common.c | 2 +- src/partition_creation.c | 2 +- src/pg_pathman.c | 2 +- src/relation_info.c | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index 7afdd99a..216fd382 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -234,7 +234,7 @@ McxtStatsInternal(MemoryContext context, int level, MemoryContextCounters local_totals; MemoryContext child; - AssertArg(MemoryContextIsValid(context)); + Assert(MemoryContextIsValid(context)); /* Examine the context itself */ #if PG_VERSION_NUM >= 140000 diff --git a/src/init.c b/src/init.c index 99b79f55..9f72bcb7 100644 --- a/src/init.c +++ b/src/init.c @@ -569,7 +569,7 @@ find_inheritance_children_array(Oid parent_relid, char * build_check_constraint_name_relid_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return build_check_constraint_name_relname_internal(get_rel_name(relid)); } @@ -580,7 +580,7 @@ build_check_constraint_name_relid_internal(Oid relid) char * build_check_constraint_name_relname_internal(const char *relname) { - AssertArg(relname != NULL); + Assert(relname != NULL); return psprintf("pathman_%s_check", relname); } @@ -591,7 +591,7 @@ build_check_constraint_name_relname_internal(const char *relname) char * build_sequence_name_relid_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return build_sequence_name_relname_internal(get_rel_name(relid)); } @@ -602,7 +602,7 @@ build_sequence_name_relid_internal(Oid relid) char * build_sequence_name_relname_internal(const char *relname) { - AssertArg(relname != NULL); + Assert(relname != NULL); return psprintf("%s_seq", relname); } @@ -613,7 +613,7 @@ build_sequence_name_relname_internal(const char *relname) char * build_update_trigger_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig", 
get_rel_name(relid)); } @@ -624,7 +624,7 @@ build_update_trigger_name_internal(Oid relid) char * build_update_trigger_func_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig_func", get_rel_name(relid)); } diff --git a/src/nodes_common.c b/src/nodes_common.c index b6bf24cb..a6fecb51 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -59,7 +59,7 @@ transform_plans_into_states(RuntimeAppendState *scan_state, ChildScanCommon child; PlanState *ps; - AssertArg(selected_plans); + Assert(selected_plans); child = selected_plans[i]; /* Create new node since this plan hasn't been used yet */ diff --git a/src/partition_creation.c b/src/partition_creation.c index b2d94794..eb438b91 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -2035,7 +2035,7 @@ build_partitioning_expression(Oid parent_relid, if (columns) { /* Column list should be empty */ - AssertArg(*columns == NIL); + Assert(*columns == NIL); extract_column_names(expr, columns); } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 0f150bba..34600249 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -696,7 +696,7 @@ append_child_relation(PlannerInfo *root, #endif /* Here and below we assume that parent RelOptInfo exists */ - AssertState(parent_rel); + Assert(parent_rel); /* Adjust join quals for this child */ child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, diff --git a/src/relation_info.c b/src/relation_info.c index 90e30d0e..e3ba540c 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -304,7 +304,7 @@ invalidate_psin_entry(PartStatusInfo *psin) void close_pathman_relation_info(PartRelationInfo *prel) { - AssertArg(prel); + Assert(prel); (void) resowner_prel_del(prel); } From e32efa8bd6bc6159b120326c5128dd7e1419e03b Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:11:03 +0300 Subject: [PATCH 1081/1124] Fix pg_pathman_enable_partition_router initial value Thus it 
is equal to its boot value. See the commit a73952b795632b2cf5acada8476e7cf75857e9be (Add check on initial and boot values when loading GUCs) in PostgreSQL 16. --- src/partition_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/partition_router.c b/src/partition_router.c index eefc44bf..2e982299 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -63,7 +63,7 @@ -bool pg_pathman_enable_partition_router = true; +bool pg_pathman_enable_partition_router = false; CustomScanMethods partition_router_plan_methods; CustomExecMethods partition_router_exec_methods; From 364d200e647eb41c5b686a87b82c5a86d7a58748 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:28:54 +0300 Subject: [PATCH 1082/1124] Avoid making commutatively-duplicate clauses in EquivalenceClasses. See the commit a5fc46414deb7cbcd4cec1275efac69b9ac10500 (Avoid making commutatively-duplicate clauses in EquivalenceClasses.) in PostgreSQL 16. --- expected/pathman_join_clause_4.out | 161 +++++++++ expected/pathman_lateral_4.out | 128 ++++++++ expected/pathman_only_3.out | 281 ++++++++++++++++ expected/pathman_runtime_nodes_1.out | 468 +++++++++++++++++++++++++++ 4 files changed, 1038 insertions(+) create mode 100644 expected/pathman_join_clause_4.out create mode 100644 expected/pathman_lateral_4.out create mode 100644 expected/pathman_only_3.out create mode 100644 expected/pathman_runtime_nodes_1.out diff --git a/expected/pathman_join_clause_4.out b/expected/pathman_join_clause_4.out new file mode 100644 index 00000000..17791fb9 --- /dev/null +++ b/expected/pathman_join_clause_4.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, 
fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = 
parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral_4.out b/expected/pathman_lateral_4.out new file mode 100644 index 00000000..d35da608 --- /dev/null +++ b/expected/pathman_lateral_4.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for 
inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND 
(id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t3.id = t.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out new file mode 100644 index 00000000..2f2fcc75 --- /dev/null +++ b/expected/pathman_only_3.out @@ -0,0 +1,281 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * 
--------------------------------------------- + * + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + 
HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN 
+------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: 
(b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + 
-> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE 
test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out new file mode 100644 index 00000000..65382269 --- /dev/null +++ b/expected/pathman_runtime_nodes_1.out @@ -0,0 +1,468 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test RuntimeAppend + */ +create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; +create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ +begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom 
scan'); + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + 
'"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; 
+$$ language plpgsql +set pg_pathman.enable = true +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set pg_pathman.enable = true +set enable_hashjoin = off +set enable_mergejoin = off; +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary 
key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 (category_id, rating); +select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.vals as (select generate_series(1, 10000) as val); +create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + create_range_partitions +------------------------- + 5 +(1 row) + +VACUUM ANALYZE; +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ + pathman_test_1 +---------------- + ok +(1 row) + +select test.pathman_test_2(); /* RuntimeAppend (select ... 
where id = any(subquery)) */ + pathman_test_2 +---------------- + ok +(1 row) + +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ + pathman_test_3 +---------------- + ok +(1 row) + +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ + pathman_test_4 +---------------- + ok +(1 row) + +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + pathman_test_5 +---------------- + ok +(1 row) + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Seq Scan on runtime_test_1 t1 + Filter: (id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(19 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs 
off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(17 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? 
+---------- + t +(1 row) + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); 
+DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; From 874412561e9d406547ef04f6ac3cc2d34e8c37f5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 22 Nov 2022 00:41:16 +0300 Subject: [PATCH 1083/1124] [PGPRO-7417] Added 'volatile' modifier for local variables that are modified in PG_TRY and read in PG_CATCH/PG_FINALLY --- src/include/init.h | 4 ++-- src/init.c | 4 ++-- src/pl_funcs.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/include/init.h b/src/include/init.h index f2234c8f..58335c46 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -171,8 +171,8 @@ void *pathman_cache_search_relid(HTAB *cache_table, /* * Save and restore PathmanInitState. */ -void save_pathman_init_state(PathmanInitState *temp_init_state); -void restore_pathman_init_state(const PathmanInitState *temp_init_state); +void save_pathman_init_state(volatile PathmanInitState *temp_init_state); +void restore_pathman_init_state(const volatile PathmanInitState *temp_init_state); /* * Create main GUC variables. 
diff --git a/src/init.c b/src/init.c index 9f72bcb7..bdec28fd 100644 --- a/src/init.c +++ b/src/init.c @@ -134,13 +134,13 @@ pathman_cache_search_relid(HTAB *cache_table, */ void -save_pathman_init_state(PathmanInitState *temp_init_state) +save_pathman_init_state(volatile PathmanInitState *temp_init_state) { *temp_init_state = pathman_init_state; } void -restore_pathman_init_state(const PathmanInitState *temp_init_state) +restore_pathman_init_state(const volatile PathmanInitState *temp_init_state) { /* * initialization_needed is not restored: it is not just a setting but diff --git a/src/pl_funcs.c b/src/pl_funcs.c index b638fc47..809884c2 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -796,7 +796,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Oid expr_type; - PathmanInitState init_state; + volatile PathmanInitState init_state; if (!IsPathmanReady()) elog(ERROR, "pg_pathman is disabled"); From 47806e7f69935caaa86f40e87cf215cb90aaf9a3 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 22 Dec 2022 05:10:34 +0300 Subject: [PATCH 1084/1124] [PGPRO-7585] Fixes for v16 due to vanilla changes Tags: pg_pathman Caused by: - ad86d159b6: Add 'missing_ok' argument to build_attrmap_by_name - a61b1f7482: Rework query relation permission checking - b5d6382496: Provide per-table permissions for vacuum and analyze --- expected/pathman_permissions_1.out | 263 +++++++++++++++++++++++++++++ src/include/partition_filter.h | 8 +- src/partition_filter.c | 68 +++++++- src/pg_pathman.c | 7 + src/pl_funcs.c | 5 +- src/planner_tree_modification.c | 27 +++ src/utility_stmt_hooking.c | 47 +++++- 7 files changed, 410 insertions(+), 15 deletions(-) create mode 100644 expected/pathman_permissions_1.out diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out new file mode 100644 index 00000000..c7e04210 --- /dev/null +++ b/expected/pathman_permissions_1.out @@ -0,0 +1,263 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE 
SCHEMA permissions; +CREATE ROLE user1 LOGIN; +CREATE ROLE user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +/* Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.user1_table(id serial, a int); +INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +/* Should fail (can't SELECT) */ +SET ROLE user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT to user2 */ +SET ROLE user1; +GRANT SELECT ON permissions.user1_table TO user2; +/* Should fail (don't own parent) */ +SET ROLE user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Should be ok */ +SET ROLE user1; +SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +/* Should be able to see */ +SET ROLE user2; +SELECT * FROM pathman_config; + partrel | expr | parttype | range_interval +-------------------------+------+----------+---------------- + permissions.user1_table | id | 2 | 10 +(1 row) + +SELECT * FROM pathman_config_params; + partrel | enable_parent | auto | init_callback | spawn_using_bgw +-------------------------+---------------+------+---------------+----------------- + permissions.user1_table | f | t | | f +(1 row) + +/* Should fail */ +SET ROLE user2; +SELECT set_enable_parent('permissions.user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +SELECT set_auto('permissions.user1_table', false); +WARNING: 
only the owner or superuser can change partitioning configuration of table "user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +/* Should fail */ +SET ROLE user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +/* No rights to insert, should fail */ +SET ROLE user2; +DO $$ +BEGIN + INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); +ERROR: permission denied for parent relation "user1_table" +/* Allow user2 to create partitions */ +SET ROLE user1; +GRANT INSERT ON permissions.user1_table TO user2; +GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +/* Should be able to prepend a partition */ +SET ROLE user2; +SELECT prepend_range_partition('permissions.user1_table'); + prepend_range_partition +--------------------------- + permissions.user1_table_4 +(1 row) + +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+----------------- + a | {user2=w/user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE user2; +INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; + id | a +----+--- + 35 | 0 +(1 row) + +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 
'permissions.user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "user1_table_2" */ + relname | relacl +---------------+---------------------------------------- + user1_table_2 | {user1=arwdDxtvz/user1,user2=r/user1} + user1_table_5 | {user1=arwdDxtvz/user1,user2=ar/user1} + user1_table_6 | {user1=arwdDxtvz/user1,user2=ar/user1} +(3 rows) + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Disable automatic partition creation */ +SET ROLE user1; +SELECT set_auto('permissions.user1_table', false); + set_auto +---------- + +(1 row) + +/* Partition creation, should fail */ +SET ROLE user2; +INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +ERROR: no suitable partition for key '55' +/* Finally drop partitions */ +SET ROLE user1; +SELECT drop_partitions('permissions.user1_table'); +NOTICE: 10 rows copied from permissions.user1_table_1 +NOTICE: 10 rows copied from permissions.user1_table_2 +NOTICE: 0 rows copied from permissions.user1_table_4 +NOTICE: 0 rows copied from permissions.user1_table_5 +NOTICE: 1 rows copied from permissions.user1_table_6 + drop_partitions +----------------- + 5 +(1 row) + +/* Switch to #2 */ +SET ROLE user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.user2_table(id serial); +SELECT create_hash_partitions('permissions.user2_table', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO permissions.user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.user2_table'); +NOTICE: 9 rows copied from permissions.user2_table_0 +NOTICE: 11 rows copied from permissions.user2_table_1 +NOTICE: 10 rows copied from permissions.user2_table_2 + drop_partitions +----------------- + 3 +(1 row) + +/* 
Switch to #1 */ +SET ROLE user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val | {user2=ar/user1} +(4 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition 
+------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+------------------ + permissions.dropped_column_1 | val | {user2=ar/user1} + permissions.dropped_column_2 | val | {user2=ar/user1} + permissions.dropped_column_3 | val | {user2=ar/user1} + permissions.dropped_column_4 | val | {user2=ar/user1} + permissions.dropped_column_5 | val | {user2=ar/user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 6 other objects +/* Finally reset user */ +RESET ROLE; +DROP OWNED BY user1; +DROP OWNED BY user2; +DROP USER user1; +DROP USER user2; +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 0c912abe..d3c2c482 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -101,6 +101,9 @@ struct ResultPartsStorage PartRelationInfo *prel; ExprState *prel_expr_state; ExprContext *prel_econtext; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + ResultRelInfo *init_rri; /* first initialized ResultRelInfo */ +#endif }; typedef struct @@ -167,7 +170,7 @@ void init_result_parts_storage(ResultPartsStorage *parts_storage, void fini_result_parts_storage(ResultPartsStorage *parts_storage); /* Find ResultRelInfo holder in storage */ -ResultRelInfoHolder * scan_result_parts_storage(ResultPartsStorage *storage, Oid partid); +ResultRelInfoHolder * scan_result_parts_storage(EState *estate, ResultPartsStorage *storage, Oid partid); /* Refresh PartRelationInfo in storage */ PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid 
partid); @@ -186,7 +189,8 @@ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder *select_partition_for_insert(ResultPartsStorage *parts_storage, +ResultRelInfoHolder *select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, TupleTableSlot *slot); Plan * make_partition_filter(Plan *subplan, diff --git a/src/partition_filter.c b/src/partition_filter.c index 3a72a70d..a267c702 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -27,6 +27,9 @@ #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" @@ -257,7 +260,8 @@ fini_result_parts_storage(ResultPartsStorage *parts_storage) /* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * -scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, + Oid partid) { #define CopyToResultRelInfo(field_name) \ ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) @@ -280,6 +284,12 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) ResultRelInfo *child_result_rel_info; List *translated_vars; MemoryContext old_mcxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; + /* ResultRelInfo of partitioned table. 
*/ + RangeTblEntry *init_rte; +#endif /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); @@ -306,15 +316,41 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) /* Open child relation and check if it is a valid target */ child_rel = heap_open_compat(partid, NoLock); - /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); - /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); child_rte->rtekind = RTE_RELATION; child_rte->relid = partid; child_rte->relkind = child_rel->rd_rel->relkind; child_rte->eref = parent_rte->eref; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(parts_storage->init_rri->ri_RelationDesc, + child_rel, 0, &translated_vars, NULL); + + /* + * Need to use ResultRelInfo of partitioned table 'init_rri' because + * 'base_rri' can be ResultRelInfo of partition without any + * ResultRelInfo, see expand_single_inheritance_child(). 
+ */ + init_rte = rt_fetch(parts_storage->init_rri->ri_RangeTableIndex, + parts_storage->estate->es_range_table); + parent_perminfo = getRTEPermissionInfo(estate->es_rteperminfos, init_rte); + + child_rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&estate->es_rteperminfos, child_rte); + child_perminfo->requiredPerms = parent_perminfo->requiredPerms; + child_perminfo->checkAsUser = parent_perminfo->checkAsUser; + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, + translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, + translated_vars); + + /* Check permissions for partition */ + ExecCheckPermissions(list_make1(child_rte), list_make1(child_perminfo), true); +#else + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); + child_rte->requiredPerms = parent_rte->requiredPerms; child_rte->checkAsUser = parent_rte->checkAsUser; child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, @@ -324,6 +360,7 @@ scan_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) /* Check permissions for partition */ ExecCheckRTPerms(list_make1(child_rte), true); +#endif /* Append RangeTblEntry to estate->es_range_table */ child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); @@ -498,7 +535,9 @@ build_part_tuple_map_child(Relation child_rel) child_tupdesc2->tdtypeid = InvalidOid; /* Generate tuple transformation map */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2, false); +#elif PG_VERSION_NUM >= 130000 attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); #else attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, @@ -586,7 +625,8 @@ find_partitions_for_value(Datum value, Oid value_type, * 
Smart wrapper for scan_result_parts_storage(). */ ResultRelInfoHolder * -select_partition_for_insert(ResultPartsStorage *parts_storage, +select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, TupleTableSlot *slot) { PartRelationInfo *prel = parts_storage->prel; @@ -637,7 +677,7 @@ select_partition_for_insert(ResultPartsStorage *parts_storage, else partition_relid = parts[0]; /* Get ResultRelationInfo holder for the selected partition */ - result = scan_result_parts_storage(parts_storage, partition_relid); + result = scan_result_parts_storage(estate, parts_storage, partition_relid); /* Somebody has dropped or created partitions */ if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) @@ -837,6 +877,10 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) state->on_conflict_action != ONCONFLICT_NONE, RPS_RRI_CB(prepare_rri_for_insert, state), RPS_RRI_CB(NULL, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + state->result_parts.init_rri = current_rri; +#endif } #if PG_VERSION_NUM >= 140000 @@ -906,7 +950,7 @@ partition_filter_exec(CustomScanState *node) old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); /* Search for a matching partition */ - rri_holder = select_partition_for_insert(&state->result_parts, slot); + rri_holder = select_partition_for_insert(estate, &state->result_parts, slot); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); @@ -1223,6 +1267,14 @@ prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, query.targetList = NIL; query.returningList = NIL; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* + * Copy the RTEPermissionInfos into query as well, so that + * add_rte_to_flat_rtable() will work correctly. 
+ */ + query.rteperminfos = estate->es_rteperminfos; +#endif + /* Generate 'query.targetList' using 'tupdesc' */ target_attr = 1; for (i = 0; i < tupdesc->natts; i++) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 34600249..2e8b1d7e 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -551,7 +551,12 @@ append_child_relation(PlannerInfo *root, #endif child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* No permission checking for the child RTE */ + child_rte->perminfoindex = 0; +#else child_rte->requiredPerms = 0; /* perform all checks on parent */ +#endif child_rte->inh = false; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ @@ -676,6 +681,7 @@ append_child_relation(PlannerInfo *root, } +#if PG_VERSION_NUM < 160000 /* for commit a61b1f74823c */ /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) { @@ -694,6 +700,7 @@ append_child_relation(PlannerInfo *root, child_rte->updatedCols = bms_copy(parent_rte->updatedCols); } #endif +#endif /* PG_VERSION_NUM < 160000 */ /* Here and below we assume that parent RelOptInfo exists */ Assert(parent_rel); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 809884c2..542f99ae 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -725,7 +725,10 @@ is_tuple_convertible(PG_FUNCTION_ARGS) rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); /* Try to build a conversion map */ -#if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), false); +#elif PG_VERSION_NUM >= 130000 map = build_attrmap_by_name(RelationGetDescr(rel1), RelationGetDescr(rel2)); #else diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 027fd4e1..d9d64cfd 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -27,6 +27,9 @@ #include 
"foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "storage/lmgr.h" #include "utils/syscache.h" @@ -578,6 +581,10 @@ handle_modification_query(Query *parse, transform_query_cxt *context) List *translated_vars; adjust_appendrel_varnos_cxt aav_cxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; +#endif /* Lock 'child' table */ LockRelationOid(child, lockmode); @@ -598,10 +605,24 @@ handle_modification_query(Query *parse, transform_query_cxt *context) return; /* nothing to do here */ } +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + parent_perminfo = getRTEPermissionInfo(parse->rteperminfos, rte); +#endif /* Update RTE's relid and relkind (for FDW) */ rte->relid = child; rte->relkind = child_relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Copy parent RTEPermissionInfo. */ + rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&parse->rteperminfos, rte); + memcpy(child_perminfo, parent_perminfo, sizeof(RTEPermissionInfo)); + + /* Correct RTEPermissionInfo for child. 
*/ + child_perminfo->relid = child; + child_perminfo->inh = false; +#endif + /* HACK: unset the 'inh' flag (no children) */ rte->inh = false; @@ -622,10 +643,16 @@ handle_modification_query(Query *parse, transform_query_cxt *context) aav_cxt.translated_vars = translated_vars; adjust_appendrel_varnos((Node *) parse, &aav_cxt); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + child_perminfo->selectedCols = translate_col_privs(parent_perminfo->selectedCols, translated_vars); + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, translated_vars); +#else /* Translate column privileges for this child */ rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); +#endif } /* Close relations (should remain locked, though) */ diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 35786092..d1d9010c 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -26,12 +26,18 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "commands/copy.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "commands/copyfrom_internal.h" +#endif #include "commands/defrem.h" #include "commands/trigger.h" #include "commands/tablecmds.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "nodes/makefuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -414,6 +420,9 @@ PathmanDoCopy(const CopyStmt *stmt, "psql's \\copy command also works for anyone."))); } + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + /* Check that we have a relation */ 
if (stmt->relation) { @@ -422,6 +431,9 @@ PathmanDoCopy(const CopyStmt *stmt, List *attnums; ListCell *cur; RangeTblEntry *rte; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *perminfo; +#endif Assert(!stmt->query); @@ -432,11 +444,30 @@ PathmanDoCopy(const CopyStmt *stmt, rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + pstate->p_rtable = lappend(pstate->p_rtable, rte); + perminfo = addRTEPermissionInfo(&pstate->p_rteperminfos, rte); + perminfo->requiredPerms = required_access; +#else rte->requiredPerms = required_access; +#endif range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + foreach(cur, attnums) + { + int attno; + Bitmapset **bms; + + attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + bms = is_from ? 
&perminfo->insertedCols : &perminfo->selectedCols; + + *bms = bms_add_member(*bms, attno); + } + ExecCheckPermissions(pstate->p_rtable, list_make1(perminfo), true); +#else foreach(cur, attnums) { int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; @@ -447,6 +478,7 @@ PathmanDoCopy(const CopyStmt *stmt, rte->selectedCols = bms_add_member(rte->selectedCols, attnum); } ExecCheckRTPerms(range_table, true); +#endif /* Disable COPY FROM if table has RLS */ if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) @@ -470,9 +502,6 @@ PathmanDoCopy(const CopyStmt *stmt, /* This should never happen (see is_pathman_related_copy()) */ else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); - pstate = make_parsestate(NULL); - pstate->p_sourcetext = queryString; - if (is_from) { /* check read-only transaction and parallel mode */ @@ -567,6 +596,16 @@ PathmanCopyFrom( RPS_DEFAULT_SPECULATIVE, RPS_RRI_CB(prepare_rri_for_copy, cstate), RPS_RRI_CB(finish_rri_for_copy, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + parts_storage.init_rri = parent_rri; + + /* + * Copy the RTEPermissionInfos into estate as well, so that + * scan_result_parts_storage() et al will work correctly. + */ + estate->es_rteperminfos = cstate->rteperminfos; +#endif /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); @@ -629,7 +668,7 @@ PathmanCopyFrom( #endif /* Search for a matching partition */ - rri_holder = select_partition_for_insert(&parts_storage, slot); + rri_holder = select_partition_for_insert(estate, &parts_storage, slot); child_rri = rri_holder->result_rel_info; /* Magic: replace parent's ResultRelInfo with ours */ From bb9f6e49a7643b77126fb2575a96024bba0ae326 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Tue, 10 Jan 2023 19:13:48 +0300 Subject: [PATCH 1085/1124] Convert the reg* input functions to report (most) errors softly. 
See the commit 858e776c84f48841e7e16fba7b690b76e54f3675 (Convert the reg* input functions to report (most) errors softly.) in PostgreSQL 16. The function qualified_relnames_to_rangevars is used in the functions create_hash_partitions_internal and create_range_partitions_internal. It looks like these functions should not skip partition names (e.g. in the functions create_hash_partitions and create_range_partitions respectively).. --- src/include/compat/pg_compat.h | 11 +++++++++++ src/utils.c | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 80a76d60..4ae249e6 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1084,6 +1084,17 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, expression_tree_mutator((node), (mutator), (context)) #endif +/* + * stringToQualifiedNameList + */ +#if PG_VERSION_NUM >= 160000 +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string), NULL) +#else +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string)) +#endif + /* * ------------- * Common code diff --git a/src/utils.c b/src/utils.c index 15552f56..6ebfb8a8 100644 --- a/src/utils.c +++ b/src/utils.c @@ -518,7 +518,7 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) rangevars = palloc(sizeof(RangeVar *) * nrelnames); for (i = 0; i < nrelnames; i++) { - List *nl = stringToQualifiedNameList(relnames[i]); + List *nl = stringToQualifiedNameListCompat(relnames[i]); rangevars[i] = makeRangeVarFromNameList(nl); } From 2d49e88e1cb6c3df338ba82d733e0b2e896d0e15 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Tue, 10 Jan 2023 19:19:57 +0300 Subject: [PATCH 1086/1124] Add grantable MAINTAIN privilege and pg_maintain role. See the commit 60684dd834a222fefedd49b19d1f0a6189c1632e (Add grantable MAINTAIN privilege and pg_maintain role.) in PostgreSQL 16. 
Since pathman_permissions_1.out is already in use for PostgreSQL 16+ (see the commit 47806e7f69935caaa86f40e87cf215cb90aaf9a3 [PGPRO-7585] Fixes for v16 due to vanilla changes), do not create pathman_permissions_2.out. --- expected/pathman_permissions_1.out | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out index c7e04210..a50aa524 100644 --- a/expected/pathman_permissions_1.out +++ b/expected/pathman_permissions_1.out @@ -126,11 +126,11 @@ WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list ORDER BY range_max::int DESC /* append */ LIMIT 3) ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+---------------------------------------- - user1_table_2 | {user1=arwdDxtvz/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxtvz/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxtvz/user1,user2=ar/user1} + relname | relacl +---------------+--------------------------------------- + user1_table_2 | {user1=arwdDxtm/user1,user2=r/user1} + user1_table_5 | {user1=arwdDxtm/user1,user2=ar/user1} + user1_table_6 | {user1=arwdDxtm/user1,user2=ar/user1} (3 rows) /* Try to drop partition, should fail */ From e939296ebf9ef89c25fec08d9ebd6cbed5f6a9ca Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 24 Jan 2023 13:00:13 +0300 Subject: [PATCH 1087/1124] README: update versions list and remove obsolete emails --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d4b8e3bb..43d585ff 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ### NOTE: this project is not under development anymore -`pg_pathman` supports Postgres versions [9.5..13], but most probably it won't be ported to 14 and later releases. 
[Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. +`pg_pathman` supports Postgres versions [11..15], but most probably it won't be ported to later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. # pg_pathman @@ -13,8 +13,9 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10, 11, 12, 13; - * Postgres Pro Standard 9.5, 9.6, 10, 11, 12; + * PostgreSQL 11, 12, 13; + * PostgreSQL with core-patch: 14, 15; + * Postgres Pro Standard 11, 12, 13, 14, 15; * Postgres Pro Enterprise; Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). 
@@ -789,7 +790,7 @@ Do not hesitate to post your issues, questions and new ideas at the [issues](htt ## Authors [Ildar Musin](https://github.com/zilder) -Alexander Korotkov Postgres Professional Ltd., Russia +[Alexander Korotkov](https://github.com/akorotkov) [Dmitry Ivanov](https://github.com/funbringer) -Maksim Milyutin Postgres Professional Ltd., Russia +[Maksim Milyutin](https://github.com/maksm90) [Ildus Kurbangaliev](https://github.com/ildus) From db83c707475f263b4814103bed6eeebec3be67f5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 27 Jan 2023 19:43:22 +0300 Subject: [PATCH 1088/1124] [PGPRO-7287] New PgproRegisterXactCallback to filter by event kind Tags: pg_pathman --- src/pg_pathman.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 3b99a7e7..6457cdca 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -368,9 +368,13 @@ _PG_init(void) init_partition_overseer_static_data(); #if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* Callbacks for reload relcache for ATX transactions */ + PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); +#else RegisterXactCallback(pathman_xact_cb, NULL); #endif +#endif } #if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ From bcf2424f6d6ffe75c240adbcfd54d21d238cc750 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Sat, 4 Feb 2023 00:42:58 +0300 Subject: [PATCH 1089/1124] [PGPRO-7742] Use PgproRegisterXactCallback for all EE-versions Tags: pg_pathman --- src/pg_pathman.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/pg_pathman.c b/src/pg_pathman.c index d902d5d4..94cfce84 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -367,13 +367,9 @@ _PG_init(void) init_partition_router_static_data(); init_partition_overseer_static_data(); -#if 
defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 -#if PG_VERSION_NUM >= 150000 +#ifdef PGPRO_EE /* Callbacks for reload relcache for ATX transactions */ PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); -#else - RegisterXactCallback(pathman_xact_cb, NULL); -#endif #endif } From 96254aa04e5f40e37e8f7e577a04e354eec92571 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 6 Mar 2023 19:37:14 +0300 Subject: [PATCH 1090/1124] Fix for REL_14_STABLE/REL_15_STABLE diffs --- patches/REL_14_STABLE-pg_pathman-core.diff | 64 ++++++++++---------- patches/REL_15_STABLE-pg_pathman-core.diff | 70 +++++++++++----------- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index 751095aa..57576c44 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ index f27e458482..ea47c341c1 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index ca6f6d57d3..8ab313b910 100644 +index bf551b0395..10d2044ae6 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index ca6f6d57d3..8ab313b910 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 5483dee650..e2864e6ae9 100644 +index 6b63f93e6d..060146d127 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,10 +77,10 @@ index b3ce4bae53..8f2bb12542 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index d328856ae5..27235ec869 100644 +index 0780554246..a90f3a495d 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -450,7 +450,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, +@@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, * This is also a convenient place to verify that the output of an UPDATE * matches the target table (ExecBuildUpdateProjection does that). */ @@ -89,15 +89,15 @@ index d328856ae5..27235ec869 100644 ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) { -@@ -2363,6 +2363,7 @@ ExecModifyTable(PlanState *pstate) - PartitionTupleRouting *proute = node->mt_partition_tuple_routing; - List *relinfos = NIL; - ListCell *lc; +@@ -2487,6 +2487,7 @@ ExecModifyTable(PlanState *pstate) + ItemPointerData tuple_ctid; + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ResultRelInfo *saved_resultRelInfo; CHECK_FOR_INTERRUPTS(); -@@ -2400,12 +2401,23 @@ ExecModifyTable(PlanState *pstate) +@@ -2524,12 +2525,23 @@ ExecModifyTable(PlanState *pstate) resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; subplanstate = outerPlanState(node); @@ -111,7 +111,7 @@ index d328856ae5..27235ec869 100644 for (;;) { + /* -+ * "es_original_tuple" should contain original modified tuple (new ++ * "es_original_tuple" should contains original modified tuple (new + * values of the changed columns plus row identity information such as + * CTID) in case tuple planSlot is replaced in pg_pathman to new value + * in call "ExecProcNode(subplanstate)". @@ -121,7 +121,7 @@ index d328856ae5..27235ec869 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -2439,7 +2451,9 @@ ExecModifyTable(PlanState *pstate) +@@ -2563,7 +2575,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +132,7 @@ index d328856ae5..27235ec869 100644 &isNull); if (isNull) elog(ERROR, "tableoid is NULL"); -@@ -2458,6 +2472,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2582,6 +2596,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +141,7 @@ index d328856ae5..27235ec869 100644 /* * A scan slot containing the data that was actually inserted, -@@ -2467,6 +2483,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2591,6 +2607,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); @@ -149,7 +149,7 @@ index d328856ae5..27235ec869 100644 return slot; } -@@ -2496,7 +2513,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2620,7 +2637,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +159,7 @@ index d328856ae5..27235ec869 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2526,7 +2544,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2650,7 +2668,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +169,7 @@ index d328856ae5..27235ec869 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -2557,8 +2576,12 @@ ExecModifyTable(PlanState *pstate) +@@ -2681,8 +2700,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -184,7 +184,7 @@ index d328856ae5..27235ec869 100644 estate, node->canSetTag); break; case CMD_UPDATE: -@@ -2566,37 +2589,45 @@ ExecModifyTable(PlanState *pstate) +@@ -2690,37 +2713,45 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -253,7 +253,7 @@ index d328856ae5..27235ec869 100644 planSlot, &node->mt_epqstate, estate, true, /* processReturning */ node->canSetTag, -@@ -2613,7 +2644,10 @@ ExecModifyTable(PlanState *pstate) +@@ -2737,7 +2768,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -264,7 +264,7 @@ index d328856ae5..27235ec869 100644 } /* -@@ -2642,6 +2676,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2753,6 +2787,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -272,7 +272,7 @@ index d328856ae5..27235ec869 100644 return NULL; } -@@ -2716,6 +2751,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2827,6 +2862,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -280,7 +280,7 @@ index d328856ae5..27235ec869 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -2812,6 +2848,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2923,6 +2959,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -294,8 +294,8 @@ index d328856ae5..27235ec869 100644 /* * Now we may initialize the subplan. 
*/ -@@ -2884,6 +2927,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) - } +@@ -3004,6 +3047,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); } + estate->es_result_relation_info = saved_resultRelInfo; @@ -304,7 +304,7 @@ index d328856ae5..27235ec869 100644 * If this is an inherited update/delete, there will be a junk attribute * named "tableoid" present in the subplan's targetlist. It will be used diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c -index 381d9e548d..9d101c3a86 100644 +index 381d9e548d..0a4657d291 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -25,7 +25,7 @@ @@ -317,7 +317,7 @@ index 381d9e548d..9d101c3a86 100644 volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/src/include/access/xact.h b/src/include/access/xact.h -index 134f6862da..92ff475332 100644 +index 5af78bd0dc..0c13bc9d83 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; @@ -357,7 +357,7 @@ index 3dc03c913e..1002d97499 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h -index 02015efe13..2091f7f3b7 100644 +index 4acb1cda6e..fd8d38347d 100644 --- a/src/include/libpq/libpq-be.h +++ b/src/include/libpq/libpq-be.h @@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); @@ -370,10 +370,10 @@ index 02015efe13..2091f7f3b7 100644 /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. 
*/ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index 105180764e..2a40d2ce15 100644 +index ee5ad3c058..dc474819d7 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h -@@ -579,6 +579,12 @@ typedef struct EState +@@ -592,6 +592,12 @@ typedef struct EState * es_result_relations in no * specific order */ @@ -419,7 +419,7 @@ index de22c9ba2c..c8be5323b8 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index 05ff67e693..d169271df1 100644 +index 9b6539fb15..f8a67c6701 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -41,7 +41,10 @@ my @contrib_uselibpq = @@ -434,7 +434,7 @@ index 05ff67e693..d169271df1 100644 my $contrib_extrasource = { 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], -@@ -970,6 +973,7 @@ sub AddContrib +@@ -973,6 +976,7 @@ sub AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -442,7 +442,7 @@ index 05ff67e693..d169271df1 100644 AdjustContribProj($proj); } elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg) -@@ -999,6 +1003,19 @@ sub AddContrib +@@ -1002,6 +1006,19 @@ sub AddContrib return; } @@ -462,7 +462,7 @@ index 05ff67e693..d169271df1 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1023,23 +1040,53 @@ sub GenerateContribSqlFiles +@@ -1026,23 +1043,53 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index e0eb9a62..3d72d2e7 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -1,5 +1,5 @@ diff --git a/contrib/Makefile b/contrib/Makefile -index bbf220407b0..9a82a2db046 100644 +index bbf220407b..9a82a2db04 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -34,6 +34,7 @@ SUBDIRS = \ @@ -11,7 +11,7 @@ index bbf220407b0..9a82a2db046 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 594d8da2cdc..a2049e70e95 100644 +index d0e5bc26a7..5ca196518e 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index 594d8da2cdc..a2049e70e95 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index ef0f9577ab1..95858960d50 100644 +index ef0f9577ab..95858960d5 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -45,7 +45,7 @@ index ef0f9577ab1..95858960d50 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092e..8551733c55d 100644 +index ef2fd46092..8551733c55 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -77,10 +77,10 @@ index ef2fd46092e..8551733c55d 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 04454ad6e60..6a52e86b782 100644 +index ad0aa8dd9d..a2715efa09 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -603,6 +603,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, +@@ -663,6 +663,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, resultRelInfo->ri_projectNewInfoValid = true; } @@ -94,15 +94,15 @@ index 04454ad6e60..6a52e86b782 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3461,6 +3468,7 @@ ExecModifyTable(PlanState *pstate) - PartitionTupleRouting *proute = node->mt_partition_tuple_routing; - List *relinfos = NIL; - ListCell *lc; +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; + ResultRelInfo *saved_resultRelInfo; CHECK_FOR_INTERRUPTS(); -@@ -3502,6 +3510,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3509,6 +3519,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3542,7 +3560,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index 04454ad6e60..6a52e86b782 100644 &isNull); if (isNull) { -@@ -3579,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index 04454ad6e60..6a52e86b782 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3588,6 +3610,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index 04454ad6e60..6a52e86b782 100644 return slot; } -@@ -3618,7 +3641,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index 04454ad6e60..6a52e86b782 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3666,7 +3690,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index 04454ad6e60..6a52e86b782 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3697,9 +3722,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,7 +190,7 @@ index 04454ad6e60..6a52e86b782 100644 break; case CMD_UPDATE: -@@ -3707,38 +3735,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3827,38 +3855,46 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -260,7 +260,7 @@ index 04454ad6e60..6a52e86b782 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3756,7 +3792,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3876,7 +3912,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -271,7 +271,7 @@ index 04454ad6e60..6a52e86b782 100644 } /* -@@ -3785,6 +3824,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3892,6 +3931,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -279,7 +279,7 @@ index 04454ad6e60..6a52e86b782 100644 return NULL; } -@@ -3859,6 +3899,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3966,6 +4006,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -287,7 +287,7 @@ index 04454ad6e60..6a52e86b782 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -3959,6 +4000,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4066,6 +4107,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -301,8 +301,8 @@ index 04454ad6e60..6a52e86b782 100644 /* * Now we may initialize the subplan. 
*/ -@@ -4041,6 +4089,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) - } +@@ -4157,6 +4205,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); } + estate->es_result_relation_info = saved_resultRelInfo; @@ -311,7 +311,7 @@ index 04454ad6e60..6a52e86b782 100644 * If this is an inherited update/delete/merge, there will be a junk * attribute named "tableoid" present in the subplan's targetlist. It diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c -index 1a5d29ac9ba..aadca8ea474 100644 +index 1a5d29ac9b..aadca8ea47 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -25,7 +25,7 @@ @@ -324,7 +324,7 @@ index 1a5d29ac9ba..aadca8ea474 100644 volatile sig_atomic_t InterruptPending = false; volatile sig_atomic_t QueryCancelPending = false; diff --git a/src/include/access/xact.h b/src/include/access/xact.h -index 65616ca2f79..965eb544217 100644 +index 8d46a781bb..150d70cb64 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; @@ -337,7 +337,7 @@ index 65616ca2f79..965eb544217 100644 /* flag for logging statements in this transaction */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index 82925b4b633..de23622ca24 100644 +index 82925b4b63..de23622ca2 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, @@ -359,10 +359,10 @@ index 82925b4b633..de23622ca24 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index 57288013795..ec5496afffa 100644 +index f34d06eff4..0970e5f110 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h -@@ -611,6 +611,12 @@ typedef struct EState +@@ -624,6 +624,12 @@ typedef struct 
EState * es_result_relations in no * specific order */ @@ -376,7 +376,7 @@ index 57288013795..ec5496afffa 100644 /* diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm -index 8de79c618cb..c9226ba5ad4 100644 +index 8de79c618c..c9226ba5ad 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -30,6 +30,18 @@ my @client_program_files = ( @@ -408,7 +408,7 @@ index 8de79c618cb..c9226ba5ad4 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index e4feda10fd8..74a0a0a062b 100644 +index ef0a33c10f..27033b0a45 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -39,8 +39,8 @@ my $contrib_defines = {}; @@ -422,7 +422,7 @@ index e4feda10fd8..74a0a0a062b 100644 my $contrib_extrasource = {}; my @contrib_excludes = ( 'bool_plperl', 'commit_ts', -@@ -964,6 +964,7 @@ sub AddContrib +@@ -967,6 +967,7 @@ sub AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -430,7 +430,7 @@ index e4feda10fd8..74a0a0a062b 100644 AdjustContribProj($proj); push @projects, $proj; } -@@ -1067,6 +1068,19 @@ sub AddContrib +@@ -1070,6 +1071,19 @@ sub AddContrib return; } @@ -450,7 +450,7 @@ index e4feda10fd8..74a0a0a062b 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1091,23 +1105,53 @@ sub GenerateContribSqlFiles +@@ -1094,23 +1108,53 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); } From 92f073473bd407311c736d03bfd074c659a21e68 Mon Sep 17 00:00:00 2001 From: Svetlana Derevyanko Date: Fri, 27 Jan 2023 09:54:11 +0300 Subject: [PATCH 1091/1124] [PGPRO-7630] Post-processing for nodes added in plan tree by pathman New nodes added in pathman planner hook had no correct plan_node_id, which could cause problems later for statistics collector. Added fixes for 'custom_scan_tlist' to let EXPLAIN (VERBOSE) work. Also changed queryId type on uint64. 
Added hook for compatibility with pgpro_stats. Fixed tree walkers for ModifyTable. Tags: pg_pathman --- src/hooks.c | 81 ++++++++++++++++++++++++++++++++- src/include/hooks.h | 3 ++ src/include/partition_filter.h | 1 + src/include/partition_router.h | 2 +- src/partition_filter.c | 29 ++++++++++++ src/partition_router.c | 8 ++-- src/pg_pathman.c | 2 + src/planner_tree_modification.c | 34 +++++++------- 8 files changed, 137 insertions(+), 23 deletions(-) diff --git a/src/hooks.c b/src/hooks.c index 46204d5c..65c62494 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -21,6 +21,7 @@ #include "hooks.h" #include "init.h" #include "partition_filter.h" +#include "partition_overseer.h" #include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" @@ -74,6 +75,7 @@ planner_hook_type pathman_planner_hook_next = NULL; post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; +ExecutorStart_hook_type pathman_executor_start_hook_prev = NULL; /* Take care of joins */ @@ -673,6 +675,23 @@ execute_for_plantree(PlannedStmt *planned_stmt, planned_stmt->subplans = subplans; } +/* + * Truncated version of set_plan_refs. + * Pathman can add nodes to already completed and post-processed plan tree. + * reset_plan_node_ids fixes some presentation values for updated plan tree + * to avoid problems in further processing. + */ +static Plan * +reset_plan_node_ids(Plan *plan, void *lastPlanNodeId) +{ + if (plan == NULL) + return NULL; + + plan->plan_node_id = (*(int *) lastPlanNodeId)++; + + return plan; +} + /* * Planner hook. 
It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from @@ -688,7 +707,7 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) #endif { PlannedStmt *result; - uint32 query_id = parse->queryId; + uint64 query_id = parse->queryId; /* Save the result in case it changes */ bool pathman_ready = IsPathmanReady(); @@ -720,6 +739,9 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) if (pathman_ready) { + int lastPlanNodeId = 0; + ListCell *l; + /* Add PartitionFilter node for INSERT queries */ execute_for_plantree(result, add_partition_filters); @@ -729,6 +751,13 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Decrement planner() calls count */ decr_planner_calls_count(); + /* remake parsed tree presentation fixes due to possible adding nodes */ + result->planTree = plan_tree_visitor(result->planTree, reset_plan_node_ids, &lastPlanNodeId); + foreach(l, result->subplans) + { + lfirst(l) = plan_tree_visitor((Plan *) lfirst(l), reset_plan_node_ids, &lastPlanNodeId); + } + /* HACK: restore queryId set by pg_stat_statements */ result->queryId = query_id; } @@ -1125,3 +1154,53 @@ pathman_process_utility_hook(Node *first_arg, dest, completionTag); #endif } + +/* + * Planstate tree nodes could have been copied. + * It breaks references on correspoding + * ModifyTable node from PartitionRouter nodes. 
+ */ +static void +fix_mt_refs(PlanState *state, void *context) +{ + ModifyTableState *mt_state = (ModifyTableState *) state; + PartitionRouterState *pr_state; +#if PG_VERSION_NUM < 140000 + int i; +#endif + + if (!IsA(state, ModifyTableState)) + return; +#if PG_VERSION_NUM >= 140000 + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif + if (IsPartitionFilterState(pf_state)) + { + pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + pr_state->mt_state = mt_state; + } + } + } +} + +void +pathman_executor_start_hook(QueryDesc *queryDesc, int eflags) +{ + if (pathman_executor_start_hook_prev) + pathman_executor_start_hook_prev(queryDesc, eflags); + else + standard_ExecutorStart(queryDesc, eflags); + + /* + * HACK for compatibility with pgpro_stats. + * Fix possibly broken planstate tree. + */ + state_tree_visitor(queryDesc->planstate, fix_mt_refs, NULL); +} diff --git a/src/include/hooks.h b/src/include/hooks.h index ccfe060b..813d1342 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -28,6 +28,7 @@ extern post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next; extern shmem_startup_hook_type pathman_shmem_startup_hook_next; extern ProcessUtility_hook_type pathman_process_utility_hook_next; extern ExecutorRun_hook_type pathman_executor_run_hook_next; +extern ExecutorStart_hook_type pathman_executor_start_hook_prev; void pathman_join_pathlist_hook(PlannerInfo *root, @@ -115,4 +116,6 @@ void pathman_executor_hook(QueryDesc *queryDesc, ExecutorRun_CountArgType count); #endif +void pathman_executor_start_hook(QueryDesc *queryDescc, + int eflags); #endif /* PATHMAN_HOOKS_H */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index d3c2c482..9b9f52f9 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h 
@@ -183,6 +183,7 @@ void destroy_tuple_map(TupleConversionMap *tuple_map); List * pfilter_build_tlist(Plan *subplan); +void pfilter_tlist_fix_resjunk(CustomScan *subplan); /* Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, diff --git a/src/include/partition_router.h b/src/include/partition_router.h index c6924609..d5684eba 100644 --- a/src/include/partition_router.h +++ b/src/include/partition_router.h @@ -78,7 +78,7 @@ void partition_router_explain(CustomScanState *node, List *ancestors, ExplainState *es); -Plan *make_partition_router(Plan *subplan, int epq_param); +Plan *make_partition_router(Plan *subplan, int epq_param, Index parent_rti); Node *partition_router_create_scan_state(CustomScan *node); TupleTableSlot *partition_router_exec(CustomScanState *node); diff --git a/src/partition_filter.c b/src/partition_filter.c index a267c702..78ad126b 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -817,6 +817,7 @@ make_partition_filter(Plan *subplan, /* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); + pfilter_tlist_fix_resjunk(cscan); /* Pack partitioned table's Oid and conflict_action */ cscan->custom_private = list_make4(makeInteger(parent_relid), @@ -1114,6 +1115,34 @@ pfilter_build_tlist(Plan *subplan) return result_tlist; } +/* + * resjunk Vars had its varattnos being set on nonexisting relation columns. + * For future processing service attributes should be indicated correctly. + */ +void +pfilter_tlist_fix_resjunk(CustomScan *css) +{ + ListCell *lc; + + foreach(lc, css->custom_scan_tlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + + if (!IsA(tle->expr, Const)) + { + Var *var = (Var *) tle->expr; + + if (tle->resjunk) + { + /* To make Var recognizable as service attribute. 
*/ + var->varattno = -1; + } + } + } + + return; +} + /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_router.c b/src/partition_router.c index 2e982299..bd081218 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -115,7 +115,7 @@ init_partition_router_static_data(void) } Plan * -make_partition_router(Plan *subplan, int epq_param) +make_partition_router(Plan *subplan, int epq_param, Index parent_rti) { CustomScan *cscan = makeNode(CustomScan); @@ -136,8 +136,10 @@ make_partition_router(Plan *subplan, int epq_param) /* Build an appropriate target list */ cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); - /* FIXME: should we use the same tlist? */ - cscan->custom_scan_tlist = subplan->targetlist; + /* Fix 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ + cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); + ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); + pfilter_tlist_fix_resjunk(cscan); return &cscan->scan.plan; } diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 94cfce84..6e835a1f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -357,6 +357,8 @@ _PG_init(void) planner_hook = pathman_planner_hook; pathman_process_utility_hook_next = ProcessUtility_hook; ProcessUtility_hook = pathman_process_utility_hook; + pathman_executor_start_hook_prev = ExecutorStart_hook; + ExecutorStart_hook = pathman_executor_start_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index d9d64cfd..5b6a7982 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -122,8 +122,8 @@ static void handle_modification_query(Query *parse, transform_query_cxt *context static Plan *partition_filter_visitor(Plan *plan, void *context); static Plan *partition_router_visitor(Plan *plan, void *context); -static 
void state_visit_subplans(List *plans, void (*visitor) (), void *context); -static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (), void *context); +static void state_visit_subplans(List *plans, void (*visitor) (PlanState *plan, void *context), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (PlanState *plan, void *context), void *context); static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); @@ -137,13 +137,13 @@ static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); * id in order to recognize them properly. */ #define QUERY_ID_INITIAL 0 -static uint32 latest_query_id = QUERY_ID_INITIAL; +static uint64 latest_query_id = QUERY_ID_INITIAL; void assign_query_id(Query *query) { - uint32 prev_id = latest_query_id++; + uint64 prev_id = latest_query_id++; if (prev_id > latest_query_id) elog(WARNING, "assign_query_id(): queryId overflow"); @@ -187,14 +187,12 @@ plan_tree_visitor(Plan *plan, plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ case T_ModifyTable: -#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ - plan_tree_visitor(outerPlan(plan), visitor, context); -#else foreach (l, ((ModifyTable *) plan)->plans) plan_tree_visitor((Plan *) lfirst(l), visitor, context); -#endif break; +#endif case T_Append: foreach (l, ((Append *) plan)->appendplans) @@ -254,15 +252,13 @@ state_tree_visitor(PlanState *state, state_tree_visitor((PlanState *) lfirst(lc), visitor, context); break; +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ case T_ModifyTable: -#if PG_VERSION_NUM >= 140000 /* reworked in commit 86dc90056dfd */ - visitor(outerPlanState(state), context); -#else state_visit_members(((ModifyTableState *) state)->mt_plans, ((ModifyTableState *) state)->mt_nplans, visitor, 
context); -#endif break; +#endif case T_Append: state_visit_members(((AppendState *) state)->appendplans, @@ -307,7 +303,7 @@ state_tree_visitor(PlanState *state, */ static void state_visit_subplans(List *plans, - void (*visitor) (), + void (*visitor) (PlanState *plan, void *context), void *context) { ListCell *lc; @@ -315,7 +311,7 @@ state_visit_subplans(List *plans, foreach (lc, plans) { SubPlanState *sps = lfirst_node(SubPlanState, lc); - visitor(sps->planstate, context); + state_tree_visitor(sps->planstate, visitor, context); } } @@ -325,12 +321,12 @@ state_visit_subplans(List *plans, */ static void state_visit_members(PlanState **planstates, int nplans, - void (*visitor) (), void *context) + void (*visitor) (PlanState *plan, void *context), void *context) { int i; for (i = 0; i < nplans; i++) - visitor(planstates[i], context); + state_tree_visitor(planstates[i], visitor, context); } @@ -939,10 +935,12 @@ partition_router_visitor(Plan *plan, void *context) #if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ prouter = make_partition_router(subplan, - modify_table->epqParam); + modify_table->epqParam, + modify_table->nominalRelation); #else prouter = make_partition_router((Plan *) lfirst(lc1), - modify_table->epqParam); + modify_table->epqParam, + modify_table->nominalRelation); #endif pfilter = make_partition_filter((Plan *) prouter, relid, From 6bcd9d82b91baffd6b7024501e7c1837ddaffb1e Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 10 Mar 2023 19:58:11 +0300 Subject: [PATCH 1092/1124] [PGPRO-7880] Need to check relation Oid before locking It need to do before relation locking: locking of invalid Oid causes an error on replica Tags: pg_pathman --- expected/pathman_calamity.out | 10 +++++----- expected/pathman_calamity_1.out | 10 +++++----- expected/pathman_calamity_2.out | 10 +++++----- expected/pathman_calamity_3.out | 10 +++++----- src/include/utils.h | 1 + src/pathman_workers.c | 2 ++ src/pl_funcs.c | 6 ++++++ src/pl_range_funcs.c | 3 +++ 
src/utils.c | 11 +++++++++++ 9 files changed, 43 insertions(+), 20 deletions(-) diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 7226e7b9..b9421bde 100644 --- a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -320,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? 
---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out index 62050cfd..6ca2e7dd 100644 --- a/expected/pathman_calamity_1.out +++ b/expected/pathman_calamity_1.out @@ -320,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function 
drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? ---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out index 5bb1053f..fa3295f6 100644 --- a/expected/pathman_calamity_2.out +++ b/expected/pathman_calamity_2.out @@ -320,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -426,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has 
no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? ---------- @@ -560,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out index bfb3b63c..a8879ef7 100644 --- a/expected/pathman_calamity_3.out +++ b/expected/pathman_calamity_3.out @@ -324,7 +324,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -430,19 +430,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or 
greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? ---------- @@ -564,7 +564,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ diff --git a/src/include/utils.h b/src/include/utils.h index 1e0b87a4..566c04db 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -84,5 +84,6 @@ Datum extract_binary_interval_from_text(Datum interval_text, Oid *interval_type); char **deconstruct_text_array(Datum array, int *array_size); RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +void check_relation_oid(Oid relid); #endif /* PATHMAN_UTILS_H */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index eca9ee52..3eb82ab7 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -712,6 +712,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'sleep_time' should not be less than 
0.5"))); + check_relation_oid(relid); + /* Prevent concurrent function calls */ LockRelationOid(relid, lockmode); diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 542f99ae..10538bea 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -673,6 +673,7 @@ validate_expression(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); + check_relation_oid(relid); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'relid' should not be NULL"))); @@ -807,6 +808,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); + check_relation_oid(relid); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); @@ -1037,6 +1039,8 @@ prevent_part_modification(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); + check_relation_oid(relid); + /* Lock partitioned relation till transaction's end */ LockRelationOid(relid, ShareUpdateExclusiveLock); @@ -1051,6 +1055,8 @@ prevent_data_modification(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); + check_relation_oid(relid); + /* * Check that isolation level is READ COMMITTED. 
* Else we won't be able to see new rows diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index b2a8dc3d..19292a0a 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -499,6 +499,7 @@ split_range_partition(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(0)) { partition1 = PG_GETARG_OID(0); + check_relation_oid(partition1); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'partition1' should not be NULL"))); @@ -835,6 +836,8 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) RangeEntry *ranges; int i; + check_relation_oid(partition); + /* Lock the partition we're going to drop */ LockRelationOid(partition, AccessExclusiveLock); diff --git a/src/utils.c b/src/utils.c index 6ebfb8a8..9402d618 100644 --- a/src/utils.c +++ b/src/utils.c @@ -527,3 +527,14 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) return rangevars; } +/* + * Checks that the Oid is valid (this must be done before relation locking: locking of + * an invalid Oid causes an error on a replica). 
+ */ +void +check_relation_oid(Oid relid) +{ + if (relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("identifier \"%u\" must be normal Oid", relid))); +} From 47acbe67e07bbf5841e52b705bdb03aa1adf768e Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 9 Mar 2023 21:29:56 +0300 Subject: [PATCH 1093/1124] [PGPRO-7870] Added error for case executing prepared query after DROP/CREATE EXTENSION Tags: pg_pathman --- expected/pathman_cache_pranks.out | 150 ++++++++++++++++++ expected/pathman_cache_pranks_1.out | 237 ++++++++++++++++++++++++++++ sql/pathman_cache_pranks.sql | 69 ++++++++ src/nodes_common.c | 15 +- 4 files changed, 469 insertions(+), 2 deletions(-) create mode 100644 expected/pathman_cache_pranks_1.out diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out index 5493ae96..278643ff 100644 --- a/expected/pathman_cache_pranks.out +++ b/expected/pathman_cache_pranks.out @@ -76,5 +76,155 @@ ERROR: can't partition table "part_test" with existing children DROP TABLE part_test CASCADE; NOTICE: drop cascades to 302 other objects -- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT 
create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects -- finalize DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks_1.out b/expected/pathman_cache_pranks_1.out new file mode 100644 index 00000000..4a3982a6 --- /dev/null +++ b/expected/pathman_cache_pranks_1.out @@ -0,0 +1,237 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? +SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val 
serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT 
create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; +ERROR: syntax error at or near "AUTONOMOUS" at character 7 + DROP EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + CREATE EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + COMMIT; +COMMIT; +WARNING: there is no transaction in progress +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql index 782ef7f0..e3fe00d9 100644 --- a/sql/pathman_cache_pranks.sql +++ b/sql/pathman_cache_pranks.sql @@ -48,6 +48,75 @@ SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]: DROP TABLE part_test CASCADE; -- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; -- finalize DROP EXTENSION pg_pathman; diff --git a/src/nodes_common.c b/src/nodes_common.c index a6fecb51..f4ebc6b1 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -601,7 +601,10 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, CustomScan *cscan; prel = get_pathman_relation_info(rpath->relid); - Assert(prel); + if (!prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(rpath->relid)))); cscan = makeNode(CustomScan); cscan->custom_scan_tlist = 
NIL; /* initial value (empty list) */ @@ -709,7 +712,15 @@ begin_append_common(CustomScanState *node, EState *estate, int eflags) #endif scan_state->prel = get_pathman_relation_info(scan_state->relid); - Assert(scan_state->prel); + /* + * scan_state->prel can be NULL in case execution of prepared query that + * was prepared before DROP/CREATE EXTENSION pg_pathman or after + * pathman_config table truncation etc. + */ + if (!scan_state->prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(scan_state->relid)))); /* Prepare expression according to set_set_customscan_references() */ scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); From c4c0e34a6cc74cb8c455c1f26582883457e16630 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 22 Mar 2023 10:06:36 +0300 Subject: [PATCH 1094/1124] [PGPRO-7928] Variable pg_pathman.enable must be called before any query Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 41 ++++++++++++++++++++++++---- expected/pathman_runtime_nodes_1.out | 41 ++++++++++++++++++++++++---- sql/pathman_runtime_nodes.sql | 32 ++++++++++++++++++---- src/hooks.c | 24 +++++++++++++++- src/include/hooks.h | 1 + src/init.c | 2 +- 6 files changed, 123 insertions(+), 18 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 17905e59..5d3b5638 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -58,7 +58,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_2() returns text as $$ @@ -100,7 +99,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_3() returns text as $$ @@ -133,7 +131,6 @@ begin return 'ok'; end; $$ 
language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_4() returns text as $$ @@ -172,7 +169,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_5() returns text as $$ @@ -233,7 +229,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; create table test.run_values as select generate_series(1, 10000) val; @@ -464,5 +459,41 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. +-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +ERROR: relation "part_test_1" does not exist +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 2 | part_test +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 3 | part_test +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION 
part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index 65382269..10435240 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -58,7 +58,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_2() returns text as $$ @@ -100,7 +99,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_3() returns text as $$ @@ -133,7 +131,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_4() returns text as $$ @@ -172,7 +169,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_5() returns text as $$ @@ -233,7 +229,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; create table test.run_values as select generate_series(1, 10000) val; @@ -464,5 +459,41 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +ERROR: relation "part_test_1" does not exist +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 2 | part_test +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+----------- + 3 | part_test +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 81c046db..9fa7028f 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -63,7 +63,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -106,7 +105,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -140,7 +138,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -180,7 +177,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -242,7 
+238,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; @@ -347,6 +342,31 @@ DROP FUNCTION test.pathman_test_3(); DROP FUNCTION test.pathman_test_4(); DROP FUNCTION test.pathman_test_5(); DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. +-- +CREATE TABLE part_test (val int NOT NULL); +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); + DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; - diff --git a/src/hooks.c b/src/hooks.c index 65c62494..b4ae796a 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -39,8 +39,9 @@ #include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "rewrite/rewriteManip.h" -#include "utils/typcache.h" #include "utils/lsyscache.h" +#include "utils/typcache.h" +#include "utils/snapmgr.h" #ifdef USE_ASSERT_CHECKING @@ -614,6 +615,27 @@ pathman_rel_pathlist_hook(PlannerInfo *root, close_pathman_relation_info(prel); } +/* + * 'pg_pathman.enable' GUC check. 
+ */ +bool +pathman_enable_check_hook(bool *newval, void **extra, GucSource source) +{ + if (FirstSnapshotSet || + GetTopTransactionIdIfAny() != InvalidTransactionId || +#ifdef PGPRO_EE + getNestLevelATX() > 0 || +#endif + IsSubTransaction()) + { + GUC_check_errcode(ERRCODE_ACTIVE_SQL_TRANSACTION); + GUC_check_errmsg("\"pg_pathman.enable\" must be called before any query"); + return false; + } + + return true; +} + /* * Intercept 'pg_pathman.enable' GUC assignments. */ diff --git a/src/include/hooks.h b/src/include/hooks.h index 813d1342..4d426f5a 100644 --- a/src/include/hooks.h +++ b/src/include/hooks.h @@ -44,6 +44,7 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, RangeTblEntry *rte); void pathman_enable_assign_hook(bool newval, void *extra); +bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source); PlannedStmt * pathman_planner_hook(Query *parse, #if PG_VERSION_NUM >= 130000 diff --git a/src/init.c b/src/init.c index bdec28fd..4341d406 100644 --- a/src/init.c +++ b/src/init.c @@ -166,7 +166,7 @@ init_main_pathman_toggles(void) DEFAULT_PATHMAN_ENABLE, PGC_SUSET, 0, - NULL, + pathman_enable_check_hook, pathman_enable_assign_hook, NULL); From 2bb067d44ea8b54ba0e3c0ac17af07cf334941ac Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 18 Apr 2023 14:41:11 +0300 Subject: [PATCH 1095/1124] [PGPRO-8041] Fixed restrictions for pg_pathman.enable Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 24 +++++++++++++++--------- expected/pathman_runtime_nodes_1.out | 24 +++++++++++++++--------- sql/pathman_runtime_nodes.sql | 2 +- src/hooks.c | 19 ++++++++++++++++--- 4 files changed, 47 insertions(+), 22 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index 5d3b5638..ab8a7e02 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -465,8 +465,12 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. 
-- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); -ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); @@ -478,22 +482,24 @@ END; $$ LANGUAGE PLPGSQL; SET pg_pathman.enable_partitionrouter = t; CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); -ERROR: relation "part_test_1" does not exist INSERT INTO part_test VALUES (1); UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 2 | part_test + val | tableoid +-----+------------- + 2 | part_test_1 (1 row) UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 3 | part_test +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. + val | tableoid +-----+------------ + 3 | pg_pathman (1 row) RESET pg_pathman.enable_partitionrouter; DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index 10435240..ef928861 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -465,8 +465,12 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. 
-- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); -ERROR: function create_hash_partitions(unknown, unknown, integer, partition_names => text[]) does not exist at character 8 +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); @@ -478,22 +482,24 @@ END; $$ LANGUAGE PLPGSQL; SET pg_pathman.enable_partitionrouter = t; CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); -ERROR: relation "part_test_1" does not exist INSERT INTO part_test VALUES (1); UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 2 | part_test + val | tableoid +-----+------------- + 2 | part_test_1 (1 row) UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; - val | tableoid ------+----------- - 3 | part_test +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. + val | tableoid +-----+------------ + 3 | pg_pathman (1 row) RESET pg_pathman.enable_partitionrouter; DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; DROP SCHEMA pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index 9fa7028f..bf917d88 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -348,7 +348,7 @@ DROP SCHEMA test; -- Variable pg_pathman.enable must be called before any query. 
-- CREATE TABLE part_test (val int NOT NULL); -SELECT create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ BEGIN RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); diff --git a/src/hooks.c b/src/hooks.c index b4ae796a..89d2074e 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -621,6 +621,15 @@ pathman_rel_pathlist_hook(PlannerInfo *root, bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source) { + /* The top level statement requires immediate commit: accept GUC change */ + if (MyXactFlags & XACT_FLAGS_NEEDIMMEDIATECOMMIT) + return true; + + /* Ignore the case of re-setting the same value */ + if (*newval == pathman_init_state.pg_pathman_enable) + return true; + + /* Command must be at top level of a fresh transaction. */ if (FirstSnapshotSet || GetTopTransactionIdIfAny() != InvalidTransactionId || #ifdef PGPRO_EE @@ -628,9 +637,13 @@ pathman_enable_check_hook(bool *newval, void **extra, GucSource source) #endif IsSubTransaction()) { - GUC_check_errcode(ERRCODE_ACTIVE_SQL_TRANSACTION); - GUC_check_errmsg("\"pg_pathman.enable\" must be called before any query"); - return false; + /* Keep the old value. */ + *newval = pathman_init_state.pg_pathman_enable; + + ereport(WARNING, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("\"SET pg_pathman.enable\" must be called before any query. 
" + "Command ignored."))); } return true; From e568aa64afbdc1cdb0191492fef1f7361b34769a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 19 Apr 2023 13:38:49 +0300 Subject: [PATCH 1096/1124] [PGPRO-8041] Corrected warning message; moved line with assignment Tags: pg_pathman --- expected/pathman_runtime_nodes.out | 2 +- expected/pathman_runtime_nodes_1.out | 2 +- src/hooks.c | 9 ++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index ab8a7e02..f699ddeb 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -491,7 +491,7 @@ UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; NOTICE: AFTER DELETE ROW (part_test_1) -WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. +WARNING: "pg_pathman.enable" must be called before any query, ignored val | tableoid -----+------------ 3 | pg_pathman diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out index ef928861..e975c761 100644 --- a/expected/pathman_runtime_nodes_1.out +++ b/expected/pathman_runtime_nodes_1.out @@ -491,7 +491,7 @@ UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; NOTICE: AFTER DELETE ROW (part_test_1) -WARNING: "SET pg_pathman.enable" must be called before any query. Command ignored. +WARNING: "pg_pathman.enable" must be called before any query, ignored val | tableoid -----+------------ 3 | pg_pathman diff --git a/src/hooks.c b/src/hooks.c index 89d2074e..437c89a6 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -637,13 +637,12 @@ pathman_enable_check_hook(bool *newval, void **extra, GucSource source) #endif IsSubTransaction()) { - /* Keep the old value. 
*/ - *newval = pathman_init_state.pg_pathman_enable; - ereport(WARNING, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("\"SET pg_pathman.enable\" must be called before any query. " - "Command ignored."))); + errmsg("\"pg_pathman.enable\" must be called before any query, ignored"))); + + /* Keep the old value. */ + *newval = pathman_init_state.pg_pathman_enable; } return true; From 6ef2ea0dcc8d06e0a28571d83efb522bfa414fe6 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Tue, 28 Mar 2023 21:57:35 +0300 Subject: [PATCH 1097/1124] [PGPRO-7963] Fix for REL_14_STABLE/REL_15_STABLE diffs Tags: pg_pathman --- patches/REL_14_STABLE-pg_pathman-core.diff | 89 +++++++------------- patches/REL_15_STABLE-pg_pathman-core.diff | 96 +++++++--------------- 2 files changed, 59 insertions(+), 126 deletions(-) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index 57576c44..af130c15 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -24,7 +24,7 @@ index bf551b0395..10d2044ae6 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 6b63f93e6d..060146d127 100644 +index bdf59a10fc..972453d9a5 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,7 +77,7 @@ index b3ce4bae53..8f2bb12542 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 0780554246..a90f3a495d 100644 +index 55c430c9ec..21d9e6304a 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, @@ -89,7 +89,7 @@ index 0780554246..a90f3a495d 100644 ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) { -@@ -2487,6 +2487,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2486,6 +2486,7 @@ ExecModifyTable(PlanState *pstate) ItemPointerData tuple_ctid; HeapTupleData oldtupdata; HeapTuple oldtuple; @@ -97,7 +97,7 @@ index 0780554246..a90f3a495d 100644 CHECK_FOR_INTERRUPTS(); -@@ -2524,12 +2525,23 @@ ExecModifyTable(PlanState *pstate) +@@ -2523,12 +2524,23 @@ ExecModifyTable(PlanState *pstate) resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; subplanstate = outerPlanState(node); @@ -121,7 +121,7 @@ index 0780554246..a90f3a495d 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -2563,7 +2575,9 @@ ExecModifyTable(PlanState *pstate) +@@ -2562,7 +2574,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -132,7 +132,7 @@ index 0780554246..a90f3a495d 100644 &isNull); if (isNull) elog(ERROR, "tableoid is NULL"); -@@ -2582,6 +2596,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2581,6 +2595,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -141,7 +141,7 @@ index 0780554246..a90f3a495d 100644 /* * A scan slot containing the data that was actually inserted, -@@ -2591,6 +2607,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2590,6 +2606,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); @@ -149,7 +149,7 @@ index 0780554246..a90f3a495d 100644 return slot; } -@@ -2620,7 +2637,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2619,7 +2636,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -159,7 +159,7 @@ index 0780554246..a90f3a495d 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... */ -@@ -2650,7 +2668,8 @@ ExecModifyTable(PlanState *pstate) +@@ -2649,7 +2667,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -169,7 +169,7 @@ index 0780554246..a90f3a495d 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -2681,8 +2700,12 @@ ExecModifyTable(PlanState *pstate) +@@ -2680,8 +2699,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -184,58 +184,25 @@ index 0780554246..a90f3a495d 100644 estate, node->canSetTag); break; case CMD_UPDATE: -@@ -2690,37 +2713,45 @@ ExecModifyTable(PlanState *pstate) +@@ -2689,6 +2712,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); -- /* -- * Make the new tuple by combining plan's output tuple with -- * the old tuple being updated. -- */ -- oldSlot = resultRelInfo->ri_oldTupleSlot; -- if (oldtuple != NULL) -- { -- /* Use the wholerow junk attr as the old tuple. */ -- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -- } -- else ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ + /* Do nothing in case tuple was modified in pg_pathman: */ + if (!estate->es_original_tuple) - { -- /* Fetch the most recent version of old tuple. */ -- Relation relation = resultRelInfo->ri_RelationDesc; -- -- Assert(tupleid != NULL); -- if (!table_tuple_fetch_row_version(relation, tupleid, -- SnapshotAny, -- oldSlot)) -- elog(ERROR, "failed to fetch tuple being updated"); -+ /* -+ * Make the new tuple by combining plan's output tuple -+ * with the old tuple being updated. -+ */ -+ oldSlot = resultRelInfo->ri_oldTupleSlot; -+ if (oldtuple != NULL) -+ { -+ /* Use the wholerow junk attr as the old tuple. */ -+ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -+ } -+ else -+ { -+ /* Fetch the most recent version of old tuple. 
*/ -+ Relation relation = resultRelInfo->ri_RelationDesc; -+ -+ Assert(tupleid != NULL); -+ if (!table_tuple_fetch_row_version(relation, tupleid, -+ SnapshotAny, -+ oldSlot)) -+ elog(ERROR, "failed to fetch tuple being updated"); -+ } -+ slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, -+ oldSlot); ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -2712,14 +2742,19 @@ ExecModifyTable(PlanState *pstate) } -- slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, -- oldSlot); + slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, + oldSlot); ++ } /* Now apply the update. */ - slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, @@ -253,7 +220,7 @@ index 0780554246..a90f3a495d 100644 planSlot, &node->mt_epqstate, estate, true, /* processReturning */ node->canSetTag, -@@ -2737,7 +2768,10 @@ ExecModifyTable(PlanState *pstate) +@@ -2736,7 +2771,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. */ if (slot) @@ -264,7 +231,7 @@ index 0780554246..a90f3a495d 100644 } /* -@@ -2753,6 +2787,7 @@ ExecModifyTable(PlanState *pstate) +@@ -2752,6 +2790,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -272,7 +239,7 @@ index 0780554246..a90f3a495d 100644 return NULL; } -@@ -2827,6 +2862,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2826,6 +2865,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -280,7 +247,7 @@ index 0780554246..a90f3a495d 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -2923,6 +2959,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -2922,6 +2962,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -294,7 +261,7 @@ index 0780554246..a90f3a495d 100644 /* * Now we may initialize the subplan. 
*/ -@@ -3004,6 +3047,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3002,6 +3049,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index 3d72d2e7..04fae9aa 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -24,7 +24,7 @@ index d0e5bc26a7..5ca196518e 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index ef0f9577ab..95858960d5 100644 +index d5e46098c2..d3c02c1def 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -77,10 +77,10 @@ index ef2fd46092..8551733c55 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index ad0aa8dd9d..a2715efa09 100644 +index 2f6e66b641..d4a1e48c20 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c -@@ -663,6 +663,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, +@@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, resultRelInfo->ri_projectNewInfoValid = true; } @@ -94,7 +94,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3524,6 +3531,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -102,7 +102,7 @@ index ad0aa8dd9d..a2715efa09 100644 CHECK_FOR_INTERRUPTS(); -@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3565,6 +3573,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3572,6 +3582,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3605,7 +3623,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index ad0aa8dd9d..a2715efa09 100644 &isNull); if (isNull) { -@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3642,6 +3662,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3651,6 +3673,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index ad0aa8dd9d..a2715efa09 100644 return slot; } -@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3681,7 +3704,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index ad0aa8dd9d..a2715efa09 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3729,7 +3753,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index ad0aa8dd9d..a2715efa09 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3760,9 +3785,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,59 +190,25 @@ index ad0aa8dd9d..a2715efa09 100644 break; case CMD_UPDATE: -@@ -3827,38 +3855,46 @@ ExecModifyTable(PlanState *pstate) +@@ -3770,6 +3798,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); -- /* -- * Make the new tuple by combining plan's output tuple with -- * the old tuple being updated. -- */ -- oldSlot = resultRelInfo->ri_oldTupleSlot; -- if (oldtuple != NULL) -- { -- /* Use the wholerow junk attr as the old tuple. */ -- ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -- } -- else ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ + /* Do nothing in case tuple was modified in pg_pathman: */ + if (!estate->es_original_tuple) - { -- /* Fetch the most recent version of old tuple. */ -- Relation relation = resultRelInfo->ri_RelationDesc; -+ /* -+ * Make the new tuple by combining plan's output tuple -+ * with the old tuple being updated. -+ */ -+ oldSlot = resultRelInfo->ri_oldTupleSlot; -+ if (oldtuple != NULL) -+ { -+ /* Use the wholerow junk attr as the old tuple. */ -+ ExecForceStoreHeapTuple(oldtuple, oldSlot, false); -+ } -+ else -+ { -+ /* Fetch the most recent version of old tuple. 
*/ -+ Relation relation = resultRelInfo->ri_RelationDesc; - -- if (!table_tuple_fetch_row_version(relation, tupleid, -- SnapshotAny, -- oldSlot)) -- elog(ERROR, "failed to fetch tuple being updated"); -+ if (!table_tuple_fetch_row_version(relation, tupleid, -+ SnapshotAny, -+ oldSlot)) -+ elog(ERROR, "failed to fetch tuple being updated"); -+ } -+ slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, -+ oldSlot, NULL); -+ context.GetUpdateNewTuple = internalGetUpdateNewTuple; -+ context.relaction = NULL; - } -- slot = internalGetUpdateNewTuple(resultRelInfo, context.planSlot, -- oldSlot, NULL); -- context.GetUpdateNewTuple = internalGetUpdateNewTuple; -- context.relaction = NULL; ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3793,14 +3828,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } /* Now apply the update. */ - slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, @@ -260,7 +226,7 @@ index ad0aa8dd9d..a2715efa09 100644 true, false, node->canSetTag, NULL, NULL); break; -@@ -3876,7 +3912,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3818,7 +3858,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. 
*/ if (slot) @@ -271,7 +237,7 @@ index ad0aa8dd9d..a2715efa09 100644 } /* -@@ -3892,6 +3931,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3834,6 +3877,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -279,7 +245,7 @@ index ad0aa8dd9d..a2715efa09 100644 return NULL; } -@@ -3966,6 +4006,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3908,6 +3952,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -287,7 +253,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4066,6 +4107,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4008,6 +4053,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -301,7 +267,7 @@ index ad0aa8dd9d..a2715efa09 100644 /* * Now we may initialize the subplan. */ -@@ -4157,6 +4205,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4102,6 +4154,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } From 084e2645a8ccd85feeaf860935a971381d8c84f7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 7 Apr 2023 15:24:20 +0300 Subject: [PATCH 1098/1124] [PGPRO-7928] Fix for REL_14_STABLE diff Tags: pg_pathman --- patches/REL_14_STABLE-pg_pathman-core.diff | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff index af130c15..a6ac1afa 100644 --- a/patches/REL_14_STABLE-pg_pathman-core.diff +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -353,6 +353,19 @@ index ee5ad3c058..dc474819d7 100644 PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ /* +diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h +index 33e6c14e81..abd9bba23e 100644 +--- a/src/include/utils/snapmgr.h ++++ 
b/src/include/utils/snapmgr.h +@@ -53,7 +53,7 @@ extern TimestampTz GetSnapshotCurrentTimestamp(void); + extern TimestampTz GetOldSnapshotThresholdTimestamp(void); + extern void SnapshotTooOldMagicForTest(void); + +-extern bool FirstSnapshotSet; ++extern PGDLLIMPORT bool FirstSnapshotSet; + + extern PGDLLIMPORT TransactionId TransactionXmin; + extern PGDLLIMPORT TransactionId RecentXmin; diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm index de22c9ba2c..c8be5323b8 100644 --- a/src/tools/msvc/Install.pm From b1f19b7e331ee0d9d6c1866b301cc30fce856ad0 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 1 Jun 2023 21:39:56 +0300 Subject: [PATCH 1099/1124] Travis-CI: added clang15 for PostgreSQL v11-v13 and removed v10 --- .travis.yml | 6 ------ Dockerfile.tmpl | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index dd63d98f..81a40e18 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,9 +30,3 @@ env: - PG_VERSION=12 - PG_VERSION=11 LEVEL=hardcore - PG_VERSION=11 - - PG_VERSION=10 LEVEL=hardcore - - PG_VERSION=10 - -jobs: - allow_failures: - - env: PG_VERSION=10 LEVEL=nightmare diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 0a25ad14..309719de 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -9,7 +9,7 @@ RUN apk add --no-cache \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ - clang clang-analyzer; + clang clang15 clang-analyzer; # Install fresh valgrind RUN apk add valgrind \ From d752a8071e79ac54ad21d8df90ad38e662a17921 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Thu, 1 Jun 2023 01:32:59 +0300 Subject: [PATCH 1100/1124] PGPRO-8238, PGPRO-8122: Fix build with master at 5df319f3d. Correct number of args in ExecInitRangeTable(), ExecInsertIndexTuples(), ExecBRUpdateTriggers() and ExecBRDeleteTriggers(). Caused by: - b803b7d132e3505ab77c29acf91f3d1caa298f95 Fill EState.es_rteperminfos more systematically. 
- 19d8e2308bc51ec4ab993ce90077342c915dd116 Ignore BRIN indexes when checking for HOT updates - 9321c79c86e6a6a4eac22e2235a21a8b68388723 Fix concurrent update issues with MERGE. Tags: pg_pathman --- src/include/compat/pg_compat.h | 26 ++++++++++++++++++++------ src/utility_stmt_hooking.c | 8 ++++++-- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 4ae249e6..bc9323ae 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -779,7 +779,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecBRUpdateTriggers() */ -#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL, NULL) +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ #define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -809,7 +814,12 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecBRDeleteTriggers() */ -#if PG_VERSION_NUM >= 110000 +#if PG_VERSION_NUM >= 160000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot), NULL, NULL) +#elif PG_VERSION_NUM >= 110000 #define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ fdw_trigtuple, epqslot) \ ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -1028,15 +1038,19 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecInsertIndexTuples. Since 12 slot contains tupleid. * Since 14: new fields "resultRelInfo", "update". 
+ * Since 16: new bool field "onlySummarizing". */ -#if PG_VERSION_NUM >= 140000 -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#if PG_VERSION_NUM >= 160000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes), (onlySummarizing)) +#elif PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) #elif PG_VERSION_NUM >= 120000 -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) #else -#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes) \ +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) #endif diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index d1d9010c..704387d8 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -564,10 +564,14 @@ PathmanCopyFrom( #if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ /* - * Call ExecInitRangeTable() should be first because in 14 it initializes + * Call ExecInitRangeTable() should be first because in 14+ it 
initializes * field "estate->es_result_relations": */ +#if PG_VERSION_NUM >= 160000 + ExecInitRangeTable(estate, range_table, cstate->rteperminfos); +#else ExecInitRangeTable(estate, range_table); +#endif estate->es_result_relations = (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); estate->es_result_relations[0] = parent_rri; @@ -749,7 +753,7 @@ PathmanCopyFrom( /* ... and create index entries for it */ if (child_rri->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, - slot, &(tuple->t_self), estate, false, false, NULL, NIL); + slot, &(tuple->t_self), estate, false, false, NULL, NIL, false); } #ifdef PG_SHARDMAN /* Handle foreign tables */ From 4f9a6024ee6e6fd99de44e8f254469db6564f599 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Thu, 22 Jun 2023 11:17:15 +0300 Subject: [PATCH 1101/1124] PGPRO-8166: Fix build with vanilla at db93e739ac. Tags: pg_pathman --- expected/pathman_column_type_2.out | 203 +++++++++++++++++++++++++++++ expected/pathman_join_clause_5.out | 160 +++++++++++++++++++++++ src/hooks.c | 8 +- src/include/compat/pg_compat.h | 17 ++- 4 files changed, 383 insertions(+), 5 deletions(-) create mode 100644 expected/pathman_column_type_2.out create mode 100644 expected/pathman_join_clause_5.out diff --git a/expected/pathman_column_type_2.out b/expected/pathman_column_type_2.out new file mode 100644 index 00000000..0fbd0793 --- /dev/null +++ b/expected/pathman_column_type_2.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. 
+ */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + 
get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 
+NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. + */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); 
+SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause_5.out b/expected/pathman_join_clause_5.out new file mode 100644 index 00000000..179f50f7 --- /dev/null +++ b/expected/pathman_join_clause_5.out @@ -0,0 +1,160 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM 
test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), 
(2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 
+ 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/src/hooks.c b/src/hooks.c index 437c89a6..2ff2667c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -449,12 +449,12 @@ pathman_rel_pathlist_hook(PlannerInfo *root, tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->lt_opr, NULL, false); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->lt_opr, NULL, false); if (pathkeys) pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->gt_opr, NULL, false); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->gt_opr, NULL, false); if (pathkeys) pathkeyDesc = (PathKey *) linitial(pathkeys); } diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index bc9323ae..e75ab1c4 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1208,13 +1208,18 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * make_restrictinfo() + * In >=16 3th and 9th arguments were removed (b448f1c8d83) * In >=14 new argument was added (55dc86eca70) */ +#if PG_VERSION_NUM >= 160000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (p), (sl), (rr), (or)) +#else #if PG_VERSION_NUM >= 140000 #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) #else #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), 
(ipd), (od), (p), (sl), (rr), (or), (nr)) -#endif +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ /* * pull_varnos() @@ -1226,4 +1231,14 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define pull_varnos_compat(r, n) pull_varnos(n) #endif +/* + * build_expression_pathkey() + * In >=16 argument was removed (b448f1c8d83) + */ +#if PG_VERSION_NUM >= 160000 +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, opno, rel, create_it) +#else +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) +#endif + #endif /* PG_COMPAT_H */ From ac5f05a5b4ae9a97cc9c1517bbd2ba26309209b0 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 28 Jun 2023 00:17:27 +0300 Subject: [PATCH 1102/1124] [PGPRO-8370] Fix build with vanilla at f5c446e336 Tags: pg_pathman --- src/include/compat/pg_compat.h | 13 ++++++++++++- src/partition_filter.c | 4 ++-- src/partition_router.c | 6 +++--- src/utility_stmt_hooking.c | 8 ++++++++ 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index e75ab1c4..5a12b528 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -1208,11 +1208,12 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * make_restrictinfo() + * In >=16 4th, 5th and 9th arguments were added (991a3df227e) * In >=16 3th and 9th arguments were removed (b448f1c8d83) * In >=14 new argument was added (55dc86eca70) */ #if PG_VERSION_NUM >= 160000 -#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (p), (sl), (rr), (or)) +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), false, false, (p), (sl), (rr), 
NULL, (or)) #else #if PG_VERSION_NUM >= 140000 #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) @@ -1241,4 +1242,14 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) #endif +/* + * EvalPlanQualInit() + * In >=16 argument was added (70b42f27902) + */ +#if PG_VERSION_NUM >= 160000 +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam, NIL) +#else +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) +#endif + #endif /* PG_COMPAT_H */ diff --git a/src/partition_filter.c b/src/partition_filter.c index 78ad126b..d4cf8308 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -345,8 +345,8 @@ scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, translated_vars); - /* Check permissions for partition */ - ExecCheckPermissions(list_make1(child_rte), list_make1(child_perminfo), true); + /* Check permissions for one partition */ + ExecCheckOneRtePermissions(child_rte, child_perminfo, true); #else /* Build Var translation list for 'inserted_cols' */ make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); diff --git a/src/partition_router.c b/src/partition_router.c index bd081218..4a597a13 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -170,9 +170,9 @@ partition_router_begin(CustomScanState *node, EState *estate, int eflags) /* Remember current relation we're going to delete from */ state->current_rri = 
estate->es_result_relation_info; - EvalPlanQualInit(&state->epqstate, estate, - state->subplan, NIL, - state->epqparam); + EvalPlanQualInit_compat(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index 704387d8..83bfa680 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -517,6 +517,14 @@ PathmanDoCopy(const CopyStmt *stmt, } else { +#if PG_VERSION_NUM >= 160000 /* for commit f75cec4fff87 */ + /* + * Forget current RangeTblEntries and RTEPermissionInfos. + * Standard DoCopy will create new ones. + */ + pstate->p_rtable = NULL; + pstate->p_rteperminfos = NULL; +#endif /* Call standard DoCopy using a new CopyStmt */ DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); } From 144d954c9dcc6a296926572398d460f44f49f482 Mon Sep 17 00:00:00 2001 From: Maxim Orlov Date: Mon, 26 Jun 2023 12:37:31 +0300 Subject: [PATCH 1103/1124] Fix parallel installcheck-world Create unique db objects in order not to mess with other tests. 
tags: pg_pathman --- expected/pathman_CVE-2020-14350.out | 22 +-- expected/pathman_permissions.out | 204 ++++++++++++++-------------- expected/pathman_permissions_1.out | 204 ++++++++++++++-------------- sql/pathman_CVE-2020-14350.sql | 22 +-- sql/pathman_permissions.sql | 110 +++++++-------- 5 files changed, 281 insertions(+), 281 deletions(-) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out index c4250097..a48e182f 100644 --- a/expected/pathman_CVE-2020-14350.out +++ b/expected/pathman_CVE-2020-14350.out @@ -7,15 +7,15 @@ DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; -DROP ROLE IF EXISTS regress_hacker; +DROP ROLE IF EXISTS pathman_regress_hacker; SET client_min_messages = 'notice'; GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; -CREATE ROLE regress_hacker LOGIN; +CREATE ROLE pathman_regress_hacker LOGIN; -- Test 1 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -26,7 +26,7 @@ CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_tota RETURNS bigint AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; END $$ LANGUAGE plpgsql; @@ -53,7 +53,7 @@ SELECT pg_sleep(1); (1 row) -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -62,8 +62,8 @@ SHOW is_superuser; -- Test 2 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; SHOW 
is_superuser; is_superuser -------------- @@ -74,7 +74,7 @@ CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANY RETURNS REGCLASS AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); END $$ LANGUAGE plpgsql; @@ -89,7 +89,7 @@ SELECT create_range_partitions('test2', 'i', 0, 1); INSERT INTO test2 values(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; is_superuser -------------- @@ -112,5 +112,5 @@ NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to sequence test2_seq drop cascades to table test2_1 drop cascades to table test2_2 -DROP ROLE regress_hacker; +DROP ROLE pathman_regress_hacker; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 04b1112d..a29865d0 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -2,107 +2,107 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT 
create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); create_range_partitions ------------------------- 2 (1 row) /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval --------------------------+------+----------+---------------- - permissions.user1_table | id | 2 | 10 + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback | spawn_using_bgw --------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | | f + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f 
| t | | f (1 row) /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" -SELECT set_auto('permissions.user1_table', false); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" /* No rights to insert, should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); -ERROR: permission denied for parent relation "user1_table" -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON 
permissions.user1_table TO user2; /* per-column ACL */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); - prepend_range_partition ---------------------------- - permissions.user1_table_4 +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 (1 row) SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ - attname | attacl -----------+----------------- - a | {user2=w/user1} + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} cmax | cmin | ctid | @@ -113,8 +113,8 @@ ORDER BY attname; /* check ACL for each column */ (8 rows) /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; id | a ----+--- 35 | 0 @@ -122,76 +122,76 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE 
parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+-------------------------------------- - user1_table_2 | {user1=arwdDxt/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxt/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxt/user1,user2=ar/user1} +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+---------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} (3 rows) /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); set_auto ---------- (1 row) /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; ERROR: no suitable partition for key '55' /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); -NOTICE: 10 rows copied from permissions.user1_table_1 -NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_4 -NOTICE: 0 rows copied from 
permissions.user1_table_5 -NOTICE: 1 rows copied from permissions.user1_table_6 +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 drop_partitions ----------------- 5 (1 row) /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); -NOTICE: 9 rows copied from permissions.user2_table_0 -NOTICE: 11 rows copied from permissions.user2_table_1 -NOTICE: 10 rows copied from permissions.user2_table_2 +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 drop_partitions ----------------- 3 (1 row) /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT 
create_range_partitions('permissions.dropped_column', 'val', 1, 10); create_range_partitions ------------------------- @@ -203,11 +203,11 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} (3 rows) ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ @@ -222,12 +222,12 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} (4 rows) ALTER TABLE 
permissions.dropped_column DROP COLUMN b; /* DROP "b" */ @@ -242,22 +242,22 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} - permissions.dropped_column_5 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} (5 rows) DROP TABLE permissions.dropped_column CASCADE; NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out index a50aa524..dc976aae 100644 --- a/expected/pathman_permissions_1.out +++ b/expected/pathman_permissions_1.out @@ -2,107 +2,107 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +CREATE ROLE 
pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); create_range_partitions ------------------------- 2 (1 row) /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval --------------------------+------+----------+---------------- - 
permissions.user1_table | id | 2 | 10 + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback | spawn_using_bgw --------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | | f + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f (1 row) /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" -SELECT set_auto('permissions.user1_table', false); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" /* No rights to insert, should fail */ -SET ROLE user2; 
+SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); -ERROR: permission denied for parent relation "user1_table" -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); - prepend_range_partition ---------------------------- - permissions.user1_table_4 +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 (1 row) SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ - attname | attacl -----------+----------------- - a | {user2=w/user1} + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} cmax | cmin | ctid | @@ -113,8 
+113,8 @@ ORDER BY attname; /* check ACL for each column */ (8 rows) /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; id | a ----+--- 35 | 0 @@ -122,76 +122,76 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+--------------------------------------- - user1_table_2 | {user1=arwdDxtm/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxtm/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxtm/user1,user2=ar/user1} +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+----------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} (3 rows) /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; NOTICE: Insufficient priviliges /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT 
set_auto('permissions.pathman_user1_table', false); set_auto ---------- (1 row) /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; ERROR: no suitable partition for key '55' /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); -NOTICE: 10 rows copied from permissions.user1_table_1 -NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_4 -NOTICE: 0 rows copied from permissions.user1_table_5 -NOTICE: 1 rows copied from permissions.user1_table_6 +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 drop_partitions ----------------- 5 (1 row) /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); -NOTICE: 9 rows copied from permissions.user2_table_0 -NOTICE: 11 rows copied from permissions.user2_table_1 -NOTICE: 10 rows copied from permissions.user2_table_2 +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 
rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 drop_partitions ----------------- 3 (1 row) /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); create_range_partitions ------------------------- @@ -203,11 +203,11 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} (3 rows) ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ @@ -222,12 +222,12 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val 
| {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} (4 rows) ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ @@ -242,22 +242,22 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} - permissions.dropped_column_5 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} (5 rows) DROP TABLE permissions.dropped_column CASCADE; NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP 
OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql index e3730744..07daa617 100644 --- a/sql/pathman_CVE-2020-14350.sql +++ b/sql/pathman_CVE-2020-14350.sql @@ -8,24 +8,24 @@ DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE IF EXISTS test1 CASCADE; DROP TABLE IF EXISTS test2 CASCADE; -DROP ROLE IF EXISTS regress_hacker; +DROP ROLE IF EXISTS pathman_regress_hacker; SET client_min_messages = 'notice'; GRANT CREATE ON SCHEMA public TO PUBLIC; CREATE EXTENSION pg_pathman; -CREATE ROLE regress_hacker LOGIN; +CREATE ROLE pathman_regress_hacker LOGIN; -- Test 1 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) RETURNS bigint AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; END $$ LANGUAGE plpgsql; @@ -39,20 +39,20 @@ SELECT partition_table_concurrently('test1', 10, 1); SELECT pg_sleep(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; -- Test 2 RESET ROLE; -ALTER ROLE regress_hacker NOSUPERUSER; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) RETURNS REGCLASS AS $$ BEGIN - ALTER ROLE regress_hacker SUPERUSER; + ALTER ROLE pathman_regress_hacker SUPERUSER; RETURN 
create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); END $$ LANGUAGE plpgsql; @@ -64,7 +64,7 @@ SELECT create_range_partitions('test2', 'i', 0, 1); INSERT INTO test2 values(1); -- Test result (must be 'off') -SET ROLE regress_hacker; +SET ROLE pathman_regress_hacker; SHOW is_superuser; -- Cleanup @@ -73,6 +73,6 @@ DROP FUNCTION _partition_data_concurrent(oid,integer); DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); DROP TABLE test1 CASCADE; DROP TABLE test2 CASCADE; -DROP ROLE regress_hacker; +DROP ROLE pathman_regress_hacker; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 49e1fc18..3e2cf92a 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -4,137 +4,137 @@ SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; -/* Grant SELECT to user2 */ -SET ROLE user1; 
-GRANT SELECT ON permissions.user1_table TO user2; +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; SELECT * FROM pathman_config_params; /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +SELECT set_auto('permissions.pathman_user1_table', false); /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; +WHERE partrel = 'permissions.pathman_user1_table'::regclass; /* No rights to insert, should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DO $$ BEGIN - INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT 
INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ /* Try to drop partition, should fail */ DO $$ BEGIN - SELECT drop_range_partition('permissions.user1_table_4'); + SELECT drop_range_partition('permissions.pathman_user1_table_4'); EXCEPTION WHEN insufficient_privilege THEN RAISE NOTICE 'Insufficient priviliges'; END$$; /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT 
set_auto('permissions.pathman_user1_table', false); /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); @@ -168,10 +168,10 @@ DROP TABLE permissions.dropped_column CASCADE; /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; DROP SCHEMA permissions; From 789e1117119d2347a8b86a3201359a8d45d98865 Mon Sep 17 00:00:00 2001 From: "Anton A. 
Melnikov" Date: Mon, 24 Jul 2023 11:05:37 +0300 Subject: [PATCH 1104/1124] PGPRO-8546: Create targetlist in partition filter and partition router nodes right with the parent_rti indexes. In accordance with the pointing of Tom Lane: https://www.postgresql.org/message-id/71315.1686243488%40sss.pgh.pa.us Tags: pg_pathman --- src/include/partition_filter.h | 4 +--- src/partition_filter.c | 39 +++------------------------------- src/partition_overseer.c | 2 +- src/partition_router.c | 7 +----- 4 files changed, 6 insertions(+), 46 deletions(-) diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 9b9f52f9..042b1d55 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -181,9 +181,7 @@ TupleConversionMap * build_part_tuple_map_child(Relation child_rel); void destroy_tuple_map(TupleConversionMap *tuple_map); -List * pfilter_build_tlist(Plan *subplan); - -void pfilter_tlist_fix_resjunk(CustomScan *subplan); +List * pfilter_build_tlist(Plan *subplan, Index varno); /* Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, diff --git a/src/partition_filter.c b/src/partition_filter.c index d4cf8308..4391bcf3 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -812,12 +812,7 @@ make_partition_filter(Plan *subplan, cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); - - /* Prepare 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ - cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); - ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); - pfilter_tlist_fix_resjunk(cscan); + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); /* Pack partitioned table's Oid and conflict_action */ cscan->custom_private = list_make4(makeInteger(parent_relid), @@ -1076,7 +1071,7 @@ partition_filter_explain(CustomScanState *node, 
List *ancestors, ExplainState *e * Build partition filter's target list pointing to subplan tuple's elements. */ List * -pfilter_build_tlist(Plan *subplan) +pfilter_build_tlist(Plan *subplan, Index varno) { List *result_tlist = NIL; ListCell *lc; @@ -1096,7 +1091,7 @@ pfilter_build_tlist(Plan *subplan) } else { - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ + Var *var = makeVar(varno, /* point to subplan's elements */ tle->resno, exprType((Node *) tle->expr), exprTypmod((Node *) tle->expr), @@ -1115,34 +1110,6 @@ pfilter_build_tlist(Plan *subplan) return result_tlist; } -/* - * resjunk Vars had its varattnos being set on nonexisting relation columns. - * For future processing service attributes should be indicated correctly. - */ -void -pfilter_tlist_fix_resjunk(CustomScan *css) -{ - ListCell *lc; - - foreach(lc, css->custom_scan_tlist) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - - if (!IsA(tle->expr, Const)) - { - Var *var = (Var *) tle->expr; - - if (tle->resjunk) - { - /* To make Var recognizable as service attribute. 
*/ - var->varattno = -1; - } - } - } - - return; -} - /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage diff --git a/src/partition_overseer.c b/src/partition_overseer.c index ffa770ba..d858374a 100644 --- a/src/partition_overseer.c +++ b/src/partition_overseer.c @@ -46,7 +46,7 @@ make_partition_overseer(Plan *subplan) cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, INDEX_VAR); cscan->custom_scan_tlist = subplan->targetlist; return &cscan->scan.plan; diff --git a/src/partition_router.c b/src/partition_router.c index 4a597a13..5f00e9b1 100644 --- a/src/partition_router.c +++ b/src/partition_router.c @@ -134,12 +134,7 @@ make_partition_router(Plan *subplan, int epq_param, Index parent_rti) cscan->scan.scanrelid = 0; /* Build an appropriate target list */ - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan); - - /* Fix 'custom_scan_tlist' for EXPLAIN (VERBOSE) */ - cscan->custom_scan_tlist = copyObject(cscan->scan.plan.targetlist); - ChangeVarNodes((Node *) cscan->custom_scan_tlist, INDEX_VAR, parent_rti, 0); - pfilter_tlist_fix_resjunk(cscan); + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); return &cscan->scan.plan; } From 7ed25e40d3ace75277614e1ebfd870ead5148ef5 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 12:45:11 +0300 Subject: [PATCH 1105/1124] travis-ci for v16 --- .travis.yml | 2 ++ Dockerfile.tmpl | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 81a40e18..411c98aa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,8 @@ notifications: on_failure: always env: + - PG_VERSION=16 LEVEL=hardcore + - PG_VERSION=16 - PG_VERSION=15 LEVEL=hardcore - PG_VERSION=15 - PG_VERSION=14 LEVEL=hardcore diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 309719de..4dd24ca5 100644 --- 
a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -9,7 +9,7 @@ RUN apk add --no-cache \ coreutils linux-headers \ make musl-dev gcc bison flex \ zlib-dev libedit-dev \ - clang clang15 clang-analyzer; + pkgconf icu-dev clang clang15 clang-analyzer; # Install fresh valgrind RUN apk add valgrind \ From 34430a5277e560ce1ccf84405357105e713b9b37 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 14:21:22 +0300 Subject: [PATCH 1106/1124] core patch for v16 --- patches/REL_16_STABLE-pg_pathman-core.diff | 547 +++++++++++++++++++++ 1 file changed, 547 insertions(+) create mode 100644 patches/REL_16_STABLE-pg_pathman-core.diff diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..63d88a38 --- /dev/null +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -0,0 +1,547 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 37c5e34cce..d4bad64db1 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel = XACT_READ_COMMITTED; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 851946a927..32758378c7 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result 
tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 4c5a7bbf62..7d638aa22d 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -561,6 +561,39 @@ ExecutorRewind(QueryDesc *queryDesc) + } + + ++/* ++ * ExecCheckOneRtePermissions ++ * Check access permissions for one RTE ++ * ++ * Returns true if permissions are adequate. Otherwise, throws an appropriate ++ * error if ereport_on_violation is true, or simply returns false otherwise. ++ * ++ * This function uses pg_pathman due to commit f75cec4fff, see PGPRO-7792 ++ */ ++bool ++ExecCheckOneRtePermissions(RangeTblEntry *rte, RTEPermissionInfo *perminfo, ++ bool ereport_on_violation) ++{ ++ bool result = true; ++ ++ Assert(OidIsValid(perminfo->relid)); ++ Assert(rte->relid == perminfo->relid); ++ ++ result = ExecCheckOneRelPerms(perminfo); ++ ++ if (!result) ++ { ++ if (ereport_on_violation) ++ aclcheck_error(ACLCHECK_NO_PRIV, ++ get_relkind_objtype(get_rel_relkind(perminfo->relid)), ++ get_rel_name(perminfo->relid)); ++ return false; ++ } ++ ++ return result; ++} ++ ++ + /* + * ExecCheckPermissions + * Check access permissions of relations mentioned in a query +@@ -856,6 +889,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. 
+ */ +@@ -2873,6 +2913,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_output_cid = parentestate->es_output_cid; + rcestate->es_queryEnv = parentestate->es_queryEnv; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. +diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 5005d8c0d1..e664848393 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3550,6 +3557,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3591,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3598,6 +3608,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call 
"ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3631,7 +3649,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? ++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3668,6 +3688,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3677,6 +3699,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3707,7 +3730,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3755,7 +3779,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ +@@ -3786,9 +3811,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3796,6 +3824,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3819,14 +3854,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL); + break; + +@@ -3844,7 +3884,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. + */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3860,6 +3903,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3934,6 +3978,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4035,6 +4080,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4117,6 +4169,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 011ec18015..7b4fcb2807 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 7d3b9446e6..20030111f4 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index ac02247947..c39ae13a8e 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -208,6 +208,9 @@ extern void standard_ExecutorFinish(QueryDesc *queryDesc); + extern void ExecutorEnd(QueryDesc *queryDesc); + extern void standard_ExecutorEnd(QueryDesc *queryDesc); + extern void ExecutorRewind(QueryDesc *queryDesc); ++extern bool ExecCheckOneRtePermissions(RangeTblEntry *rte, ++ RTEPermissionInfo *perminfo, ++ bool ereport_on_violation); + extern bool ExecCheckPermissions(List *rangeTable, + List *rteperminfos, bool ereport_on_violation); + extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation); +@@ -676,5 +679,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. 
++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. ++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index cb714f4a19..d34a103fc6 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -638,6 +638,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 05548d7c0a..37754370e0 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,22 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub lcopy + { + my $src = shift; +@@ -580,7 +596,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 9e05eb91b1..baedbb784a 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -40,7 +40,7 @@ my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -979,6 +979,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1082,6 +1083,22 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1106,23 +1123,59 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ if ( -f "contrib/$n/$d.in" ) ++ { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } ++ else + { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) ++ { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) ++ { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) ++ { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } From ceeeaa66e53bb72b9248acaf0ad05835a62ad140 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Sep 2023 16:36:37 +0300 Subject: [PATCH 1107/1124] Corrected some functions for v16 --- tests/cmocka/missing_basic.c | 17 +++++++++++++---- tests/cmocka/missing_stringinfo.c | 8 +++++++- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index 36d76160..d20eb87f 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -24,20 +24,29 @@ pfree(void *pointer) void ExceptionalCondition(const char *conditionName, +#if PG_VERSION_NUM < 160000 const char *errorType, +#endif const char *fileName, int lineNumber) { - if (!PointerIsValid(conditionName) || - !PointerIsValid(fileName) || - !PointerIsValid(errorType)) + if (!PointerIsValid(conditionName) || !PointerIsValid(fileName) +#if PG_VERSION_NUM < 160000 + || !PointerIsValid(errorType) +#endif + ) { printf("TRAP: ExceptionalCondition: bad arguments\n"); } else { printf("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", - errorType, conditionName, +#if PG_VERSION_NUM < 160000 + errorType, 
+#else + "", +#endif + conditionName, fileName, lineNumber); } diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c index edf4d8a4..80710a4e 100644 --- a/tests/cmocka/missing_stringinfo.c +++ b/tests/cmocka/missing_stringinfo.c @@ -206,7 +206,13 @@ appendStringInfoSpaces(StringInfo str, int count) * if necessary. */ void -appendBinaryStringInfo(StringInfo str, const char *data, int datalen) +appendBinaryStringInfo(StringInfo str, +#if PG_VERSION_NUM < 160000 + const char *data, +#else + const void *data, +#endif + int datalen) { Assert(str != NULL); From db938cca85e7dc42ee7090a5d9e12774e2cee782 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 27 Sep 2023 18:25:39 +0300 Subject: [PATCH 1108/1124] PGPRO-8546: Add core patch for REL_11_STABLE. Don't generate deforming jit code for tuples without user attributes. Without this patch an "ERROR: unknown alignment" may occur during jit compilation. Tags: pg_pathman --- README.md | 4 +- patches/REL_11_STABLE-pg_pathman-core.diff | 53 ++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 patches/REL_11_STABLE-pg_pathman-core.diff diff --git a/README.md b/README.md index 43d585ff..1394bc6f 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,8 @@ The `pg_pathman` module provides optimized partitioning mechanism and functions The extension is compatible with: - * PostgreSQL 11, 12, 13; - * PostgreSQL with core-patch: 14, 15; + * PostgreSQL 12, 13; + * PostgreSQL with core-patch: 11, 14, 15; * Postgres Pro Standard 11, 12, 13, 14, 15; * Postgres Pro Enterprise; diff --git a/patches/REL_11_STABLE-pg_pathman-core.diff b/patches/REL_11_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b3b08e0a --- /dev/null +++ b/patches/REL_11_STABLE-pg_pathman-core.diff @@ -0,0 +1,53 @@ +diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c +index 6384ac940d8..8b4f731e7a8 100644 +--- 
a/src/backend/jit/llvm/llvmjit_deform.c ++++ b/src/backend/jit/llvm/llvmjit_deform.c +@@ -104,6 +104,10 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, int natts) + + int attnum; + ++ /* don't generate code for tuples without user attributes */ ++ if (desc->natts == 0) ++ return NULL; ++ + mod = llvm_mutable_module(context); + + funcname = llvm_expand_funcname(context, "deform"); +diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c +index 12138e49577..8638ebc4ba1 100644 +--- a/src/backend/jit/llvm/llvmjit_expr.c ++++ b/src/backend/jit/llvm/llvmjit_expr.c +@@ -274,6 +274,7 @@ llvm_compile_expr(ExprState *state) + LLVMValueRef v_slot; + LLVMBasicBlockRef b_fetch; + LLVMValueRef v_nvalid; ++ LLVMValueRef l_jit_deform = NULL; + + b_fetch = l_bb_before_v(opblocks[i + 1], + "op.%d.fetch", i); +@@ -336,17 +337,20 @@ llvm_compile_expr(ExprState *state) + */ + if (desc && (context->base.flags & PGJIT_DEFORM)) + { +- LLVMValueRef params[1]; +- LLVMValueRef l_jit_deform; +- + l_jit_deform = +- slot_compile_deform(context, desc, ++ slot_compile_deform(context, ++ desc, + op->d.fetch.last_var); ++ } ++ ++ if (l_jit_deform) ++ { ++ LLVMValueRef params[1]; ++ + params[0] = v_slot; + + LLVMBuildCall(b, l_jit_deform, + params, lengthof(params), ""); +- + } + else + { From 35ab52f1f86795daf2992012e142ac84166116bf Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 3 Nov 2023 04:09:31 +0300 Subject: [PATCH 1109/1124] [PGPRO-9137] Fix for vanilla commits 178ee1d858, b1444a09dc Tags: pg_pathman --- expected/pathman_update_triggers_1.out | 198 +++++++++++++++++++++++++ src/include/partition_filter.h | 5 + src/partition_filter.c | 30 +++- 3 files changed, 232 insertions(+), 1 deletion(-) create mode 100644 expected/pathman_update_triggers_1.out diff --git a/expected/pathman_update_triggers_1.out b/expected/pathman_update_triggers_1.out new file mode 100644 index 00000000..5d26ac1e --- /dev/null +++ 
b/expected/pathman_update_triggers_1.out @@ -0,0 +1,198 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after 
update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: 
BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) 
+NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid 
+-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index 042b1d55..4aae0bbb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -119,6 +119,11 @@ typedef struct CmdType command_type; TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + Index parent_rti; /* Parent RT index for use of EXPLAIN, + see "ModifyTable::nominalRelation" */ +#endif } PartitionFilterState; diff --git a/src/partition_filter.c b/src/partition_filter.c index 4391bcf3..3d5e4bd3 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -815,10 +815,18 @@ make_partition_filter(Plan *subplan, cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); /* Pack partitioned table's Oid and conflict_action */ +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + cscan->custom_private = list_make5(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list, + makeInteger(command_type), + makeInteger(parent_rti)); +#else cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), returning_list, makeInteger(command_type)); +#endif return &cscan->scan.plan; } @@ -841,6 +849,9 @@ partition_filter_create_scan_state(CustomScan *node) state->on_conflict_action = intVal(lsecond(node->custom_private)); state->returning_list = (List *) lthird(node->custom_private); state->command_type = (CmdType) intVal(lfourth(node->custom_private)); +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + state->parent_rti = (Index) 
intVal(lfirst(list_nth_cell(node->custom_private, 4))); +#endif /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || @@ -875,7 +886,24 @@ partition_filter_begin(CustomScanState *node, EState *estate, int eflags) RPS_RRI_CB(NULL, NULL)); #if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ /* ResultRelInfo of partitioned table. */ - state->result_parts.init_rri = current_rri; + { + RangeTblEntry *rte = rt_fetch(current_rri->ri_RangeTableIndex, estate->es_range_table); + + if (rte->perminfoindex > 0) + state->result_parts.init_rri = current_rri; + else + { + /* + * Additional changes for 178ee1d858d: we cannot use current_rri + * because RTE for this ResultRelInfo has perminfoindex = 0. Need + * to use parent_rti (modify_table->nominalRelation) instead. + */ + Assert(state->parent_rti > 0); + state->result_parts.init_rri = estate->es_result_relations[state->parent_rti - 1]; + if (!state->result_parts.init_rri) + elog(ERROR, "cannot determine result info for partitioned table"); + } + } #endif } From 51edb67c59eb34187b841970833e55a8a9de4c9d Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 29 Nov 2023 18:41:37 +0300 Subject: [PATCH 1110/1124] [PGPRO-9251] Added new parameter PG_TEST_SKIP PG_TEST_SKIP parameter is used similarly to PG_TEST_EXTRA and is intended for skip pg_pathman regression tests. 
Tags: pg_pathman --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c3fe4038..b8a683a3 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,7 @@ DATA = pg_pathman--1.0--1.1.sql \ PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" +ifneq (pg_pathman,$(filter pg_pathman,$(PG_TEST_SKIP))) REGRESS = pathman_array_qual \ pathman_basic \ pathman_bgw \ @@ -63,7 +64,7 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 - +endif EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add From 9283ab7e4996cac120e78987ae7d0e69124815df Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Dec 2023 00:58:19 +0300 Subject: [PATCH 1111/1124] [PGPRO-9334] Corrections for isolation tests Tags: pg_pathman --- Makefile | 2 +- expected/for_update.out | 28 +- expected/insert_nodes.out | 130 +++-- expected/rollback_on_create_partitions.out | 618 ++++++++++++++------- specs/for_update.spec | 2 - specs/insert_nodes.spec | 5 +- specs/rollback_on_create_partitions.spec | 2 +- 7 files changed, 509 insertions(+), 278 deletions(-) diff --git a/Makefile b/Makefile index c3fe4038..ce7a3b0c 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions submake-isolation: $(MAKE) -C $(top_builddir)/src/test/isolation all -isolationcheck: | submake-isolation +isolationcheck: | submake-isolation temp-install $(MKDIR_P) isolation_output $(pg_isolation_regress_check) \ --temp-config=$(top_srcdir)/$(subdir)/conf.add \ diff --git a/expected/for_update.out b/expected/for_update.out index 3e41031e..ffd425e4 100644 --- a/expected/for_update.out +++ b/expected/for_update.out @@ -2,37 +2,49 @@ Parsed test spec with 2 sessions starting permutation: s1_b s1_update s2_select s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select: 
select * from test_tbl where id = 1; -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 step s1_r: rollback; starting permutation: s1_b s1_update s2_select_locked s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_r: rollback; step s2_select_locked: <... completed> -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 starting permutation: s1_b s1_update s2_select_locked s1_c create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_c: commit; step s2_select_locked: <... completed> -id val +id|val +--+--- +(0 rows) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 64758aef..5ff8d63d 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -2,122 +2,144 @@ Parsed test spec with 2 sessions starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT 
c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +(4 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 
201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s2r: ROLLBACK; 
-step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +PRIMARY KEY (id) +CHECK (((id >= 1) AND (id < 101))) +PRIMARY KEY (id) +CHECK (((id >= 101) AND (id < 201))) +PRIMARY KEY (id) +CHECK (((id >= 201) AND (id < 301))) +(6 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) diff --git a/expected/rollback_on_create_partitions.out b/expected/rollback_on_create_partitions.out index 3531107d..ee0c7c0f 100644 --- a/expected/rollback_on_create_partitions.out +++ b/expected/rollback_on_create_partitions.out @@ -5,64 +5,72 @@ step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: 
SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data create_partitions show_rel commit show_rel step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 
+range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback show_rel step begin: BEGIN; @@ -70,23 +78,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c commit show_rel step begin: BEGIN; @@ -94,23 +118,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step 
create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel rollback show_rel step begin: BEGIN; @@ -118,34 +158,50 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: 
NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel commit show_rel step begin: BEGIN; @@ -153,44 +209,60 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: 
NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ 
+range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel rollback show_rel step begin: BEGIN; @@ -198,28 +270,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN 
+step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel commit show_rel step begin: BEGIN; @@ -227,28 +316,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM 
pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel rollback show_rel step begin: BEGIN; @@ -256,32 +362,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied 
from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel commit show_rel step begin: BEGIN; @@ -289,32 +424,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on 
range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel rollback show_rel step begin: BEGIN; @@ -322,37 +486,55 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from 
range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel commit show_rel step begin: BEGIN; @@ -360,44 +542,62 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: 
NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM 
pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + diff --git a/specs/for_update.spec b/specs/for_update.spec index f7a8f758..c18cd4f8 100644 --- a/specs/for_update.spec +++ b/specs/for_update.spec @@ -19,8 +19,6 @@ step "s1_r" { rollback; } step "s1_update" { update test_tbl set id = 2 where id = 1; } session "s2" -step "s2_b" { begin; } -step "s2_c" { commit; } step "s2_select_locked" { select * from test_tbl where id = 1 for share; } step "s2_select" { select * from test_tbl where id = 1; } diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 3bb67746..5ceea0d4 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -17,18 +17,17 @@ session "s1" step "s1b" { BEGIN; } step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; } step "s1r" { ROLLBACK; } -step "s1c" { COMMIT; } session "s2" step "s2b" { BEGIN; } step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid WHERE i.inhparent = 'range_rel'::regclass ORDER BY c.oid; } diff --git 
a/specs/rollback_on_create_partitions.spec b/specs/rollback_on_create_partitions.spec index a24c2897..806e6072 100644 --- a/specs/rollback_on_create_partitions.spec +++ b/specs/rollback_on_create_partitions.spec @@ -22,7 +22,7 @@ step "rollback_a" { ROLLBACK TO SAVEPOINT a; } step "savepoint_b" { SAVEPOINT b; } step "rollback_b" { ROLLBACK TO SAVEPOINT b; } step "savepoint_c" { SAVEPOINT c; } -step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } +step "show_rel" { SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; } permutation "begin" "insert_data" "create_partitions" "show_rel" "rollback" "show_rel" From 47b75b0d57a720eaf0e6f732bd6a4f692aea904b Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 15 Dec 2023 13:55:41 +0300 Subject: [PATCH 1112/1124] [PGPRO-9334] Enable isolation tests Tags: pg_pathman --- .gitignore | 1 - Makefile | 26 ++++++++++------------ expected/insert_nodes.out | 46 ++++++++++++--------------------------- specs/insert_nodes.spec | 4 ++-- 4 files changed, 28 insertions(+), 49 deletions(-) diff --git a/.gitignore b/.gitignore index f627990d..1bc422a5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .deps -isolation_output results/* regression.diffs regression.out diff --git a/Makefile b/Makefile index ce7a3b0c..004f747f 100644 --- a/Makefile +++ b/Makefile @@ -64,11 +64,13 @@ REGRESS = pathman_array_qual \ pathman_views \ pathman_CVE-2020-14350 +ISOLATION = insert_nodes for_update rollback_on_create_partitions -EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add +REGRESS_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add +ISOLATION_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o missing_bitmapset.o rangeset_tests.o rangeset_tests -EXTRA_CLEAN = ./isolation_output $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) +EXTRA_CLEAN = $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) 
ifdef USE_PGXS PG_CONFIG=pg_config @@ -83,6 +85,14 @@ OBJS += src/declarative.o override PG_CPPFLAGS += -DENABLE_DECLARATIVE endif +# We cannot run isolation test for versions 12,13 in PGXS case +# because 'pg_isolation_regress' is not copied to install +# directory, see src/test/isolation/Makefile +ifeq ($(VNUM),$(filter 12% 13%,$(VNUM))) +undefine ISOLATION +undefine ISOLATION_OPTS +endif + include $(PGXS) else subdir = contrib/pg_pathman @@ -94,18 +104,6 @@ endif $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ -ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions - -submake-isolation: - $(MAKE) -C $(top_builddir)/src/test/isolation all - -isolationcheck: | submake-isolation temp-install - $(MKDIR_P) isolation_output - $(pg_isolation_regress_check) \ - --temp-config=$(top_srcdir)/$(subdir)/conf.add \ - --outputdir=./isolation_output \ - $(ISOLATIONCHECKS) - python_tests: $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 5ff8d63d..8f725216 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -11,30 +11,26 @@ step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE 
i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions @@ -48,32 +44,27 @@ step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -(4 rows) +(2 rows) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions @@ -87,34 +78,28 @@ step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY 
c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions @@ -131,15 +116,12 @@ step s1r: ROLLBACK; step s2r: ROLLBACK; step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; pg_get_constraintdef ------------------------------------ -PRIMARY KEY (id) CHECK (((id >= 1) AND (id < 101))) -PRIMARY KEY (id) CHECK (((id >= 101) AND (id < 201))) -PRIMARY KEY (id) CHECK (((id >= 201) AND (id < 301))) -(6 rows) +(3 rows) diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 5ceea0d4..a5d0c7f9 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -19,7 +19,7 @@ step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 
'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s1r" { ROLLBACK; } @@ -29,7 +29,7 @@ step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s2r" { ROLLBACK; } step "s2c" { COMMIT; } From 1857bde09f87f168ae1e218a92f337e471dba98b Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 22 Dec 2023 19:15:08 +0300 Subject: [PATCH 1113/1124] Correction for docker-compose.yml --- docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 471ab779..0544d859 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,2 +1,3 @@ -tests: +services: + tests: build: . From f5605c5dc340753410e958c6b1852691d87ec67a Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Mar 2024 12:40:03 +0300 Subject: [PATCH 1114/1124] [PGPRO-9977] Added new expected results after vanilla commit Tags: pg_pathman See b262ad440ede - Add better handling of redundant IS [NOT] NULL quals --- expected/pathman_hashjoin_6.out | 75 ++++++++++++++++++++++++++++++ expected/pathman_mergejoin_6.out | 80 ++++++++++++++++++++++++++++++++ 2 files changed, 155 insertions(+) create mode 100644 expected/pathman_hashjoin_6.out create mode 100644 expected/pathman_mergejoin_6.out diff --git a/expected/pathman_hashjoin_6.out b/expected/pathman_hashjoin_6.out new file mode 100644 index 00000000..1c57f49b --- /dev/null +++ b/expected/pathman_hashjoin_6.out @@ -0,0 +1,75 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index 
Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 +(11 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_6.out b/expected/pathman_mergejoin_6.out new file mode 100644 index 00000000..0cca2aef --- /dev/null +++ b/expected/pathman_mergejoin_6.out @@ -0,0 +1,80 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 
partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(10 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; From 07f0a98b060ed85fc52847b61654c6576dd2a586 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 27 Mar 2024 14:43:30 +0300 Subject: [PATCH 1115/1124] [PGPRO-9977] Updated patches for v15, v16 Tags: pg_pathman --- patches/REL_15_STABLE-pg_pathman-core.diff | 52 +++++++++++----------- patches/REL_16_STABLE-pg_pathman-core.diff | 50 ++++++++++----------- 2 files changed, 51 insertions(+), 51 deletions(-) diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff index 04fae9aa..b8db29fd 100644 --- a/patches/REL_15_STABLE-pg_pathman-core.diff +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ 
index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index d0e5bc26a7..5ca196518e 100644 +index 7a3d9b4b01..0c3d2dec6c 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index d0e5bc26a7..5ca196518e 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index d5e46098c2..d3c02c1def 100644 +index 87c7603f2b..9cc0bc0da8 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -45,7 +45,7 @@ index d5e46098c2..d3c02c1def 100644 return state->resvalue; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index ef2fd46092..8551733c55 100644 +index 0ba61fd547..29d93998b2 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) @@ -62,7 +62,7 @@ index ef2fd46092..8551733c55 100644 /* * Next, build the ExecRowMark array from the PlanRowMark(s), if any. */ -@@ -2811,6 +2818,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) +@@ -2849,6 +2856,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) rcestate->es_junkFilter = parentestate->es_junkFilter; rcestate->es_output_cid = parentestate->es_output_cid; @@ -77,7 +77,7 @@ index ef2fd46092..8551733c55 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 2f6e66b641..d4a1e48c20 100644 +index 1ad5dcb406..047508e0da 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, @@ -94,7 +94,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3524,6 +3531,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -102,7 +102,7 @@ index 2f6e66b641..d4a1e48c20 100644 CHECK_FOR_INTERRUPTS(); -@@ -3565,6 +3573,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -111,7 +111,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3572,6 +3582,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -126,7 +126,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3605,7 +3623,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -137,7 +137,7 @@ index 2f6e66b641..d4a1e48c20 100644 &isNull); if (isNull) { -@@ -3642,6 +3662,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -146,7 +146,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3651,6 +3673,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -154,7 +154,7 @@ index 2f6e66b641..d4a1e48c20 100644 return slot; } -@@ -3681,7 +3704,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -164,7 +164,7 @@ index 2f6e66b641..d4a1e48c20 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3729,7 +3753,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -174,7 +174,7 @@ index 2f6e66b641..d4a1e48c20 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3760,9 +3785,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -190,7 +190,7 @@ index 2f6e66b641..d4a1e48c20 100644 break; case CMD_UPDATE: -@@ -3770,6 +3798,13 @@ ExecModifyTable(PlanState *pstate) +@@ -3827,6 +3855,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -204,7 +204,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Make the new tuple by combining plan's output tuple with * the old tuple being updated. -@@ -3793,14 +3828,19 @@ ExecModifyTable(PlanState *pstate) +@@ -3850,14 +3885,19 @@ ExecModifyTable(PlanState *pstate) slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, oldSlot); context.relaction = NULL; @@ -223,10 +223,10 @@ index 2f6e66b641..d4a1e48c20 100644 + slot = ExecDelete(&context, estate->es_result_relation_info ? + estate->es_result_relation_info : resultRelInfo, + tupleid, oldtuple, - true, false, node->canSetTag, NULL, NULL); + true, false, node->canSetTag, NULL, NULL, NULL); break; -@@ -3818,7 +3858,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3875,7 +3915,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. 
*/ if (slot) @@ -237,7 +237,7 @@ index 2f6e66b641..d4a1e48c20 100644 } /* -@@ -3834,6 +3877,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3891,6 +3934,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -245,7 +245,7 @@ index 2f6e66b641..d4a1e48c20 100644 return NULL; } -@@ -3908,6 +3952,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3965,6 +4009,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -253,7 +253,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4008,6 +4053,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4067,6 +4112,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -267,7 +267,7 @@ index 2f6e66b641..d4a1e48c20 100644 /* * Now we may initialize the subplan. */ -@@ -4102,6 +4154,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4161,6 +4213,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecInitStoredGenerated(resultRelInfo, estate, operation); } @@ -303,10 +303,10 @@ index 8d46a781bb..150d70cb64 100644 /* flag for logging statements in this transaction */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index 82925b4b63..de23622ca2 100644 +index 7cd9b2f2bf..b31a7934a4 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h -@@ -659,5 +659,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, +@@ -662,5 +662,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache); @@ -325,7 +325,7 @@ index 82925b4b63..de23622ca2 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index f34d06eff4..0970e5f110 100644 +index 9f176b0e37..a65799dcce 100644 --- 
a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -624,6 +624,12 @@ typedef struct EState @@ -374,7 +374,7 @@ index 8de79c618c..c9226ba5ad 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index ef0a33c10f..27033b0a45 100644 +index 990c223a9b..cd5048f8d5 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -39,8 +39,8 @@ my $contrib_defines = {}; diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff index 63d88a38..50dad389 100644 --- a/patches/REL_16_STABLE-pg_pathman-core.diff +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -11,7 +11,7 @@ index bbf220407b..9a82a2db04 100644 pg_stat_statements \ pg_surgery \ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c -index 37c5e34cce..d4bad64db1 100644 +index 4a2ea4adba..7cadde5499 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; @@ -24,7 +24,7 @@ index 37c5e34cce..d4bad64db1 100644 bool DefaultXactDeferrable = false; bool XactDeferrable; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c -index 851946a927..32758378c7 100644 +index 6b7997465d..5e9e878d3b 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) @@ -117,7 +117,7 @@ index 4c5a7bbf62..7d638aa22d 100644 * ResultRelInfos needed by subplans are initialized from scratch when the * subplans themselves are initialized. 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c -index 5005d8c0d1..e664848393 100644 +index c84caeeaee..2a355607e9 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, @@ -134,7 +134,7 @@ index 5005d8c0d1..e664848393 100644 /* * ExecGetInsertNewTuple * This prepares a "new" tuple ready to be inserted into given result -@@ -3550,6 +3557,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3570,6 +3577,7 @@ ExecModifyTable(PlanState *pstate) HeapTupleData oldtupdata; HeapTuple oldtuple; ItemPointer tupleid; @@ -142,7 +142,7 @@ index 5005d8c0d1..e664848393 100644 CHECK_FOR_INTERRUPTS(); -@@ -3591,6 +3599,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3611,6 +3619,8 @@ ExecModifyTable(PlanState *pstate) context.mtstate = node; context.epqstate = &node->mt_epqstate; context.estate = estate; @@ -151,7 +151,7 @@ index 5005d8c0d1..e664848393 100644 /* * Fetch rows from subplan, and execute the required table modification -@@ -3598,6 +3608,14 @@ ExecModifyTable(PlanState *pstate) +@@ -3618,6 +3628,14 @@ ExecModifyTable(PlanState *pstate) */ for (;;) { @@ -166,7 +166,7 @@ index 5005d8c0d1..e664848393 100644 /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. 
It's a bit ugly -@@ -3631,7 +3649,9 @@ ExecModifyTable(PlanState *pstate) +@@ -3651,7 +3669,9 @@ ExecModifyTable(PlanState *pstate) bool isNull; Oid resultoid; @@ -177,7 +177,7 @@ index 5005d8c0d1..e664848393 100644 &isNull); if (isNull) { -@@ -3668,6 +3688,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3688,6 +3708,8 @@ ExecModifyTable(PlanState *pstate) if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); @@ -186,7 +186,7 @@ index 5005d8c0d1..e664848393 100644 /* * A scan slot containing the data that was actually inserted, -@@ -3677,6 +3699,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3697,6 +3719,7 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); @@ -194,7 +194,7 @@ index 5005d8c0d1..e664848393 100644 return slot; } -@@ -3707,7 +3730,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3727,7 +3750,8 @@ ExecModifyTable(PlanState *pstate) { /* ri_RowIdAttNo refers to a ctid attribute */ Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); @@ -204,7 +204,7 @@ index 5005d8c0d1..e664848393 100644 resultRelInfo->ri_RowIdAttNo, &isNull); -@@ -3755,7 +3779,8 @@ ExecModifyTable(PlanState *pstate) +@@ -3775,7 +3799,8 @@ ExecModifyTable(PlanState *pstate) */ else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) { @@ -214,7 +214,7 @@ index 5005d8c0d1..e664848393 100644 resultRelInfo->ri_RowIdAttNo, &isNull); /* shouldn't ever get a null result... 
*/ -@@ -3786,9 +3811,12 @@ ExecModifyTable(PlanState *pstate) +@@ -3806,9 +3831,12 @@ ExecModifyTable(PlanState *pstate) /* Initialize projection info if first time for this table */ if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitInsertProjection(node, resultRelInfo); @@ -230,7 +230,7 @@ index 5005d8c0d1..e664848393 100644 break; case CMD_UPDATE: -@@ -3796,6 +3824,13 @@ ExecModifyTable(PlanState *pstate) +@@ -3816,6 +3844,13 @@ ExecModifyTable(PlanState *pstate) if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) ExecInitUpdateProjection(node, resultRelInfo); @@ -244,7 +244,7 @@ index 5005d8c0d1..e664848393 100644 /* * Make the new tuple by combining plan's output tuple with * the old tuple being updated. -@@ -3819,14 +3854,19 @@ ExecModifyTable(PlanState *pstate) +@@ -3839,14 +3874,19 @@ ExecModifyTable(PlanState *pstate) slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, oldSlot); context.relaction = NULL; @@ -263,10 +263,10 @@ index 5005d8c0d1..e664848393 100644 + slot = ExecDelete(&context, estate->es_result_relation_info ? + estate->es_result_relation_info : resultRelInfo, + tupleid, oldtuple, - true, false, node->canSetTag, NULL, NULL); + true, false, node->canSetTag, NULL, NULL, NULL); break; -@@ -3844,7 +3884,10 @@ ExecModifyTable(PlanState *pstate) +@@ -3864,7 +3904,10 @@ ExecModifyTable(PlanState *pstate) * the work on next call. 
*/ if (slot) @@ -277,7 +277,7 @@ index 5005d8c0d1..e664848393 100644 } /* -@@ -3860,6 +3903,7 @@ ExecModifyTable(PlanState *pstate) +@@ -3880,6 +3923,7 @@ ExecModifyTable(PlanState *pstate) node->mt_done = true; @@ -285,7 +285,7 @@ index 5005d8c0d1..e664848393 100644 return NULL; } -@@ -3934,6 +3978,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -3954,6 +3998,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *l; int i; Relation rel; @@ -293,7 +293,7 @@ index 5005d8c0d1..e664848393 100644 /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); -@@ -4035,6 +4080,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4056,6 +4101,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } @@ -307,7 +307,7 @@ index 5005d8c0d1..e664848393 100644 /* * Now we may initialize the subplan. */ -@@ -4117,6 +4169,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +@@ -4138,6 +4190,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } } @@ -375,7 +375,7 @@ index ac02247947..c39ae13a8e 100644 #endif /* EXECUTOR_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h -index cb714f4a19..d34a103fc6 100644 +index 869465d6f8..6bdde351d7 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -638,6 +638,12 @@ typedef struct EState @@ -428,7 +428,7 @@ index 05548d7c0a..37754370e0 100644 sub CopyIncludeFiles diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm -index 9e05eb91b1..baedbb784a 100644 +index 6a79a0e037..93696f53ae 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -40,7 +40,7 @@ my @contrib_uselibpq = (); @@ -440,7 +440,7 @@ index 9e05eb91b1..baedbb784a 100644 my $contrib_extrasource = {}; my @contrib_excludes = ( 'bool_plperl', 'commit_ts', -@@ -979,6 +979,7 @@ sub AddContrib +@@ -980,6 +980,7 @@ sub 
AddContrib my $dn = $1; my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); $proj->AddReference($postgres); @@ -448,7 +448,7 @@ index 9e05eb91b1..baedbb784a 100644 AdjustContribProj($proj); push @projects, $proj; } -@@ -1082,6 +1083,22 @@ sub AddContrib +@@ -1083,6 +1084,22 @@ sub AddContrib return; } @@ -471,7 +471,7 @@ index 9e05eb91b1..baedbb784a 100644 sub GenerateContribSqlFiles { my $n = shift; -@@ -1106,23 +1123,59 @@ sub GenerateContribSqlFiles +@@ -1107,23 +1124,59 @@ sub GenerateContribSqlFiles substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); } From ba5c4c790074d5c923f6eb08ea46792c48a38f49 Mon Sep 17 00:00:00 2001 From: Svetlana Derevyanko Date: Thu, 28 Mar 2024 11:37:28 +0300 Subject: [PATCH 1116/1124] [PGPRO-9874] Added check on SearchSysCache returning NULL Tags: pg_pathman --- src/partition_creation.c | 3 ++- src/relation_info.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/partition_creation.c b/src/partition_creation.c index eb438b91..a0bdaa55 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -606,7 +606,8 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ /* Get typname of range_bound_type to perform cast */ typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); - Assert(HeapTupleIsValid(typeTuple)); + if (!HeapTupleIsValid(typeTuple)) + elog(ERROR, "cache lookup failed for type %u", range_bound_type); typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); ReleaseSysCache(typeTuple); diff --git a/src/relation_info.c b/src/relation_info.c index e3ba540c..db75646f 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1167,7 +1167,7 @@ invalidate_bounds_cache(void) /* * Get constraint expression tree of a partition. * - * build_check_constraint_name_internal() is used to build conname. + * build_check_constraint_name_relid_internal() is used to build conname. 
*/ Expr * get_partition_constraint_expr(Oid partition, bool raise_error) @@ -1193,6 +1193,16 @@ get_partition_constraint_expr(Oid partition, bool raise_error) } con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + if (!HeapTupleIsValid(con_tuple)) + { + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("cache lookup failed for constraint \"%s\" of partition \"%s\"", + conname, get_rel_name_or_relid(partition)))); + } + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conbin, &conbin_isnull); @@ -1204,9 +1214,6 @@ get_partition_constraint_expr(Oid partition, bool raise_error) ereport(ERROR, (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", conname, get_rel_name_or_relid(partition)))); - pfree(conname); - - return NULL; /* could not parse */ } pfree(conname); From eab5f7d2b1bc952d7aa452fbe01d87861a84f731 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Wed, 3 Apr 2024 14:35:37 +0300 Subject: [PATCH 1117/1124] [PGPRO-9977] Fix after vanilla commit 5f2e179bd31e Tags: pg_pathman --- src/include/compat/pg_compat.h | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 5a12b528..2cc9e96d 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -119,7 +119,10 @@ /* * CheckValidResultRel() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 170000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) +#elif PG_VERSION_NUM >= 100000 #define CheckValidResultRelCompat(rri, cmd) \ CheckValidResultRel((rri), (cmd)) #elif PG_VERSION_NUM >= 90500 @@ -237,18 +240,6 @@ #endif -/* - * CheckValidResultRel() - */ -#if PG_VERSION_NUM >= 100000 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri), (cmd)) -#elif PG_VERSION_NUM >= 90500 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) -#endif 
- - /* * create_append_path() */ From 5376dfba1b459de1935982964b2ba94a03fdcd6b Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 17 Apr 2024 10:47:46 +0300 Subject: [PATCH 1118/1124] PGPRO-9797: Temporary disable test pathman_upd_del.sql To be fixed in PGPRO-10100. Tags: joinsel --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index f6780044..f32398da 100644 --- a/Makefile +++ b/Makefile @@ -64,6 +64,8 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 + +REGRESS := $(filter-out pathman_upd_del, $(REGRESS)) endif ISOLATION = insert_nodes for_update rollback_on_create_partitions From f03128e83bd959756c191f27307615f28a042545 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 20 May 2024 14:25:13 +0300 Subject: [PATCH 1119/1124] Pgindent fixes --- src/include/compat/pg_compat.h | 82 +++++++++++++++++----------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 2cc9e96d..f6330627 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -65,11 +65,11 @@ */ #if PG_VERSION_NUM >= 110000 #define calc_nestloop_required_outer_compat(outer, inner) \ - calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ (inner)->parent->relids, PATH_REQ_OUTER(inner)) #else #define calc_nestloop_required_outer_compat(outer, inner) \ - calc_nestloop_required_outer((outer), (inner)) + calc_nestloop_required_outer((outer), (inner)) #endif @@ -120,14 +120,14 @@ * CheckValidResultRel() */ #if PG_VERSION_NUM >= 170000 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri), (cmd), NIL) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) #elif PG_VERSION_NUM >= 100000 -#define CheckValidResultRelCompat(rri, cmd) \ - 
CheckValidResultRel((rri), (cmd)) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) #elif PG_VERSION_NUM >= 90500 -#define CheckValidResultRelCompat(rri, cmd) \ - CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) #endif /* @@ -265,7 +265,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ (parallel_workers), false, NIL, -1, false) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 110000 @@ -277,7 +277,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ (parallel_workers), false, NIL, -1, false, NIL) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 100000 @@ -288,7 +288,7 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ false, NIL) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90600 @@ -299,12 +299,12 @@ #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), \ false, NIL, (parallel_workers)) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90500 #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer)) -#endif /* PG_VERSION_NUM */ +#endif /* PG_VERSION_NUM */ /* @@ -414,8 +414,8 @@ extern void create_plain_partial_paths(PlannerInfo *root, static inline Datum ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) { - ExprDoneCond isdone; - Datum result = ExecEvalExpr(expr, econtext, isnull, 
&isdone); + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); if (isdone != ExprSingleResult) elog(ERROR, "expression should return single value"); @@ -432,9 +432,9 @@ ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) static inline bool ExecCheck(ExprState *state, ExprContext *econtext) { - Datum ret; - bool isnull; - MemoryContext old_mcxt; + Datum ret; + bool isnull; + MemoryContext old_mcxt; /* short-circuit (here and in ExecInitCheck) for empty restriction list */ if (state == NULL) @@ -530,7 +530,7 @@ extern List *get_all_actual_clauses(List *restrictinfo_list); * get_rel_persistence() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -char get_rel_persistence(Oid relid); +char get_rel_persistence(Oid relid); #endif @@ -583,8 +583,8 @@ char get_rel_persistence(Oid relid); * make_restrictinfo() */ #if PG_VERSION_NUM >= 100000 -extern List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, - List *clause_list); +extern List *make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list); #endif @@ -607,9 +607,9 @@ extern Result *make_result(List *tlist, * McxtStatsInternal() */ #if PG_VERSION_NUM >= 90600 -void McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals); +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); #endif @@ -617,7 +617,7 @@ void McxtStatsInternal(MemoryContext context, int level, * oid_cmp() */ #if PG_VERSION_NUM >=90500 && PG_VERSION_NUM < 100000 -extern int oid_cmp(const void *p1, const void *p2); +extern int oid_cmp(const void *p1, const void *p2); #endif @@ -626,7 +626,7 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ #define parse_analyze_compat(parse_tree, 
query_string, param_types, nparams, \ query_env) \ parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ @@ -649,7 +649,7 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ #define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ nparams, query_env) \ pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), \ @@ -722,7 +722,7 @@ extern int oid_cmp(const void *p1, const void *p2); * set_dummy_rel_pathlist() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -void set_dummy_rel_pathlist(RelOptInfo *rel); +void set_dummy_rel_pathlist(RelOptInfo *rel); #endif @@ -744,8 +744,9 @@ extern void set_rel_consider_parallel(PlannerInfo *root, * in compat version the type of first argument is (Expr *) */ #if PG_VERSION_NUM >= 100000 -#if PG_VERSION_NUM >= 140000 /* function removed in 375398244168add84a884347625d14581a421e71 */ -extern TargetEntry *tlist_member_ignore_relabel(Expr * node, List * targetlist); +#if PG_VERSION_NUM >= 140000 /* function removed in + * 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr *node, List *targetlist); #endif #define tlist_member_ignore_relabel_compat(expr, targetlist) \ tlist_member_ignore_relabel((expr), (targetlist)) @@ -775,7 +776,7 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ (fdw_trigtuple), (newslot), NULL, NULL) -#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ #define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ tupleid, fdw_trigtuple, newslot) \ ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ @@ -826,7 +827,7 @@ extern 
AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, /* * ExecARDeleteTriggers() */ -#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ #define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ fdw_trigtuple, transition_capture) \ ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ @@ -970,9 +971,9 @@ extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, * we need access to entire tuple, not just its header. */ #ifdef XID_IS_64BIT -# define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) +#define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) #else -# define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) +#define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) #endif /* @@ -1115,9 +1116,10 @@ static inline TupleTableSlot * ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) { #if PG_VERSION_NUM >= 110000 - return ExecInitExtraTupleSlot(s,t); + return ExecInitExtraTupleSlot(s, t); #else - TupleTableSlot *res = ExecInitExtraTupleSlot(s); + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + if (t) ExecSetSlotDescriptor(res, t); @@ -1149,7 +1151,7 @@ CustomEvalParamExternCompat(Param *param, return prm; } -void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); /* * lnext() @@ -1210,8 +1212,8 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) #else #define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) -#endif /* #if PG_VERSION_NUM >= 140000 */ -#endif /* #if PG_VERSION_NUM >= 160000 */ +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ /* * 
pull_varnos() @@ -1243,4 +1245,4 @@ void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); #define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) #endif -#endif /* PG_COMPAT_H */ +#endif /* PG_COMPAT_H */ From d67141658ee6f8d7e7bf11f9c74fd1c14035445e Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 3 Jun 2024 22:28:56 +0300 Subject: [PATCH 1120/1124] [PGPRO-10286] Added error processing for some cases Tags: pg_pathman --- src/declarative.c | 4 ++++ src/init.c | 3 ++- src/partition_creation.c | 16 ++++++++++++++-- src/pathman_workers.c | 14 ++++++++++++-- src/pl_hash_funcs.c | 7 ++++++- 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/src/declarative.c b/src/declarative.c index 367df752..42e9ffac 100644 --- a/src/declarative.c +++ b/src/declarative.c @@ -237,6 +237,8 @@ handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ proc_name = list_make2(makeString(pathman_schema), @@ -296,6 +298,8 @@ handle_detach_partition(AlterTableCmd *cmd) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ proc_name = list_make2(makeString(pathman_schema), diff --git a/src/init.c b/src/init.c index 4341d406..1907d9dc 100644 --- a/src/init.c +++ b/src/init.c @@ -273,7 +273,8 @@ static bool init_pathman_relation_oids(void) { Oid schema = get_pathman_schema(); - Assert(schema != InvalidOid); + if (schema == InvalidOid) + return false; /* extension can be dropped by another backend */ /* Cache PATHMAN_CONFIG relation's Oid */ pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, schema); 
diff --git a/src/partition_creation.c b/src/partition_creation.c index a0bdaa55..d6080c85 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -585,6 +585,7 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ Oid parent_nsp = get_rel_namespace(parent_relid); char *parent_nsp_name = get_namespace_name(parent_nsp); char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); + char *pathman_schema; /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -611,10 +612,14 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); ReleaseSysCache(typeTuple); + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Construct call to create_single_range_partition() */ create_sql = psprintf( "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", - quote_identifier(get_namespace_name(get_pathman_schema())), + quote_identifier(pathman_schema), quote_identifier(parent_nsp_name), quote_identifier(get_rel_name(parent_relid)), IsInfinite(&bounds[0]) ? 
"NULL" : datum_to_cstring(bounds[0].value, range_bound_type), @@ -1195,6 +1200,8 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) /* Fetch pg_pathman's schema */ pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ copy_fkeys_proc_name = list_make2(makeString(pathman_schema), @@ -1564,6 +1571,7 @@ build_raw_hash_check_tree(Node *raw_expression, Oid hash_proc; TypeCacheEntry *tce; + char *pathman_schema; tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; @@ -1596,9 +1604,13 @@ build_raw_hash_check_tree(Node *raw_expression, hash_call->over = NULL; hash_call->location = -1; + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Build schema-qualified name of function get_hash_part_idx() */ get_hash_part_idx_proc = - list_make2(makeString(get_namespace_name(get_pathman_schema())), + list_make2(makeString(pathman_schema), makeString("get_hash_part_idx")); /* Call get_hash_part_idx() */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index 3eb82ab7..bf23bd94 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -520,6 +520,11 @@ bgw_main_concurrent_part(Datum main_arg) if (sql == NULL) { MemoryContext current_mcxt; + char *pathman_schema; + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* * Allocate SQL query in TopPathmanContext because current @@ -527,7 +532,7 @@ bgw_main_concurrent_part(Datum main_arg) */ current_mcxt = MemoryContextSwitchTo(TopPathmanContext); sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", - get_namespace_name(get_pathman_schema())); + pathman_schema); MemoryContextSwitchTo(current_mcxt); } @@ -700,6 +705,7 @@ 
partition_table_concurrently(PG_FUNCTION_ARGS) i; TransactionId rel_xmin; LOCKMODE lockmode = ShareUpdateExclusiveLock; + char *pathman_schema; /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) @@ -800,11 +806,15 @@ partition_table_concurrently(PG_FUNCTION_ARGS) start_bgworker_errmsg(concurrent_part_bgw); } + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Tell user everything's fine */ elog(NOTICE, "worker started, you can stop it " "with the following command: select %s.%s('%s');", - get_namespace_name(get_pathman_schema()), + pathman_schema, CppAsString(stop_concurrent_part_task), get_rel_name(relid)); diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index ddaaa8c0..4b08c324 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -122,6 +122,7 @@ build_hash_condition(PG_FUNCTION_ARGS) char *expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 part_count = PG_GETARG_UINT32(2), part_idx = PG_GETARG_UINT32(3); + char *pathman_schema; TypeCacheEntry *tce; @@ -141,9 +142,13 @@ build_hash_condition(PG_FUNCTION_ARGS) errmsg("no hash function for type %s", format_type_be(expr_type)))); + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", - get_namespace_name(get_pathman_schema()), + pathman_schema, get_func_name(tce->hash_proc), expr_cstr, part_count, From 92b69d85b777d6a9f9246017e77caeed532396c7 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Mon, 3 Jun 2024 23:22:14 +0300 Subject: [PATCH 1121/1124] Replaced deprecated python LooseVersion function --- tests/python/partitioning_test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py index 152b8b19..ba4b205f 
100644 --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -20,7 +20,7 @@ import time import unittest -from distutils.version import LooseVersion +from packaging.version import Version from testgres import get_new_node, get_pg_version, configure_testgres # set setup base logging config, it can be turned on by `use_python_logging` @@ -58,7 +58,7 @@ } logging.config.dictConfig(LOG_CONFIG) -version = LooseVersion(get_pg_version()) +version = Version(get_pg_version()) # Helper function for json equality @@ -448,7 +448,7 @@ def test_parallel_nodes(self): # Check version of postgres server # If version < 9.6 skip all tests for parallel queries - if version < LooseVersion('9.6.0'): + if version < Version('9.6.0'): return # Prepare test database @@ -485,7 +485,7 @@ def test_parallel_nodes(self): # Test parallel select with node.connect() as con: con.execute('set max_parallel_workers_per_gather = 2') - if version >= LooseVersion('10'): + if version >= Version('10'): con.execute('set min_parallel_table_scan_size = 0') else: con.execute('set min_parallel_relation_size = 0') @@ -1045,7 +1045,7 @@ def test_update_node_plan1(self): self.assertEqual(len(plan["Target Tables"]), 11) # Plan was seriously changed in vanilla since v14 - if version < LooseVersion('14'): + if version < Version('14'): expected_format = ''' { "Plans": [ From afbec7faa3b8c86a6ad4ab386a7abe6b43027d9c Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 18 Jun 2024 11:48:15 +0300 Subject: [PATCH 1122/1124] Update pg_pathman due to vanilla PostgreSQL. 1. Fix regression output due to fd0398fcb099. Changed tests: pathman_only and pathman_rowmarks. 2. Fix code due to commit d20d8fbd3e4d. 3. Fix comments in test files due to alternate outputs. 
--- expected/pathman_only.out | 26 +- expected/pathman_only_1.out | 26 +- expected/pathman_only_2.out | 26 +- expected/pathman_only_3.out | 26 +- expected/pathman_only_4.out | 299 +++++++++++++++++++++++ expected/pathman_rowmarks.out | 27 ++- expected/pathman_rowmarks_1.out | 27 ++- expected/pathman_rowmarks_2.out | 27 ++- expected/pathman_rowmarks_3.out | 27 ++- expected/pathman_rowmarks_4.out | 407 ++++++++++++++++++++++++++++++++ sql/pathman_only.sql | 26 +- sql/pathman_rowmarks.sql | 27 ++- src/pl_funcs.c | 11 + src/relation_info.c | 3 +- 14 files changed, 939 insertions(+), 46 deletions(-) create mode 100644 expected/pathman_only_4.out create mode 100644 expected/pathman_rowmarks_4.out diff --git a/expected/pathman_only.out b/expected/pathman_only.out index 1b9f6a6b..f44f2256 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. 
+ * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index b92a8eaf..ce6fd127 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. 
+ * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out index c37dd5f4..6aeadb76 100644 --- a/expected/pathman_only_2.out +++ b/expected/pathman_only_2.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out index 2f2fcc75..1999309d 100644 --- a/expected/pathman_only_3.out +++ b/expected/pathman_only_3.out @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_only_4.out b/expected/pathman_only_4.out new file mode 100644 index 00000000..fbcc397c --- /dev/null +++ b/expected/pathman_only_4.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 
rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 
+ -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test 
from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = (InitPlan 1).col1) + InitPlan 1 + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = (InitPlan 1).col1) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index ea047c9e..6d4611ee 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This 
test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out index 256b8637..063fca8d 100644 --- a/expected/pathman_rowmarks_1.out +++ b/expected/pathman_rowmarks_1.out @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. 
* - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out index 06fb88ac..91d7804e 100644 --- a/expected/pathman_rowmarks_2.out +++ b/expected/pathman_rowmarks_2.out @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out index af61e5f7..e8644292 100644 --- a/expected/pathman_rowmarks_3.out +++ b/expected/pathman_rowmarks_3.out @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
*/ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_4.out b/expected/pathman_rowmarks_4.out new file mode 100644 index 00000000..5fbec84d --- /dev/null +++ b/expected/pathman_rowmarks_4.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +-------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(19 rows) + +/* Two tables (execution) */ 
+SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + 
Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT 
id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index 88f4e88a..68dc4ca1 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -3,13 +3,31 @@ * NOTE: This test behaves differenly on PgPro * --------------------------------------------- * - * Since 12 (608b167f9f), CTEs which are scanned once are no longer an - * optimization fence, which changes practically all plans here. 
There is + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is * an option to forcibly make them MATERIALIZED, but we also need to run tests * on older versions, so create pathman_only_1.out instead. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index bb7719ea..8847b80c 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -1,13 +1,30 @@ /* * ------------------------------------------- - * NOTE: This test behaves differenly on 9.5 + * NOTE: This test behaves differenly on PgPro * ------------------------------------------- * - * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, - * causing different output; pathman_rowmarks_2.out is the updated version. + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. * - * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, - * now it includes aliases for inherited tables. 
+ * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ SET search_path = 'public'; CREATE EXTENSION pg_pathman; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index 10538bea..75c1c12a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -174,7 +174,12 @@ get_partition_cooked_key_pl(PG_FUNCTION_ARGS) expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); expr = cook_partitioning_expression(relid, expr_cstr, NULL); + +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + cooked_cstr = nodeToStringWithLocations(expr); +#else cooked_cstr = nodeToString(expr); +#endif pfree(expr_cstr); pfree(expr); @@ -196,7 +201,13 @@ get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) prel = get_pathman_relation_info(relid); shout_if_prel_is_invalid(relid, prel, PT_ANY); + +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + res = CStringGetTextDatum(nodeToStringWithLocations(prel->expr)); +#else res = CStringGetTextDatum(nodeToString(prel->expr)); +#endif + close_pathman_relation_info(prel); PG_RETURN_DATUM(res); diff --git a/src/relation_info.c b/src/relation_info.c index db75646f..2794a183 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -1491,7 +1491,8 @@ parse_partitioning_expression(const Oid relid, return ((ResTarget *) linitial(select_stmt->targetList))->val; } -/* Parse partitioning expression and return its type and nodeToString() as TEXT */ +/* Parse 
partitioning expression and return its type and nodeToString() + * (or nodeToStringWithLocations() in version 17 and higher) as TEXT */ Node * cook_partitioning_expression(const Oid relid, const char *expr_cstr, From a0025f4130261a200d2165db1809c022d570d05c Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Tue, 6 Aug 2024 10:24:00 +0300 Subject: [PATCH 1123/1124] PGPRO-10100: Revert "PGPRO-9797: Temporary disable test pathman_upd_del.sql" This reverts commit 5376dfba1b459de1935982964b2ba94a03fdcd6b. --- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index f32398da..f6780044 100644 --- a/Makefile +++ b/Makefile @@ -64,8 +64,6 @@ REGRESS = pathman_array_qual \ pathman_utility_stmt \ pathman_views \ pathman_CVE-2020-14350 - -REGRESS := $(filter-out pathman_upd_del, $(REGRESS)) endif ISOLATION = insert_nodes for_update rollback_on_create_partitions From 810d906815269135838a924942195d9470d3f2e6 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Fri, 1 Nov 2024 19:51:47 +0300 Subject: [PATCH 1124/1124] PGPRO-10245: fix pathman_upd_del test --- expected/pathman_upd_del.out | 3 + expected/pathman_upd_del_1.out | 3 + expected/pathman_upd_del_2.out | 3 + expected/pathman_upd_del_3.out | 3 + expected/pathman_upd_del_4.out | 464 +++++++++++++++++++++++++++++++++ sql/pathman_upd_del.sql | 3 + 6 files changed, 479 insertions(+) create mode 100644 expected/pathman_upd_del_4.out diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out index 44bb34fc..752cff27 100644 --- a/expected/pathman_upd_del.out +++ b/expected/pathman_upd_del.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out index 0a7e91e9..6e0f312d 100644 --- a/expected/pathman_upd_del_1.out +++ b/expected/pathman_upd_del_1.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out index 80325d7e..0826594c 100644 --- a/expected/pathman_upd_del_2.out +++ b/expected/pathman_upd_del_2.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out index 70b41e7d..d11eb6f8 100644 --- a/expected/pathman_upd_del_3.out +++ b/expected/pathman_upd_del_3.out @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
*/ \set VERBOSITY terse SET search_path = 'public'; diff --git a/expected/pathman_upd_del_4.out b/expected/pathman_upd_del_4.out new file mode 100644 index 00000000..54330190 --- /dev/null +++ b/expected/pathman_upd_del_4.out @@ -0,0 +1,464 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + 
create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on 
range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq 
Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + 
-> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +------------------------------------------ + Update on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t2.id) + -> Seq Scan on tmp2_1 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_3 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_4 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_5 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_6 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_7 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_8 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_9 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_10 t2 + Filter: (t.id = id) +(25 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = 
'2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM 
test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql index a034c14a..c99b9666 100644 --- a/sql/pathman_upd_del.sql +++ b/sql/pathman_upd_del.sql @@ -9,6 +9,9 @@ * plans here. There is an option to forcibly make them MATERIALIZED, but we * also need to run tests on older versions, so put updated plans in * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. */ \set VERBOSITY terse